Diffstat (limited to 'kernel/drivers/net/ethernet/sfc')
-rw-r--r--  kernel/drivers/net/ethernet/sfc/Kconfig          |   38
-rw-r--r--  kernel/drivers/net/ethernet/sfc/Makefile         |    8
-rw-r--r--  kernel/drivers/net/ethernet/sfc/bitfield.h       |  542
-rw-r--r--  kernel/drivers/net/ethernet/sfc/ef10.c           | 3713
-rw-r--r--  kernel/drivers/net/ethernet/sfc/ef10_regs.h      |  355
-rw-r--r--  kernel/drivers/net/ethernet/sfc/efx.c            | 3349
-rw-r--r--  kernel/drivers/net/ethernet/sfc/efx.h            |  255
-rw-r--r--  kernel/drivers/net/ethernet/sfc/enum.h           |  181
-rw-r--r--  kernel/drivers/net/ethernet/sfc/ethtool.c        | 1196
-rw-r--r--  kernel/drivers/net/ethernet/sfc/falcon.c         | 2892
-rw-r--r--  kernel/drivers/net/ethernet/sfc/falcon_boards.c  |  764
-rw-r--r--  kernel/drivers/net/ethernet/sfc/farch.c          | 2969
-rw-r--r--  kernel/drivers/net/ethernet/sfc/farch_regs.h     | 2932
-rw-r--r--  kernel/drivers/net/ethernet/sfc/filter.h         |  272
-rw-r--r--  kernel/drivers/net/ethernet/sfc/io.h             |  302
-rw-r--r--  kernel/drivers/net/ethernet/sfc/mcdi.c           | 1891
-rw-r--r--  kernel/drivers/net/ethernet/sfc/mcdi.h           |  361
-rw-r--r--  kernel/drivers/net/ethernet/sfc/mcdi_mon.c       |  534
-rw-r--r--  kernel/drivers/net/ethernet/sfc/mcdi_pcol.h      | 7907
-rw-r--r--  kernel/drivers/net/ethernet/sfc/mcdi_port.c      | 1035
-rw-r--r--  kernel/drivers/net/ethernet/sfc/mdio_10g.c       |  323
-rw-r--r--  kernel/drivers/net/ethernet/sfc/mdio_10g.h       |  110
-rw-r--r--  kernel/drivers/net/ethernet/sfc/mtd.c            |  133
-rw-r--r--  kernel/drivers/net/ethernet/sfc/net_driver.h     | 1504
-rw-r--r--  kernel/drivers/net/ethernet/sfc/nic.c            |  534
-rw-r--r--  kernel/drivers/net/ethernet/sfc/nic.h            |  868
-rw-r--r--  kernel/drivers/net/ethernet/sfc/phy.h            |   50
-rw-r--r--  kernel/drivers/net/ethernet/sfc/ptp.c            | 1939
-rw-r--r--  kernel/drivers/net/ethernet/sfc/qt202x_phy.c     |  495
-rw-r--r--  kernel/drivers/net/ethernet/sfc/rx.c             |  997
-rw-r--r--  kernel/drivers/net/ethernet/sfc/selftest.c       |  788
-rw-r--r--  kernel/drivers/net/ethernet/sfc/selftest.h       |   55
-rw-r--r--  kernel/drivers/net/ethernet/sfc/siena.c          | 1029
-rw-r--r--  kernel/drivers/net/ethernet/sfc/siena_sriov.c    | 1668
-rw-r--r--  kernel/drivers/net/ethernet/sfc/tenxpress.c      |  494
-rw-r--r--  kernel/drivers/net/ethernet/sfc/tx.c             | 1332
-rw-r--r--  kernel/drivers/net/ethernet/sfc/txc43128_phy.c   |  560
-rw-r--r--  kernel/drivers/net/ethernet/sfc/vfdi.h           |  255
-rw-r--r--  kernel/drivers/net/ethernet/sfc/workarounds.h    |   53
39 files changed, 44683 insertions, 0 deletions
diff --git a/kernel/drivers/net/ethernet/sfc/Kconfig b/kernel/drivers/net/ethernet/sfc/Kconfig
new file mode 100644
index 000000000..088921294
--- /dev/null
+++ b/kernel/drivers/net/ethernet/sfc/Kconfig
@@ -0,0 +1,38 @@
+config SFC
+ tristate "Solarflare SFC4000/SFC9000/SFC9100-family support"
+ depends on PCI
+ select MDIO
+ select CRC32
+ select I2C
+ select I2C_ALGOBIT
+ select PTP_1588_CLOCK
+ ---help---
+ This driver supports 10/40-gigabit Ethernet cards based on
+ the Solarflare SFC4000, SFC9000-family and SFC9100-family
+ controllers.
+
+ To compile this driver as a module, choose M here. The module
+ will be called sfc.
+config SFC_MTD
+ bool "Solarflare SFC4000/SFC9000/SFC9100-family MTD support"
+ depends on SFC && MTD && !(SFC=y && MTD=m)
+ default y
+ ---help---
+ This exposes the on-board flash and/or EEPROM as MTD devices
+ (e.g. /dev/mtd1). This is required to update the firmware or
+ the boot configuration under Linux.
+config SFC_MCDI_MON
+ bool "Solarflare SFC9000/SFC9100-family hwmon support"
+ depends on SFC && HWMON && !(SFC=y && HWMON=m)
+ default y
+ ---help---
+ This exposes the on-board firmware-managed sensors as a
+ hardware monitor device.
+config SFC_SRIOV
+ bool "Solarflare SFC9000-family SR-IOV support"
+ depends on SFC && PCI_IOV
+ default y
+ ---help---
+ This enables support for the SFC9000 I/O Virtualization
+ features, allowing accelerated network performance in
+ virtualized environments.
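
For example, a kernel configuration that builds the driver as a module with all of the optional features above would set CONFIG_SFC=m together with CONFIG_SFC_MTD=y, CONFIG_SFC_MCDI_MON=y and CONFIG_SFC_SRIOV=y; being bool options, the latter three are compiled into the sfc module rather than producing modules of their own.
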
diff --git a/kernel/drivers/net/ethernet/sfc/Makefile b/kernel/drivers/net/ethernet/sfc/Makefile
new file mode 100644
index 000000000..3a83c0dca
--- /dev/null
+++ b/kernel/drivers/net/ethernet/sfc/Makefile
@@ -0,0 +1,8 @@
+sfc-y += efx.o nic.o farch.o falcon.o siena.o ef10.o tx.o \
+ rx.o selftest.o ethtool.o qt202x_phy.o mdio_10g.o \
+ tenxpress.o txc43128_phy.o falcon_boards.o \
+ mcdi.o mcdi_port.o mcdi_mon.o ptp.o
+sfc-$(CONFIG_SFC_MTD) += mtd.o
+sfc-$(CONFIG_SFC_SRIOV) += siena_sriov.o
+
+obj-$(CONFIG_SFC) += sfc.o
diff --git a/kernel/drivers/net/ethernet/sfc/bitfield.h b/kernel/drivers/net/ethernet/sfc/bitfield.h
new file mode 100644
index 000000000..17d83f37f
--- /dev/null
+++ b/kernel/drivers/net/ethernet/sfc/bitfield.h
@@ -0,0 +1,542 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2013 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_BITFIELD_H
+#define EFX_BITFIELD_H
+
+/*
+ * Efx bitfield access
+ *
+ * Efx NICs make extensive use of bitfields up to 128 bits
+ * wide. Since there is no native 128-bit datatype on most systems,
+ * and since 64-bit datatypes are inefficient on 32-bit systems and
+ * vice versa, we wrap accesses in a way that uses the most efficient
+ * datatype.
+ *
+ * The NICs are PCI devices and therefore little-endian. Since most
+ * of the quantities that we deal with are DMAed to/from host memory,
+ * we define our datatypes (efx_oword_t, efx_qword_t and
+ * efx_dword_t) to be little-endian.
+ */
+
+/* Lowest bit numbers and widths */
+#define EFX_DUMMY_FIELD_LBN 0
+#define EFX_DUMMY_FIELD_WIDTH 0
+#define EFX_WORD_0_LBN 0
+#define EFX_WORD_0_WIDTH 16
+#define EFX_WORD_1_LBN 16
+#define EFX_WORD_1_WIDTH 16
+#define EFX_DWORD_0_LBN 0
+#define EFX_DWORD_0_WIDTH 32
+#define EFX_DWORD_1_LBN 32
+#define EFX_DWORD_1_WIDTH 32
+#define EFX_DWORD_2_LBN 64
+#define EFX_DWORD_2_WIDTH 32
+#define EFX_DWORD_3_LBN 96
+#define EFX_DWORD_3_WIDTH 32
+#define EFX_QWORD_0_LBN 0
+#define EFX_QWORD_0_WIDTH 64
+
+/* Specified attribute (e.g. LBN) of the specified field */
+#define EFX_VAL(field, attribute) field ## _ ## attribute
+/* Low bit number of the specified field */
+#define EFX_LOW_BIT(field) EFX_VAL(field, LBN)
+/* Bit width of the specified field */
+#define EFX_WIDTH(field) EFX_VAL(field, WIDTH)
+/* High bit number of the specified field */
+#define EFX_HIGH_BIT(field) (EFX_LOW_BIT(field) + EFX_WIDTH(field) - 1)
+/* Mask equal in width to the specified field.
+ *
+ * For example, a field with width 5 would have a mask of 0x1f.
+ *
+ * The maximum width mask that can be generated is 64 bits.
+ */
+#define EFX_MASK64(width) \
+ ((width) == 64 ? ~((u64) 0) : \
+ (((((u64) 1) << (width))) - 1))
+
+/* Mask equal in width to the specified field.
+ *
+ * For example, a field with width 5 would have a mask of 0x1f.
+ *
+ * The maximum width mask that can be generated is 32 bits. Use
+ * EFX_MASK64 for higher width fields.
+ */
+#define EFX_MASK32(width) \
+ ((width) == 32 ? ~((u32) 0) : \
+ (((((u32) 1) << (width))) - 1))
+
+/* A doubleword (i.e. 4 byte) datatype - little-endian in HW */
+typedef union efx_dword {
+ __le32 u32[1];
+} efx_dword_t;
+
+/* A quadword (i.e. 8 byte) datatype - little-endian in HW */
+typedef union efx_qword {
+ __le64 u64[1];
+ __le32 u32[2];
+ efx_dword_t dword[2];
+} efx_qword_t;
+
+/* An octword (eight-word, i.e. 16 byte) datatype - little-endian in HW */
+typedef union efx_oword {
+ __le64 u64[2];
+ efx_qword_t qword[2];
+ __le32 u32[4];
+ efx_dword_t dword[4];
+} efx_oword_t;
+
+/* Format string and value expanders for printk */
+#define EFX_DWORD_FMT "%08x"
+#define EFX_QWORD_FMT "%08x:%08x"
+#define EFX_OWORD_FMT "%08x:%08x:%08x:%08x"
+#define EFX_DWORD_VAL(dword) \
+ ((unsigned int) le32_to_cpu((dword).u32[0]))
+#define EFX_QWORD_VAL(qword) \
+ ((unsigned int) le32_to_cpu((qword).u32[1])), \
+ ((unsigned int) le32_to_cpu((qword).u32[0]))
+#define EFX_OWORD_VAL(oword) \
+ ((unsigned int) le32_to_cpu((oword).u32[3])), \
+ ((unsigned int) le32_to_cpu((oword).u32[2])), \
+ ((unsigned int) le32_to_cpu((oword).u32[1])), \
+ ((unsigned int) le32_to_cpu((oword).u32[0]))
+
+/*
+ * Extract bit field portion [low,high) from the native-endian element
+ * which contains bits [min,max).
+ *
+ * For example, suppose "element" represents the high 32 bits of a
+ * 64-bit value, and we wish to extract the bits belonging to the bit
+ * field occupying bits 28-45 of this 64-bit value.
+ *
+ * Then EFX_EXTRACT ( element, 32, 63, 28, 45 ) would give
+ *
+ * ( element ) << 4
+ *
+ * The result will contain the relevant bits filled in in the range
+ * [0,high-low), with garbage in bits [high-low+1,...).
+ */
+#define EFX_EXTRACT_NATIVE(native_element, min, max, low, high) \
+ ((low) > (max) || (high) < (min) ? 0 : \
+ (low) > (min) ? \
+ (native_element) >> ((low) - (min)) : \
+ (native_element) << ((min) - (low)))
+
+/*
+ * Extract bit field portion [low,high) from the 64-bit little-endian
+ * element which contains bits [min,max)
+ */
+#define EFX_EXTRACT64(element, min, max, low, high) \
+ EFX_EXTRACT_NATIVE(le64_to_cpu(element), min, max, low, high)
+
+/*
+ * Extract bit field portion [low,high) from the 32-bit little-endian
+ * element which contains bits [min,max)
+ */
+#define EFX_EXTRACT32(element, min, max, low, high) \
+ EFX_EXTRACT_NATIVE(le32_to_cpu(element), min, max, low, high)
+
+#define EFX_EXTRACT_OWORD64(oword, low, high) \
+ ((EFX_EXTRACT64((oword).u64[0], 0, 63, low, high) | \
+ EFX_EXTRACT64((oword).u64[1], 64, 127, low, high)) & \
+ EFX_MASK64((high) + 1 - (low)))
+
+#define EFX_EXTRACT_QWORD64(qword, low, high) \
+ (EFX_EXTRACT64((qword).u64[0], 0, 63, low, high) & \
+ EFX_MASK64((high) + 1 - (low)))
+
+#define EFX_EXTRACT_OWORD32(oword, low, high) \
+ ((EFX_EXTRACT32((oword).u32[0], 0, 31, low, high) | \
+ EFX_EXTRACT32((oword).u32[1], 32, 63, low, high) | \
+ EFX_EXTRACT32((oword).u32[2], 64, 95, low, high) | \
+ EFX_EXTRACT32((oword).u32[3], 96, 127, low, high)) & \
+ EFX_MASK32((high) + 1 - (low)))
+
+#define EFX_EXTRACT_QWORD32(qword, low, high) \
+ ((EFX_EXTRACT32((qword).u32[0], 0, 31, low, high) | \
+ EFX_EXTRACT32((qword).u32[1], 32, 63, low, high)) & \
+ EFX_MASK32((high) + 1 - (low)))
+
+#define EFX_EXTRACT_DWORD(dword, low, high) \
+ (EFX_EXTRACT32((dword).u32[0], 0, 31, low, high) & \
+ EFX_MASK32((high) + 1 - (low)))
+
+#define EFX_OWORD_FIELD64(oword, field) \
+ EFX_EXTRACT_OWORD64(oword, EFX_LOW_BIT(field), \
+ EFX_HIGH_BIT(field))
+
+#define EFX_QWORD_FIELD64(qword, field) \
+ EFX_EXTRACT_QWORD64(qword, EFX_LOW_BIT(field), \
+ EFX_HIGH_BIT(field))
+
+#define EFX_OWORD_FIELD32(oword, field) \
+ EFX_EXTRACT_OWORD32(oword, EFX_LOW_BIT(field), \
+ EFX_HIGH_BIT(field))
+
+#define EFX_QWORD_FIELD32(qword, field) \
+ EFX_EXTRACT_QWORD32(qword, EFX_LOW_BIT(field), \
+ EFX_HIGH_BIT(field))
+
+#define EFX_DWORD_FIELD(dword, field) \
+ EFX_EXTRACT_DWORD(dword, EFX_LOW_BIT(field), \
+ EFX_HIGH_BIT(field))
+
+#define EFX_OWORD_IS_ZERO64(oword) \
+ (((oword).u64[0] | (oword).u64[1]) == (__force __le64) 0)
+
+#define EFX_QWORD_IS_ZERO64(qword) \
+ (((qword).u64[0]) == (__force __le64) 0)
+
+#define EFX_OWORD_IS_ZERO32(oword) \
+ (((oword).u32[0] | (oword).u32[1] | (oword).u32[2] | (oword).u32[3]) \
+ == (__force __le32) 0)
+
+#define EFX_QWORD_IS_ZERO32(qword) \
+ (((qword).u32[0] | (qword).u32[1]) == (__force __le32) 0)
+
+#define EFX_DWORD_IS_ZERO(dword) \
+ (((dword).u32[0]) == (__force __le32) 0)
+
+#define EFX_OWORD_IS_ALL_ONES64(oword) \
+ (((oword).u64[0] & (oword).u64[1]) == ~((__force __le64) 0))
+
+#define EFX_QWORD_IS_ALL_ONES64(qword) \
+ ((qword).u64[0] == ~((__force __le64) 0))
+
+#define EFX_OWORD_IS_ALL_ONES32(oword) \
+ (((oword).u32[0] & (oword).u32[1] & (oword).u32[2] & (oword).u32[3]) \
+ == ~((__force __le32) 0))
+
+#define EFX_QWORD_IS_ALL_ONES32(qword) \
+ (((qword).u32[0] & (qword).u32[1]) == ~((__force __le32) 0))
+
+#define EFX_DWORD_IS_ALL_ONES(dword) \
+ ((dword).u32[0] == ~((__force __le32) 0))
+
+#if BITS_PER_LONG == 64
+#define EFX_OWORD_FIELD EFX_OWORD_FIELD64
+#define EFX_QWORD_FIELD EFX_QWORD_FIELD64
+#define EFX_OWORD_IS_ZERO EFX_OWORD_IS_ZERO64
+#define EFX_QWORD_IS_ZERO EFX_QWORD_IS_ZERO64
+#define EFX_OWORD_IS_ALL_ONES EFX_OWORD_IS_ALL_ONES64
+#define EFX_QWORD_IS_ALL_ONES EFX_QWORD_IS_ALL_ONES64
+#else
+#define EFX_OWORD_FIELD EFX_OWORD_FIELD32
+#define EFX_QWORD_FIELD EFX_QWORD_FIELD32
+#define EFX_OWORD_IS_ZERO EFX_OWORD_IS_ZERO32
+#define EFX_QWORD_IS_ZERO EFX_QWORD_IS_ZERO32
+#define EFX_OWORD_IS_ALL_ONES EFX_OWORD_IS_ALL_ONES32
+#define EFX_QWORD_IS_ALL_ONES EFX_QWORD_IS_ALL_ONES32
+#endif
+
+/*
+ * Construct bit field portion
+ *
+ * Creates the portion of the bit field [low,high) that lies within
+ * the range [min,max).
+ */
+#define EFX_INSERT_NATIVE64(min, max, low, high, value) \
+ (((low > max) || (high < min)) ? 0 : \
+ ((low > min) ? \
+ (((u64) (value)) << (low - min)) : \
+ (((u64) (value)) >> (min - low))))
+
+#define EFX_INSERT_NATIVE32(min, max, low, high, value) \
+ (((low > max) || (high < min)) ? 0 : \
+ ((low > min) ? \
+ (((u32) (value)) << (low - min)) : \
+ (((u32) (value)) >> (min - low))))
+
+#define EFX_INSERT_NATIVE(min, max, low, high, value) \
+ ((((max - min) >= 32) || ((high - low) >= 32)) ? \
+ EFX_INSERT_NATIVE64(min, max, low, high, value) : \
+ EFX_INSERT_NATIVE32(min, max, low, high, value))
+
+/*
+ * Construct bit field portion
+ *
+ * Creates the portion of the named bit field that lies within the
+ * range [min,max).
+ */
+#define EFX_INSERT_FIELD_NATIVE(min, max, field, value) \
+ EFX_INSERT_NATIVE(min, max, EFX_LOW_BIT(field), \
+ EFX_HIGH_BIT(field), value)
+
+/*
+ * Construct bit field
+ *
+ * Creates the portion of the named bit fields that lie within the
+ * range [min,max).
+ */
+#define EFX_INSERT_FIELDS_NATIVE(min, max, \
+ field1, value1, \
+ field2, value2, \
+ field3, value3, \
+ field4, value4, \
+ field5, value5, \
+ field6, value6, \
+ field7, value7, \
+ field8, value8, \
+ field9, value9, \
+ field10, value10) \
+ (EFX_INSERT_FIELD_NATIVE((min), (max), field1, (value1)) | \
+ EFX_INSERT_FIELD_NATIVE((min), (max), field2, (value2)) | \
+ EFX_INSERT_FIELD_NATIVE((min), (max), field3, (value3)) | \
+ EFX_INSERT_FIELD_NATIVE((min), (max), field4, (value4)) | \
+ EFX_INSERT_FIELD_NATIVE((min), (max), field5, (value5)) | \
+ EFX_INSERT_FIELD_NATIVE((min), (max), field6, (value6)) | \
+ EFX_INSERT_FIELD_NATIVE((min), (max), field7, (value7)) | \
+ EFX_INSERT_FIELD_NATIVE((min), (max), field8, (value8)) | \
+ EFX_INSERT_FIELD_NATIVE((min), (max), field9, (value9)) | \
+ EFX_INSERT_FIELD_NATIVE((min), (max), field10, (value10)))
+
+#define EFX_INSERT_FIELDS64(...) \
+ cpu_to_le64(EFX_INSERT_FIELDS_NATIVE(__VA_ARGS__))
+
+#define EFX_INSERT_FIELDS32(...) \
+ cpu_to_le32(EFX_INSERT_FIELDS_NATIVE(__VA_ARGS__))
+
+#define EFX_POPULATE_OWORD64(oword, ...) do { \
+ (oword).u64[0] = EFX_INSERT_FIELDS64(0, 63, __VA_ARGS__); \
+ (oword).u64[1] = EFX_INSERT_FIELDS64(64, 127, __VA_ARGS__); \
+ } while (0)
+
+#define EFX_POPULATE_QWORD64(qword, ...) do { \
+ (qword).u64[0] = EFX_INSERT_FIELDS64(0, 63, __VA_ARGS__); \
+ } while (0)
+
+#define EFX_POPULATE_OWORD32(oword, ...) do { \
+ (oword).u32[0] = EFX_INSERT_FIELDS32(0, 31, __VA_ARGS__); \
+ (oword).u32[1] = EFX_INSERT_FIELDS32(32, 63, __VA_ARGS__); \
+ (oword).u32[2] = EFX_INSERT_FIELDS32(64, 95, __VA_ARGS__); \
+ (oword).u32[3] = EFX_INSERT_FIELDS32(96, 127, __VA_ARGS__); \
+ } while (0)
+
+#define EFX_POPULATE_QWORD32(qword, ...) do { \
+ (qword).u32[0] = EFX_INSERT_FIELDS32(0, 31, __VA_ARGS__); \
+ (qword).u32[1] = EFX_INSERT_FIELDS32(32, 63, __VA_ARGS__); \
+ } while (0)
+
+#define EFX_POPULATE_DWORD(dword, ...) do { \
+ (dword).u32[0] = EFX_INSERT_FIELDS32(0, 31, __VA_ARGS__); \
+ } while (0)
+
+#if BITS_PER_LONG == 64
+#define EFX_POPULATE_OWORD EFX_POPULATE_OWORD64
+#define EFX_POPULATE_QWORD EFX_POPULATE_QWORD64
+#else
+#define EFX_POPULATE_OWORD EFX_POPULATE_OWORD32
+#define EFX_POPULATE_QWORD EFX_POPULATE_QWORD32
+#endif
+
+/* Populate an octword field with various numbers of arguments */
+#define EFX_POPULATE_OWORD_10 EFX_POPULATE_OWORD
+#define EFX_POPULATE_OWORD_9(oword, ...) \
+ EFX_POPULATE_OWORD_10(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_OWORD_8(oword, ...) \
+ EFX_POPULATE_OWORD_9(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_OWORD_7(oword, ...) \
+ EFX_POPULATE_OWORD_8(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_OWORD_6(oword, ...) \
+ EFX_POPULATE_OWORD_7(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_OWORD_5(oword, ...) \
+ EFX_POPULATE_OWORD_6(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_OWORD_4(oword, ...) \
+ EFX_POPULATE_OWORD_5(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_OWORD_3(oword, ...) \
+ EFX_POPULATE_OWORD_4(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_OWORD_2(oword, ...) \
+ EFX_POPULATE_OWORD_3(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_OWORD_1(oword, ...) \
+ EFX_POPULATE_OWORD_2(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_ZERO_OWORD(oword) \
+ EFX_POPULATE_OWORD_1(oword, EFX_DUMMY_FIELD, 0)
+#define EFX_SET_OWORD(oword) \
+ EFX_POPULATE_OWORD_4(oword, \
+ EFX_DWORD_0, 0xffffffff, \
+ EFX_DWORD_1, 0xffffffff, \
+ EFX_DWORD_2, 0xffffffff, \
+ EFX_DWORD_3, 0xffffffff)
+
+/* Populate a quadword field with various numbers of arguments */
+#define EFX_POPULATE_QWORD_10 EFX_POPULATE_QWORD
+#define EFX_POPULATE_QWORD_9(qword, ...) \
+ EFX_POPULATE_QWORD_10(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_QWORD_8(qword, ...) \
+ EFX_POPULATE_QWORD_9(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_QWORD_7(qword, ...) \
+ EFX_POPULATE_QWORD_8(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_QWORD_6(qword, ...) \
+ EFX_POPULATE_QWORD_7(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_QWORD_5(qword, ...) \
+ EFX_POPULATE_QWORD_6(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_QWORD_4(qword, ...) \
+ EFX_POPULATE_QWORD_5(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_QWORD_3(qword, ...) \
+ EFX_POPULATE_QWORD_4(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_QWORD_2(qword, ...) \
+ EFX_POPULATE_QWORD_3(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_QWORD_1(qword, ...) \
+ EFX_POPULATE_QWORD_2(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_ZERO_QWORD(qword) \
+ EFX_POPULATE_QWORD_1(qword, EFX_DUMMY_FIELD, 0)
+#define EFX_SET_QWORD(qword) \
+ EFX_POPULATE_QWORD_2(qword, \
+ EFX_DWORD_0, 0xffffffff, \
+ EFX_DWORD_1, 0xffffffff)
+
+/* Populate a dword field with various numbers of arguments */
+#define EFX_POPULATE_DWORD_10 EFX_POPULATE_DWORD
+#define EFX_POPULATE_DWORD_9(dword, ...) \
+ EFX_POPULATE_DWORD_10(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_DWORD_8(dword, ...) \
+ EFX_POPULATE_DWORD_9(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_DWORD_7(dword, ...) \
+ EFX_POPULATE_DWORD_8(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_DWORD_6(dword, ...) \
+ EFX_POPULATE_DWORD_7(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_DWORD_5(dword, ...) \
+ EFX_POPULATE_DWORD_6(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_DWORD_4(dword, ...) \
+ EFX_POPULATE_DWORD_5(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_DWORD_3(dword, ...) \
+ EFX_POPULATE_DWORD_4(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_DWORD_2(dword, ...) \
+ EFX_POPULATE_DWORD_3(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_DWORD_1(dword, ...) \
+ EFX_POPULATE_DWORD_2(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_ZERO_DWORD(dword) \
+ EFX_POPULATE_DWORD_1(dword, EFX_DUMMY_FIELD, 0)
+#define EFX_SET_DWORD(dword) \
+ EFX_POPULATE_DWORD_1(dword, EFX_DWORD_0, 0xffffffff)
+
+/*
+ * Modify a named field within an already-populated structure. Used
+ * for read-modify-write operations.
+ *
+ */
+#define EFX_INVERT_OWORD(oword) do { \
+ (oword).u64[0] = ~((oword).u64[0]); \
+ (oword).u64[1] = ~((oword).u64[1]); \
+ } while (0)
+
+#define EFX_AND_OWORD(oword, from, mask) \
+ do { \
+ (oword).u64[0] = (from).u64[0] & (mask).u64[0]; \
+ (oword).u64[1] = (from).u64[1] & (mask).u64[1]; \
+ } while (0)
+
+#define EFX_OR_OWORD(oword, from, mask) \
+ do { \
+ (oword).u64[0] = (from).u64[0] | (mask).u64[0]; \
+ (oword).u64[1] = (from).u64[1] | (mask).u64[1]; \
+ } while (0)
+
+#define EFX_INSERT64(min, max, low, high, value) \
+ cpu_to_le64(EFX_INSERT_NATIVE(min, max, low, high, value))
+
+#define EFX_INSERT32(min, max, low, high, value) \
+ cpu_to_le32(EFX_INSERT_NATIVE(min, max, low, high, value))
+
+#define EFX_INPLACE_MASK64(min, max, low, high) \
+ EFX_INSERT64(min, max, low, high, EFX_MASK64((high) + 1 - (low)))
+
+#define EFX_INPLACE_MASK32(min, max, low, high) \
+ EFX_INSERT32(min, max, low, high, EFX_MASK32((high) + 1 - (low)))
+
+#define EFX_SET_OWORD64(oword, low, high, value) do { \
+ (oword).u64[0] = (((oword).u64[0] \
+ & ~EFX_INPLACE_MASK64(0, 63, low, high)) \
+ | EFX_INSERT64(0, 63, low, high, value)); \
+ (oword).u64[1] = (((oword).u64[1] \
+ & ~EFX_INPLACE_MASK64(64, 127, low, high)) \
+ | EFX_INSERT64(64, 127, low, high, value)); \
+ } while (0)
+
+#define EFX_SET_QWORD64(qword, low, high, value) do { \
+ (qword).u64[0] = (((qword).u64[0] \
+ & ~EFX_INPLACE_MASK64(0, 63, low, high)) \
+ | EFX_INSERT64(0, 63, low, high, value)); \
+ } while (0)
+
+#define EFX_SET_OWORD32(oword, low, high, value) do { \
+ (oword).u32[0] = (((oword).u32[0] \
+ & ~EFX_INPLACE_MASK32(0, 31, low, high)) \
+ | EFX_INSERT32(0, 31, low, high, value)); \
+ (oword).u32[1] = (((oword).u32[1] \
+ & ~EFX_INPLACE_MASK32(32, 63, low, high)) \
+ | EFX_INSERT32(32, 63, low, high, value)); \
+ (oword).u32[2] = (((oword).u32[2] \
+ & ~EFX_INPLACE_MASK32(64, 95, low, high)) \
+ | EFX_INSERT32(64, 95, low, high, value)); \
+ (oword).u32[3] = (((oword).u32[3] \
+ & ~EFX_INPLACE_MASK32(96, 127, low, high)) \
+ | EFX_INSERT32(96, 127, low, high, value)); \
+ } while (0)
+
+#define EFX_SET_QWORD32(qword, low, high, value) do { \
+ (qword).u32[0] = (((qword).u32[0] \
+ & ~EFX_INPLACE_MASK32(0, 31, low, high)) \
+ | EFX_INSERT32(0, 31, low, high, value)); \
+ (qword).u32[1] = (((qword).u32[1] \
+ & ~EFX_INPLACE_MASK32(32, 63, low, high)) \
+ | EFX_INSERT32(32, 63, low, high, value)); \
+ } while (0)
+
+#define EFX_SET_DWORD32(dword, low, high, value) do { \
+ (dword).u32[0] = (((dword).u32[0] \
+ & ~EFX_INPLACE_MASK32(0, 31, low, high)) \
+ | EFX_INSERT32(0, 31, low, high, value)); \
+ } while (0)
+
+#define EFX_SET_OWORD_FIELD64(oword, field, value) \
+ EFX_SET_OWORD64(oword, EFX_LOW_BIT(field), \
+ EFX_HIGH_BIT(field), value)
+
+#define EFX_SET_QWORD_FIELD64(qword, field, value) \
+ EFX_SET_QWORD64(qword, EFX_LOW_BIT(field), \
+ EFX_HIGH_BIT(field), value)
+
+#define EFX_SET_OWORD_FIELD32(oword, field, value) \
+ EFX_SET_OWORD32(oword, EFX_LOW_BIT(field), \
+ EFX_HIGH_BIT(field), value)
+
+#define EFX_SET_QWORD_FIELD32(qword, field, value) \
+ EFX_SET_QWORD32(qword, EFX_LOW_BIT(field), \
+ EFX_HIGH_BIT(field), value)
+
+#define EFX_SET_DWORD_FIELD(dword, field, value) \
+ EFX_SET_DWORD32(dword, EFX_LOW_BIT(field), \
+ EFX_HIGH_BIT(field), value)
+
+
+
+#if BITS_PER_LONG == 64
+#define EFX_SET_OWORD_FIELD EFX_SET_OWORD_FIELD64
+#define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD64
+#else
+#define EFX_SET_OWORD_FIELD EFX_SET_OWORD_FIELD32
+#define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD32
+#endif
+
+/* Used to avoid compiler warnings about shift range exceeding width
+ * of the data types when dma_addr_t is only 32 bits wide.
+ */
+#define DMA_ADDR_T_WIDTH (8 * sizeof(dma_addr_t))
+#define EFX_DMA_TYPE_WIDTH(width) \
+ (((width) < DMA_ADDR_T_WIDTH) ? (width) : DMA_ADDR_T_WIDTH)
+
+
+/* Static initialiser */
+#define EFX_OWORD32(a, b, c, d) \
+ { .u32 = { cpu_to_le32(a), cpu_to_le32(b), \
+ cpu_to_le32(c), cpu_to_le32(d) } }
+
+#endif /* EFX_BITFIELD_H */
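
As a minimal usage sketch (illustrative only, assuming nothing beyond the macros and the generic EFX_DWORD_0/EFX_DWORD_1 field definitions above), a read-modify-write of a 128-bit value could look like this:

static void example_bitfield_usage(void)
{
	efx_oword_t reg;
	u32 low;

	/* Populate the two named 32-bit fields; all unnamed bits are zeroed */
	EFX_POPULATE_OWORD_2(reg,
			     EFX_DWORD_0, 0x12345678,
			     EFX_DWORD_1, 0x9abcdef0);

	/* Extracts 0x12345678, on both 32-bit and 64-bit hosts */
	low = EFX_OWORD_FIELD(reg, EFX_DWORD_0);

	/* Read-modify-write of one field, leaving the other bits intact */
	EFX_SET_OWORD_FIELD(reg, EFX_DWORD_1, low);
}
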
diff --git a/kernel/drivers/net/ethernet/sfc/ef10.c b/kernel/drivers/net/ethernet/sfc/ef10.c
new file mode 100644
index 000000000..fbb6cfa0f
--- /dev/null
+++ b/kernel/drivers/net/ethernet/sfc/ef10.c
@@ -0,0 +1,3713 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2012-2013 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "net_driver.h"
+#include "ef10_regs.h"
+#include "io.h"
+#include "mcdi.h"
+#include "mcdi_pcol.h"
+#include "nic.h"
+#include "workarounds.h"
+#include "selftest.h"
+#include <linux/in.h>
+#include <linux/jhash.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+/* Hardware control for EF10 architecture including 'Huntington'. */
+
+#define EFX_EF10_DRVGEN_EV 7
+enum {
+ EFX_EF10_TEST = 1,
+ EFX_EF10_REFILL,
+};
+
+/* The reserved RSS context value */
+#define EFX_EF10_RSS_CONTEXT_INVALID 0xffffffff
+
+/* The filter table(s) are managed by firmware and we have write-only
+ * access. When removing filters we must identify them to the
+ * firmware by a 64-bit handle, but this is too wide for Linux kernel
+ * interfaces (32-bit for RX NFC, 16-bit for RFS). Also, we need to
+ * be able to tell in advance whether a requested insertion will
+ * replace an existing filter. Therefore we maintain a software hash
+ * table, which should be at least as large as the hardware hash
+ * table.
+ *
+ * Huntington has a single 8K filter table shared between all filter
+ * types and both ports.
+ */
+#define HUNT_FILTER_TBL_ROWS 8192
+
+struct efx_ef10_filter_table {
+/* The RX match field masks supported by this fw & hw, in order of priority */
+ enum efx_filter_match_flags rx_match_flags[
+ MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM];
+ unsigned int rx_match_count;
+
+ struct {
+ unsigned long spec; /* pointer to spec plus flag bits */
+/* BUSY flag indicates that an update is in progress. AUTO_OLD is
+ * used to mark and sweep MAC filters for the device address lists.
+ */
+#define EFX_EF10_FILTER_FLAG_BUSY 1UL
+#define EFX_EF10_FILTER_FLAG_AUTO_OLD 2UL
+#define EFX_EF10_FILTER_FLAGS 3UL
+ u64 handle; /* firmware handle */
+ } *entry;
+ wait_queue_head_t waitq;
+/* Shadow of net_device address lists, guarded by mac_lock */
+#define EFX_EF10_FILTER_DEV_UC_MAX 32
+#define EFX_EF10_FILTER_DEV_MC_MAX 256
+ struct {
+ u8 addr[ETH_ALEN];
+ u16 id;
+ } dev_uc_list[EFX_EF10_FILTER_DEV_UC_MAX],
+ dev_mc_list[EFX_EF10_FILTER_DEV_MC_MAX];
+ int dev_uc_count; /* negative for PROMISC */
+ int dev_mc_count; /* negative for PROMISC/ALLMULTI */
+};
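
The spec word above packs a pointer and the two flag bits into a single unsigned long: a struct efx_filter_spec pointer is at least 4-byte aligned, so its low two bits are free to hold BUSY/AUTO_OLD. A hypothetical pair of accessors (the names here are illustrative, not necessarily those used later in the file) would unpack it like this:

static struct efx_filter_spec *
example_entry_spec(const struct efx_ef10_filter_table *table,
		   unsigned int filter_idx)
{
	/* Mask off the flag bits to recover the aligned pointer */
	return (struct efx_filter_spec *)
		(table->entry[filter_idx].spec & ~EFX_EF10_FILTER_FLAGS);
}

static unsigned int
example_entry_flags(const struct efx_ef10_filter_table *table,
		    unsigned int filter_idx)
{
	/* Keep only the BUSY/AUTO_OLD bits */
	return table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAGS;
}
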
+
+/* An arbitrary search limit for the software hash table */
+#define EFX_EF10_FILTER_SEARCH_LIMIT 200
+
+static void efx_ef10_rx_push_rss_config(struct efx_nic *efx);
+static void efx_ef10_rx_free_indir_table(struct efx_nic *efx);
+static void efx_ef10_filter_table_remove(struct efx_nic *efx);
+
+static int efx_ef10_get_warm_boot_count(struct efx_nic *efx)
+{
+ efx_dword_t reg;
+
+ efx_readd(efx, &reg, ER_DZ_BIU_MC_SFT_STATUS);
+ return EFX_DWORD_FIELD(reg, EFX_WORD_1) == 0xb007 ?
+ EFX_DWORD_FIELD(reg, EFX_WORD_0) : -EIO;
+}
+
+static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx)
+{
+ return resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]);
+}
+
+static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_OUT_LEN);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ size_t outlen;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < sizeof(outbuf)) {
+ netif_err(efx, drv, efx->net_dev,
+ "unable to read datapath firmware capabilities\n");
+ return -EIO;
+ }
+
+ nic_data->datapath_caps =
+ MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1);
+
+ if (!(nic_data->datapath_caps &
+ (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN))) {
+ netif_err(efx, drv, efx->net_dev,
+ "current firmware does not support TSO\n");
+ return -ENODEV;
+ }
+
+ if (!(nic_data->datapath_caps &
+ (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN))) {
+ netif_err(efx, probe, efx->net_dev,
+ "current firmware does not support an RX prefix\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int efx_ef10_get_sysclk_freq(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CLOCK_OUT_LEN);
+ int rc;
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_CLOCK, NULL, 0,
+ outbuf, sizeof(outbuf), NULL);
+ if (rc)
+ return rc;
+ rc = MCDI_DWORD(outbuf, GET_CLOCK_OUT_SYS_FREQ);
+ return rc > 0 ? rc : -ERANGE;
+}
+
+static int efx_ef10_get_mac_address(struct efx_nic *efx, u8 *mac_address)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN);
+ size_t outlen;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_GET_MAC_ADDRESSES_IN_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_MAC_ADDRESSES, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN)
+ return -EIO;
+
+ ether_addr_copy(mac_address,
+ MCDI_PTR(outbuf, GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE));
+ return 0;
+}
+
+static int efx_ef10_probe(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data;
+ int i, rc;
+
+ /* We can have one VI for each 8K region. However, until we
+ * use TX option descriptors we need two TX queues per channel.
+ */
+ efx->max_channels =
+ min_t(unsigned int,
+ EFX_MAX_CHANNELS,
+ resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]) /
+ (EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES));
+ if (WARN_ON(efx->max_channels == 0))
+ return -EIO;
+
+ nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
+ if (!nic_data)
+ return -ENOMEM;
+ efx->nic_data = nic_data;
+
+ rc = efx_nic_alloc_buffer(efx, &nic_data->mcdi_buf,
+ 8 + MCDI_CTL_SDU_LEN_MAX_V2, GFP_KERNEL);
+ if (rc)
+ goto fail1;
+
+ /* Get the MC's warm boot count. In case it's rebooting right
+ * now, be prepared to retry.
+ */
+ i = 0;
+ for (;;) {
+ rc = efx_ef10_get_warm_boot_count(efx);
+ if (rc >= 0)
+ break;
+ if (++i == 5)
+ goto fail2;
+ ssleep(1);
+ }
+ nic_data->warm_boot_count = rc;
+
+ nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
+
+ /* In case we're recovering from a crash (kexec), we want to
+ * cancel any outstanding request by the previous user of this
+ * function. We send a special message using the least
+ * significant bits of the 'high' (doorbell) register.
+ */
+ _efx_writed(efx, cpu_to_le32(1), ER_DZ_MC_DB_HWRD);
+
+ rc = efx_mcdi_init(efx);
+ if (rc)
+ goto fail2;
+
+ /* Reset (most) configuration for this function */
+ rc = efx_mcdi_reset(efx, RESET_TYPE_ALL);
+ if (rc)
+ goto fail3;
+
+ /* Enable event logging */
+ rc = efx_mcdi_log_ctrl(efx, true, false, 0);
+ if (rc)
+ goto fail3;
+
+ rc = efx_ef10_init_datapath_caps(efx);
+ if (rc < 0)
+ goto fail3;
+
+ efx->rx_packet_len_offset =
+ ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE;
+
+ rc = efx_mcdi_port_get_number(efx);
+ if (rc < 0)
+ goto fail3;
+ efx->port_num = rc;
+
+ rc = efx_ef10_get_mac_address(efx, efx->net_dev->perm_addr);
+ if (rc)
+ goto fail3;
+
+ rc = efx_ef10_get_sysclk_freq(efx);
+ if (rc < 0)
+ goto fail3;
+ efx->timer_quantum_ns = 1536000 / rc; /* 1536 cycles */
+
+ /* Check whether firmware supports bug 35388 workaround */
+ rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG35388, true);
+ if (rc == 0)
+ nic_data->workaround_35388 = true;
+ else if (rc != -ENOSYS && rc != -ENOENT)
+ goto fail3;
+ netif_dbg(efx, probe, efx->net_dev,
+ "workaround for bug 35388 is %sabled\n",
+ nic_data->workaround_35388 ? "en" : "dis");
+
+ rc = efx_mcdi_mon_probe(efx);
+ if (rc)
+ goto fail3;
+
+ efx_ptp_probe(efx, NULL);
+
+ return 0;
+
+fail3:
+ efx_mcdi_fini(efx);
+fail2:
+ efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
+fail1:
+ kfree(nic_data);
+ efx->nic_data = NULL;
+ return rc;
+}
+
+static int efx_ef10_free_vis(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, 0);
+ size_t outlen;
+ int rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FREE_VIS, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+
+ /* -EALREADY means nothing to free, so ignore */
+ if (rc == -EALREADY)
+ rc = 0;
+ if (rc)
+ efx_mcdi_display_error(efx, MC_CMD_FREE_VIS, 0, outbuf, outlen,
+ rc);
+ return rc;
+}
+
+#ifdef EFX_USE_PIO
+
+static void efx_ef10_free_piobufs(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_FREE_PIOBUF_IN_LEN);
+ unsigned int i;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_FREE_PIOBUF_OUT_LEN != 0);
+
+ for (i = 0; i < nic_data->n_piobufs; i++) {
+ MCDI_SET_DWORD(inbuf, FREE_PIOBUF_IN_PIOBUF_HANDLE,
+ nic_data->piobuf_handle[i]);
+ rc = efx_mcdi_rpc(efx, MC_CMD_FREE_PIOBUF, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ WARN_ON(rc);
+ }
+
+ nic_data->n_piobufs = 0;
+}
+
+static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_PIOBUF_OUT_LEN);
+ unsigned int i;
+ size_t outlen;
+ int rc = 0;
+
+ BUILD_BUG_ON(MC_CMD_ALLOC_PIOBUF_IN_LEN != 0);
+
+ for (i = 0; i < n; i++) {
+ rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_PIOBUF, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ break;
+ if (outlen < MC_CMD_ALLOC_PIOBUF_OUT_LEN) {
+ rc = -EIO;
+ break;
+ }
+ nic_data->piobuf_handle[i] =
+ MCDI_DWORD(outbuf, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE);
+ netif_dbg(efx, probe, efx->net_dev,
+ "allocated PIO buffer %u handle %x\n", i,
+ nic_data->piobuf_handle[i]);
+ }
+
+ nic_data->n_piobufs = i;
+ if (rc)
+ efx_ef10_free_piobufs(efx);
+ return rc;
+}
+
+static int efx_ef10_link_piobufs(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ MCDI_DECLARE_BUF(inbuf,
+ max(MC_CMD_LINK_PIOBUF_IN_LEN,
+ MC_CMD_UNLINK_PIOBUF_IN_LEN));
+ struct efx_channel *channel;
+ struct efx_tx_queue *tx_queue;
+ unsigned int offset, index;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_OUT_LEN != 0);
+ BUILD_BUG_ON(MC_CMD_UNLINK_PIOBUF_OUT_LEN != 0);
+
+ /* Link a buffer to each VI in the write-combining mapping */
+ for (index = 0; index < nic_data->n_piobufs; ++index) {
+ MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_PIOBUF_HANDLE,
+ nic_data->piobuf_handle[index]);
+ MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_TXQ_INSTANCE,
+ nic_data->pio_write_vi_base + index);
+ rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF,
+ inbuf, MC_CMD_LINK_PIOBUF_IN_LEN,
+ NULL, 0, NULL);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev,
+ "failed to link VI %u to PIO buffer %u (%d)\n",
+ nic_data->pio_write_vi_base + index, index,
+ rc);
+ goto fail;
+ }
+ netif_dbg(efx, probe, efx->net_dev,
+ "linked VI %u to PIO buffer %u\n",
+ nic_data->pio_write_vi_base + index, index);
+ }
+
+ /* Link a buffer to each TX queue */
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ /* We assign the PIO buffers to queues in
+ * reverse order to allow for the following
+ * special case.
+ */
+ offset = ((efx->tx_channel_offset + efx->n_tx_channels -
+ tx_queue->channel->channel - 1) *
+ efx_piobuf_size);
+ index = offset / ER_DZ_TX_PIOBUF_SIZE;
+ offset = offset % ER_DZ_TX_PIOBUF_SIZE;
+
+ /* When the host page size is 4K, the first
+ * host page in the WC mapping may be within
+ * the same VI page as the last TX queue. We
+ * can only link one buffer to each VI.
+ */
+ if (tx_queue->queue == nic_data->pio_write_vi_base) {
+ BUG_ON(index != 0);
+ rc = 0;
+ } else {
+ MCDI_SET_DWORD(inbuf,
+ LINK_PIOBUF_IN_PIOBUF_HANDLE,
+ nic_data->piobuf_handle[index]);
+ MCDI_SET_DWORD(inbuf,
+ LINK_PIOBUF_IN_TXQ_INSTANCE,
+ tx_queue->queue);
+ rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF,
+ inbuf, MC_CMD_LINK_PIOBUF_IN_LEN,
+ NULL, 0, NULL);
+ }
+
+ if (rc) {
+ /* This is non-fatal; the TX path just
+ * won't use PIO for this queue
+ */
+ netif_err(efx, drv, efx->net_dev,
+ "failed to link VI %u to PIO buffer %u (%d)\n",
+ tx_queue->queue, index, rc);
+ tx_queue->piobuf = NULL;
+ } else {
+ tx_queue->piobuf =
+ nic_data->pio_write_base +
+ index * EFX_VI_PAGE_SIZE + offset;
+ tx_queue->piobuf_offset = offset;
+ netif_dbg(efx, probe, efx->net_dev,
+ "linked VI %u to PIO buffer %u offset %x addr %p\n",
+ tx_queue->queue, index,
+ tx_queue->piobuf_offset,
+ tx_queue->piobuf);
+ }
+ }
+ }
+
+ return 0;
+
+fail:
+ while (index--) {
+ MCDI_SET_DWORD(inbuf, UNLINK_PIOBUF_IN_TXQ_INSTANCE,
+ nic_data->pio_write_vi_base + index);
+ efx_mcdi_rpc(efx, MC_CMD_UNLINK_PIOBUF,
+ inbuf, MC_CMD_UNLINK_PIOBUF_IN_LEN,
+ NULL, 0, NULL);
+ }
+ return rc;
+}
+
+#else /* !EFX_USE_PIO */
+
+static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
+{
+ return n == 0 ? 0 : -ENOBUFS;
+}
+
+static int efx_ef10_link_piobufs(struct efx_nic *efx)
+{
+ return 0;
+}
+
+static void efx_ef10_free_piobufs(struct efx_nic *efx)
+{
+}
+
+#endif /* EFX_USE_PIO */
+
+static void efx_ef10_remove(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ efx_ptp_remove(efx);
+
+ efx_mcdi_mon_remove(efx);
+
+ efx_ef10_rx_free_indir_table(efx);
+
+ if (nic_data->wc_membase)
+ iounmap(nic_data->wc_membase);
+
+ rc = efx_ef10_free_vis(efx);
+ WARN_ON(rc != 0);
+
+ if (!nic_data->must_restore_piobufs)
+ efx_ef10_free_piobufs(efx);
+
+ efx_mcdi_fini(efx);
+ efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
+ kfree(nic_data);
+}
+
+static int efx_ef10_alloc_vis(struct efx_nic *efx,
+ unsigned int min_vis, unsigned int max_vis)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_ALLOC_VIS_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_VIS_OUT_LEN);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MIN_VI_COUNT, min_vis);
+ MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MAX_VI_COUNT, max_vis);
+ rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_VIS, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc != 0)
+ return rc;
+
+ if (outlen < MC_CMD_ALLOC_VIS_OUT_LEN)
+ return -EIO;
+
+ netif_dbg(efx, drv, efx->net_dev, "base VI is A0x%03x\n",
+ MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE));
+
+ nic_data->vi_base = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE);
+ nic_data->n_allocated_vis = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_COUNT);
+ return 0;
+}
+
+/* Note that the failure path of this function does not free
+ * resources, as this will be done by efx_ef10_remove().
+ */
+static int efx_ef10_dimension_resources(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ unsigned int uc_mem_map_size, wc_mem_map_size;
+ unsigned int min_vis, pio_write_vi_base, max_vis;
+ void __iomem *membase;
+ int rc;
+
+ min_vis = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
+
+#ifdef EFX_USE_PIO
+ /* Try to allocate PIO buffers if wanted and if the full
+ * number of PIO buffers would be sufficient to allocate one
+ * copy-buffer per TX channel. Failure is non-fatal, as there
+ * are only a small number of PIO buffers shared between all
+ * functions of the controller.
+ */
+ if (efx_piobuf_size != 0 &&
+ ER_DZ_TX_PIOBUF_SIZE / efx_piobuf_size * EF10_TX_PIOBUF_COUNT >=
+ efx->n_tx_channels) {
+ unsigned int n_piobufs =
+ DIV_ROUND_UP(efx->n_tx_channels,
+ ER_DZ_TX_PIOBUF_SIZE / efx_piobuf_size);
+
+ rc = efx_ef10_alloc_piobufs(efx, n_piobufs);
+ if (rc)
+ netif_err(efx, probe, efx->net_dev,
+ "failed to allocate PIO buffers (%d)\n", rc);
+ else
+ netif_dbg(efx, probe, efx->net_dev,
+ "allocated %u PIO buffers\n", n_piobufs);
+ }
+#else
+ nic_data->n_piobufs = 0;
+#endif
+
+ /* PIO buffers should be mapped with write-combining enabled,
+ * and we want to make single UC and WC mappings rather than
+ * several of each (in fact that's the only option if host
+ * page size is >4K). So we may allocate some extra VIs just
+ * for writing PIO buffers through.
+ *
+ * The UC mapping contains (min_vis - 1) complete VIs and the
+ * first half of the next VI. Then the WC mapping begins with
+ * the second half of this last VI.
+ */
+ uc_mem_map_size = PAGE_ALIGN((min_vis - 1) * EFX_VI_PAGE_SIZE +
+ ER_DZ_TX_PIOBUF);
+ if (nic_data->n_piobufs) {
+ /* pio_write_vi_base rounds down to give the number of complete
+ * VIs inside the UC mapping.
+ */
+ pio_write_vi_base = uc_mem_map_size / EFX_VI_PAGE_SIZE;
+ wc_mem_map_size = (PAGE_ALIGN((pio_write_vi_base +
+ nic_data->n_piobufs) *
+ EFX_VI_PAGE_SIZE) -
+ uc_mem_map_size);
+ max_vis = pio_write_vi_base + nic_data->n_piobufs;
+ } else {
+ pio_write_vi_base = 0;
+ wc_mem_map_size = 0;
+ max_vis = min_vis;
+ }
+
+ /* In case the last attached driver failed to free VIs, do it now */
+ rc = efx_ef10_free_vis(efx);
+ if (rc != 0)
+ return rc;
+
+ rc = efx_ef10_alloc_vis(efx, min_vis, max_vis);
+ if (rc != 0)
+ return rc;
+
+ /* If we didn't get enough VIs to map all the PIO buffers, free the
+ * PIO buffers
+ */
+ if (nic_data->n_piobufs &&
+ nic_data->n_allocated_vis <
+ pio_write_vi_base + nic_data->n_piobufs) {
+ netif_dbg(efx, probe, efx->net_dev,
+ "%u VIs are not sufficient to map %u PIO buffers\n",
+ nic_data->n_allocated_vis, nic_data->n_piobufs);
+ efx_ef10_free_piobufs(efx);
+ }
+
+ /* Shrink the original UC mapping of the memory BAR */
+ membase = ioremap_nocache(efx->membase_phys, uc_mem_map_size);
+ if (!membase) {
+ netif_err(efx, probe, efx->net_dev,
+ "could not shrink memory BAR to %x\n",
+ uc_mem_map_size);
+ return -ENOMEM;
+ }
+ iounmap(efx->membase);
+ efx->membase = membase;
+
+ /* Set up the WC mapping if needed */
+ if (wc_mem_map_size) {
+ nic_data->wc_membase = ioremap_wc(efx->membase_phys +
+ uc_mem_map_size,
+ wc_mem_map_size);
+ if (!nic_data->wc_membase) {
+ netif_err(efx, probe, efx->net_dev,
+ "could not allocate WC mapping of size %x\n",
+ wc_mem_map_size);
+ return -ENOMEM;
+ }
+ nic_data->pio_write_vi_base = pio_write_vi_base;
+ nic_data->pio_write_base =
+ nic_data->wc_membase +
+ (pio_write_vi_base * EFX_VI_PAGE_SIZE + ER_DZ_TX_PIOBUF -
+ uc_mem_map_size);
+
+ rc = efx_ef10_link_piobufs(efx);
+ if (rc)
+ efx_ef10_free_piobufs(efx);
+ }
+
+ netif_dbg(efx, probe, efx->net_dev,
+ "memory BAR at %pa (virtual %p+%x UC, %p+%x WC)\n",
+ &efx->membase_phys, efx->membase, uc_mem_map_size,
+ nic_data->wc_membase, wc_mem_map_size);
+
+ return 0;
+}
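
As a worked example of the mapping arithmetic above (assuming a 4 KiB host page size, an EFX_VI_PAGE_SIZE of 8 KiB and a PIO aperture that starts 4 KiB into each VI page): with min_vis = 4 and two PIO buffers, uc_mem_map_size = PAGE_ALIGN(3 * 8192 + 4096) = 28 KiB, pio_write_vi_base = 28 KiB / 8 KiB = 3, max_vis = 3 + 2 = 5, and wc_mem_map_size = PAGE_ALIGN(5 * 8 KiB) - 28 KiB = 12 KiB, so the write-combining mapping begins half-way through the last VI of the UC mapping, as described in the comment.
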
+
+static int efx_ef10_init_nic(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ if (nic_data->must_check_datapath_caps) {
+ rc = efx_ef10_init_datapath_caps(efx);
+ if (rc)
+ return rc;
+ nic_data->must_check_datapath_caps = false;
+ }
+
+ if (nic_data->must_realloc_vis) {
+ /* We cannot let the number of VIs change now */
+ rc = efx_ef10_alloc_vis(efx, nic_data->n_allocated_vis,
+ nic_data->n_allocated_vis);
+ if (rc)
+ return rc;
+ nic_data->must_realloc_vis = false;
+ }
+
+ if (nic_data->must_restore_piobufs && nic_data->n_piobufs) {
+ rc = efx_ef10_alloc_piobufs(efx, nic_data->n_piobufs);
+ if (rc == 0) {
+ rc = efx_ef10_link_piobufs(efx);
+ if (rc)
+ efx_ef10_free_piobufs(efx);
+ }
+
+ /* Log an error on failure, but this is non-fatal */
+ if (rc)
+ netif_err(efx, drv, efx->net_dev,
+ "failed to restore PIO buffers (%d)\n", rc);
+ nic_data->must_restore_piobufs = false;
+ }
+
+ efx_ef10_rx_push_rss_config(efx);
+ return 0;
+}
+
+static void efx_ef10_reset_mc_allocations(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ /* All our allocations have been reset */
+ nic_data->must_realloc_vis = true;
+ nic_data->must_restore_filters = true;
+ nic_data->must_restore_piobufs = true;
+ nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
+}
+
+static int efx_ef10_map_reset_flags(u32 *flags)
+{
+ enum {
+ EF10_RESET_PORT = ((ETH_RESET_MAC | ETH_RESET_PHY) <<
+ ETH_RESET_SHARED_SHIFT),
+ EF10_RESET_MC = ((ETH_RESET_DMA | ETH_RESET_FILTER |
+ ETH_RESET_OFFLOAD | ETH_RESET_MAC |
+ ETH_RESET_PHY | ETH_RESET_MGMT) <<
+ ETH_RESET_SHARED_SHIFT)
+ };
+
+ /* We assume for now that our PCI function is permitted to
+ * reset everything.
+ */
+
+ if ((*flags & EF10_RESET_MC) == EF10_RESET_MC) {
+ *flags &= ~EF10_RESET_MC;
+ return RESET_TYPE_WORLD;
+ }
+
+ if ((*flags & EF10_RESET_PORT) == EF10_RESET_PORT) {
+ *flags &= ~EF10_RESET_PORT;
+ return RESET_TYPE_ALL;
+ }
+
+ /* no invisible reset implemented */
+
+ return -EINVAL;
+}
+
+static int efx_ef10_reset(struct efx_nic *efx, enum reset_type reset_type)
+{
+ int rc = efx_mcdi_reset(efx, reset_type);
+
+ /* If it was a port reset, trigger reallocation of MC resources.
+ * Note that on an MC reset nothing needs to be done now because we'll
+ * detect the MC reset later and handle it then.
+ * For an FLR, we never get an MC reset event, but the MC has reset all
+ * resources assigned to us, so we have to trigger reallocation now.
+ */
+ if ((reset_type == RESET_TYPE_ALL ||
+ reset_type == RESET_TYPE_MCDI_TIMEOUT) && !rc)
+ efx_ef10_reset_mc_allocations(efx);
+ return rc;
+}
+
+#define EF10_DMA_STAT(ext_name, mcdi_name) \
+ [EF10_STAT_ ## ext_name] = \
+ { #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
+#define EF10_DMA_INVIS_STAT(int_name, mcdi_name) \
+ [EF10_STAT_ ## int_name] = \
+ { NULL, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
+#define EF10_OTHER_STAT(ext_name) \
+ [EF10_STAT_ ## ext_name] = { #ext_name, 0, 0 }
+#define GENERIC_SW_STAT(ext_name) \
+ [GENERIC_STAT_ ## ext_name] = { #ext_name, 0, 0 }
+
+static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
+ EF10_DMA_STAT(tx_bytes, TX_BYTES),
+ EF10_DMA_STAT(tx_packets, TX_PKTS),
+ EF10_DMA_STAT(tx_pause, TX_PAUSE_PKTS),
+ EF10_DMA_STAT(tx_control, TX_CONTROL_PKTS),
+ EF10_DMA_STAT(tx_unicast, TX_UNICAST_PKTS),
+ EF10_DMA_STAT(tx_multicast, TX_MULTICAST_PKTS),
+ EF10_DMA_STAT(tx_broadcast, TX_BROADCAST_PKTS),
+ EF10_DMA_STAT(tx_lt64, TX_LT64_PKTS),
+ EF10_DMA_STAT(tx_64, TX_64_PKTS),
+ EF10_DMA_STAT(tx_65_to_127, TX_65_TO_127_PKTS),
+ EF10_DMA_STAT(tx_128_to_255, TX_128_TO_255_PKTS),
+ EF10_DMA_STAT(tx_256_to_511, TX_256_TO_511_PKTS),
+ EF10_DMA_STAT(tx_512_to_1023, TX_512_TO_1023_PKTS),
+ EF10_DMA_STAT(tx_1024_to_15xx, TX_1024_TO_15XX_PKTS),
+ EF10_DMA_STAT(tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS),
+ EF10_DMA_STAT(rx_bytes, RX_BYTES),
+ EF10_DMA_INVIS_STAT(rx_bytes_minus_good_bytes, RX_BAD_BYTES),
+ EF10_OTHER_STAT(rx_good_bytes),
+ EF10_OTHER_STAT(rx_bad_bytes),
+ EF10_DMA_STAT(rx_packets, RX_PKTS),
+ EF10_DMA_STAT(rx_good, RX_GOOD_PKTS),
+ EF10_DMA_STAT(rx_bad, RX_BAD_FCS_PKTS),
+ EF10_DMA_STAT(rx_pause, RX_PAUSE_PKTS),
+ EF10_DMA_STAT(rx_control, RX_CONTROL_PKTS),
+ EF10_DMA_STAT(rx_unicast, RX_UNICAST_PKTS),
+ EF10_DMA_STAT(rx_multicast, RX_MULTICAST_PKTS),
+ EF10_DMA_STAT(rx_broadcast, RX_BROADCAST_PKTS),
+ EF10_DMA_STAT(rx_lt64, RX_UNDERSIZE_PKTS),
+ EF10_DMA_STAT(rx_64, RX_64_PKTS),
+ EF10_DMA_STAT(rx_65_to_127, RX_65_TO_127_PKTS),
+ EF10_DMA_STAT(rx_128_to_255, RX_128_TO_255_PKTS),
+ EF10_DMA_STAT(rx_256_to_511, RX_256_TO_511_PKTS),
+ EF10_DMA_STAT(rx_512_to_1023, RX_512_TO_1023_PKTS),
+ EF10_DMA_STAT(rx_1024_to_15xx, RX_1024_TO_15XX_PKTS),
+ EF10_DMA_STAT(rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS),
+ EF10_DMA_STAT(rx_gtjumbo, RX_GTJUMBO_PKTS),
+ EF10_DMA_STAT(rx_bad_gtjumbo, RX_JABBER_PKTS),
+ EF10_DMA_STAT(rx_overflow, RX_OVERFLOW_PKTS),
+ EF10_DMA_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS),
+ EF10_DMA_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS),
+ EF10_DMA_STAT(rx_nodesc_drops, RX_NODESC_DROPS),
+ GENERIC_SW_STAT(rx_nodesc_trunc),
+ GENERIC_SW_STAT(rx_noskb_drops),
+ EF10_DMA_STAT(rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW),
+ EF10_DMA_STAT(rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW),
+ EF10_DMA_STAT(rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL),
+ EF10_DMA_STAT(rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL),
+ EF10_DMA_STAT(rx_pm_trunc_qbb, PM_TRUNC_QBB),
+ EF10_DMA_STAT(rx_pm_discard_qbb, PM_DISCARD_QBB),
+ EF10_DMA_STAT(rx_pm_discard_mapping, PM_DISCARD_MAPPING),
+ EF10_DMA_STAT(rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS),
+ EF10_DMA_STAT(rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS),
+ EF10_DMA_STAT(rx_dp_streaming_packets, RXDP_STREAMING_PKTS),
+ EF10_DMA_STAT(rx_dp_hlb_fetch, RXDP_EMERGENCY_FETCH_CONDITIONS),
+ EF10_DMA_STAT(rx_dp_hlb_wait, RXDP_EMERGENCY_WAIT_CONDITIONS),
+};
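
For reference, each EF10_DMA_STAT() entry above expands to an initialiser giving the stat's ethtool name, its DMA field width in bits and its byte offset within the MAC statistics DMA buffer; for example (expansion shown for illustration only):

	/* EF10_DMA_STAT(tx_bytes, TX_BYTES) expands to roughly: */
	[EF10_STAT_tx_bytes] = { "tx_bytes", 64, 8 * MC_CMD_MAC_TX_BYTES },
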
+
+#define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_tx_bytes) | \
+ (1ULL << EF10_STAT_tx_packets) | \
+ (1ULL << EF10_STAT_tx_pause) | \
+ (1ULL << EF10_STAT_tx_unicast) | \
+ (1ULL << EF10_STAT_tx_multicast) | \
+ (1ULL << EF10_STAT_tx_broadcast) | \
+ (1ULL << EF10_STAT_rx_bytes) | \
+ (1ULL << EF10_STAT_rx_bytes_minus_good_bytes) | \
+ (1ULL << EF10_STAT_rx_good_bytes) | \
+ (1ULL << EF10_STAT_rx_bad_bytes) | \
+ (1ULL << EF10_STAT_rx_packets) | \
+ (1ULL << EF10_STAT_rx_good) | \
+ (1ULL << EF10_STAT_rx_bad) | \
+ (1ULL << EF10_STAT_rx_pause) | \
+ (1ULL << EF10_STAT_rx_control) | \
+ (1ULL << EF10_STAT_rx_unicast) | \
+ (1ULL << EF10_STAT_rx_multicast) | \
+ (1ULL << EF10_STAT_rx_broadcast) | \
+ (1ULL << EF10_STAT_rx_lt64) | \
+ (1ULL << EF10_STAT_rx_64) | \
+ (1ULL << EF10_STAT_rx_65_to_127) | \
+ (1ULL << EF10_STAT_rx_128_to_255) | \
+ (1ULL << EF10_STAT_rx_256_to_511) | \
+ (1ULL << EF10_STAT_rx_512_to_1023) | \
+ (1ULL << EF10_STAT_rx_1024_to_15xx) | \
+ (1ULL << EF10_STAT_rx_15xx_to_jumbo) | \
+ (1ULL << EF10_STAT_rx_gtjumbo) | \
+ (1ULL << EF10_STAT_rx_bad_gtjumbo) | \
+ (1ULL << EF10_STAT_rx_overflow) | \
+ (1ULL << EF10_STAT_rx_nodesc_drops) | \
+ (1ULL << GENERIC_STAT_rx_nodesc_trunc) | \
+ (1ULL << GENERIC_STAT_rx_noskb_drops))
+
+/* These statistics are only provided by the 10G MAC. For a 10G/40G
+ * switchable port we do not expose these because they might not
+ * include all the packets they should.
+ */
+#define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_tx_control) | \
+ (1ULL << EF10_STAT_tx_lt64) | \
+ (1ULL << EF10_STAT_tx_64) | \
+ (1ULL << EF10_STAT_tx_65_to_127) | \
+ (1ULL << EF10_STAT_tx_128_to_255) | \
+ (1ULL << EF10_STAT_tx_256_to_511) | \
+ (1ULL << EF10_STAT_tx_512_to_1023) | \
+ (1ULL << EF10_STAT_tx_1024_to_15xx) | \
+ (1ULL << EF10_STAT_tx_15xx_to_jumbo))
+
+/* These statistics are only provided by the 40G MAC. For a 10G/40G
+ * switchable port we do expose these because the errors will otherwise
+ * be silent.
+ */
+#define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_rx_align_error) | \
+ (1ULL << EF10_STAT_rx_length_error))
+
+/* These statistics are only provided if the firmware supports the
+ * capability PM_AND_RXDP_COUNTERS.
+ */
+#define HUNT_PM_AND_RXDP_STAT_MASK ( \
+ (1ULL << EF10_STAT_rx_pm_trunc_bb_overflow) | \
+ (1ULL << EF10_STAT_rx_pm_discard_bb_overflow) | \
+ (1ULL << EF10_STAT_rx_pm_trunc_vfifo_full) | \
+ (1ULL << EF10_STAT_rx_pm_discard_vfifo_full) | \
+ (1ULL << EF10_STAT_rx_pm_trunc_qbb) | \
+ (1ULL << EF10_STAT_rx_pm_discard_qbb) | \
+ (1ULL << EF10_STAT_rx_pm_discard_mapping) | \
+ (1ULL << EF10_STAT_rx_dp_q_disabled_packets) | \
+ (1ULL << EF10_STAT_rx_dp_di_dropped_packets) | \
+ (1ULL << EF10_STAT_rx_dp_streaming_packets) | \
+ (1ULL << EF10_STAT_rx_dp_hlb_fetch) | \
+ (1ULL << EF10_STAT_rx_dp_hlb_wait))
+
+static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx)
+{
+ u64 raw_mask = HUNT_COMMON_STAT_MASK;
+ u32 port_caps = efx_mcdi_phy_get_caps(efx);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
+ raw_mask |= HUNT_40G_EXTRA_STAT_MASK;
+ else
+ raw_mask |= HUNT_10G_ONLY_STAT_MASK;
+
+ if (nic_data->datapath_caps &
+ (1 << MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN))
+ raw_mask |= HUNT_PM_AND_RXDP_STAT_MASK;
+
+ return raw_mask;
+}
+
+static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask)
+{
+ u64 raw_mask = efx_ef10_raw_stat_mask(efx);
+
+#if BITS_PER_LONG == 64
+ mask[0] = raw_mask;
+#else
+ mask[0] = raw_mask & 0xffffffff;
+ mask[1] = raw_mask >> 32;
+#endif
+}
+
+static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names)
+{
+ DECLARE_BITMAP(mask, EF10_STAT_COUNT);
+
+ efx_ef10_get_stat_mask(efx, mask);
+ return efx_nic_describe_stats(efx_ef10_stat_desc, EF10_STAT_COUNT,
+ mask, names);
+}
+
+static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ DECLARE_BITMAP(mask, EF10_STAT_COUNT);
+ __le64 generation_start, generation_end;
+ u64 *stats = nic_data->stats;
+ __le64 *dma_stats;
+
+ efx_ef10_get_stat_mask(efx, mask);
+
+ dma_stats = efx->stats_buffer.addr;
+ nic_data = efx->nic_data;
+
+ generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
+ if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
+ return 0;
+ rmb();
+ efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask,
+ stats, efx->stats_buffer.addr, false);
+ rmb();
+ generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
+ if (generation_end != generation_start)
+ return -EAGAIN;
+
+ /* Update derived statistics */
+ efx_nic_fix_nodesc_drop_stat(efx, &stats[EF10_STAT_rx_nodesc_drops]);
+ stats[EF10_STAT_rx_good_bytes] =
+ stats[EF10_STAT_rx_bytes] -
+ stats[EF10_STAT_rx_bytes_minus_good_bytes];
+ efx_update_diff_stat(&stats[EF10_STAT_rx_bad_bytes],
+ stats[EF10_STAT_rx_bytes_minus_good_bytes]);
+ efx_update_sw_stats(efx, stats);
+ return 0;
+}
+
+
+static size_t efx_ef10_update_stats(struct efx_nic *efx, u64 *full_stats,
+ struct rtnl_link_stats64 *core_stats)
+{
+ DECLARE_BITMAP(mask, EF10_STAT_COUNT);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ u64 *stats = nic_data->stats;
+ size_t stats_count = 0, index;
+ int retry;
+
+ efx_ef10_get_stat_mask(efx, mask);
+
+ /* If we're unlucky enough to read statistics during the DMA, wait
+ * up to 10ms for it to finish (typically takes <500us)
+ */
+ for (retry = 0; retry < 100; ++retry) {
+ if (efx_ef10_try_update_nic_stats(efx) == 0)
+ break;
+ udelay(100);
+ }
+
+ if (full_stats) {
+ for_each_set_bit(index, mask, EF10_STAT_COUNT) {
+ if (efx_ef10_stat_desc[index].name) {
+ *full_stats++ = stats[index];
+ ++stats_count;
+ }
+ }
+ }
+
+ if (core_stats) {
+ core_stats->rx_packets = stats[EF10_STAT_rx_packets];
+ core_stats->tx_packets = stats[EF10_STAT_tx_packets];
+ core_stats->rx_bytes = stats[EF10_STAT_rx_bytes];
+ core_stats->tx_bytes = stats[EF10_STAT_tx_bytes];
+ core_stats->rx_dropped = stats[EF10_STAT_rx_nodesc_drops] +
+ stats[GENERIC_STAT_rx_nodesc_trunc] +
+ stats[GENERIC_STAT_rx_noskb_drops];
+ core_stats->multicast = stats[EF10_STAT_rx_multicast];
+ core_stats->rx_length_errors =
+ stats[EF10_STAT_rx_gtjumbo] +
+ stats[EF10_STAT_rx_length_error];
+ core_stats->rx_crc_errors = stats[EF10_STAT_rx_bad];
+ core_stats->rx_frame_errors = stats[EF10_STAT_rx_align_error];
+ core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow];
+ core_stats->rx_errors = (core_stats->rx_length_errors +
+ core_stats->rx_crc_errors +
+ core_stats->rx_frame_errors);
+ }
+
+ return stats_count;
+}
+
+static void efx_ef10_push_irq_moderation(struct efx_channel *channel)
+{
+ struct efx_nic *efx = channel->efx;
+ unsigned int mode, value;
+ efx_dword_t timer_cmd;
+
+ if (channel->irq_moderation) {
+ mode = 3;
+ value = channel->irq_moderation - 1;
+ } else {
+ mode = 0;
+ value = 0;
+ }
+
+ if (EFX_EF10_WORKAROUND_35388(efx)) {
+ EFX_POPULATE_DWORD_3(timer_cmd, ERF_DD_EVQ_IND_TIMER_FLAGS,
+ EFE_DD_EVQ_IND_TIMER_FLAGS,
+ ERF_DD_EVQ_IND_TIMER_MODE, mode,
+ ERF_DD_EVQ_IND_TIMER_VAL, value);
+ efx_writed_page(efx, &timer_cmd, ER_DD_EVQ_INDIRECT,
+ channel->channel);
+ } else {
+ EFX_POPULATE_DWORD_2(timer_cmd, ERF_DZ_TC_TIMER_MODE, mode,
+ ERF_DZ_TC_TIMER_VAL, value);
+ efx_writed_page(efx, &timer_cmd, ER_DZ_EVQ_TMR,
+ channel->channel);
+ }
+}
+
+static void efx_ef10_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol)
+{
+ wol->supported = 0;
+ wol->wolopts = 0;
+ memset(&wol->sopass, 0, sizeof(wol->sopass));
+}
+
+static int efx_ef10_set_wol(struct efx_nic *efx, u32 type)
+{
+ if (type != 0)
+ return -EINVAL;
+ return 0;
+}
+
+static void efx_ef10_mcdi_request(struct efx_nic *efx,
+ const efx_dword_t *hdr, size_t hdr_len,
+ const efx_dword_t *sdu, size_t sdu_len)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ u8 *pdu = nic_data->mcdi_buf.addr;
+
+ memcpy(pdu, hdr, hdr_len);
+ memcpy(pdu + hdr_len, sdu, sdu_len);
+ wmb();
+
+ /* The hardware provides 'low' and 'high' (doorbell) registers
+ * for passing the 64-bit address of an MCDI request to
+ * firmware. However the dwords are swapped by firmware. The
+ * least significant bits of the doorbell are then 0 for all
+ * MCDI requests due to alignment.
+ */
+ _efx_writed(efx, cpu_to_le32((u64)nic_data->mcdi_buf.dma_addr >> 32),
+ ER_DZ_MC_DB_LWRD);
+ _efx_writed(efx, cpu_to_le32((u32)nic_data->mcdi_buf.dma_addr),
+ ER_DZ_MC_DB_HWRD);
+}
+
+static bool efx_ef10_mcdi_poll_response(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ const efx_dword_t hdr = *(const efx_dword_t *)nic_data->mcdi_buf.addr;
+
+ rmb();
+ return EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE);
+}
+
+static void
+efx_ef10_mcdi_read_response(struct efx_nic *efx, efx_dword_t *outbuf,
+ size_t offset, size_t outlen)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ const u8 *pdu = nic_data->mcdi_buf.addr;
+
+ memcpy(outbuf, pdu + offset, outlen);
+}
+
+static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ rc = efx_ef10_get_warm_boot_count(efx);
+ if (rc < 0) {
+ /* The firmware is presumably in the process of
+ * rebooting. However, we are supposed to report each
+ * reboot just once, so we must only do that once we
+ * can read and store the updated warm boot count.
+ */
+ return 0;
+ }
+
+ if (rc == nic_data->warm_boot_count)
+ return 0;
+
+ nic_data->warm_boot_count = rc;
+
+ /* All our allocations have been reset */
+ efx_ef10_reset_mc_allocations(efx);
+
+ /* The datapath firmware might have been changed */
+ nic_data->must_check_datapath_caps = true;
+
+ /* MAC statistics have been cleared on the NIC; clear the local
+ * statistic that we update with efx_update_diff_stat().
+ */
+ nic_data->stats[EF10_STAT_rx_bad_bytes] = 0;
+
+ return -EIO;
+}
+
+/* Handle an MSI interrupt
+ *
+ * Handle an MSI hardware interrupt. This routine schedules event
+ * queue processing. No interrupt acknowledgement cycle is necessary.
+ * Also, we never need to check that the interrupt is for us, since
+ * MSI interrupts cannot be shared.
+ */
+static irqreturn_t efx_ef10_msi_interrupt(int irq, void *dev_id)
+{
+ struct efx_msi_context *context = dev_id;
+ struct efx_nic *efx = context->efx;
+
+ netif_vdbg(efx, intr, efx->net_dev,
+ "IRQ %d on CPU %d\n", irq, raw_smp_processor_id());
+
+ if (likely(ACCESS_ONCE(efx->irq_soft_enabled))) {
+ /* Note test interrupts */
+ if (context->index == efx->irq_level)
+ efx->last_irq_cpu = raw_smp_processor_id();
+
+ /* Schedule processing of the channel */
+ efx_schedule_channel_irq(efx->channel[context->index]);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t efx_ef10_legacy_interrupt(int irq, void *dev_id)
+{
+ struct efx_nic *efx = dev_id;
+ bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled);
+ struct efx_channel *channel;
+ efx_dword_t reg;
+ u32 queues;
+
+ /* Read the ISR which also ACKs the interrupts */
+ efx_readd(efx, &reg, ER_DZ_BIU_INT_ISR);
+ queues = EFX_DWORD_FIELD(reg, ERF_DZ_ISR_REG);
+
+ if (queues == 0)
+ return IRQ_NONE;
+
+ if (likely(soft_enabled)) {
+ /* Note test interrupts */
+ if (queues & (1U << efx->irq_level))
+ efx->last_irq_cpu = raw_smp_processor_id();
+
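+ /* Schedule event processing for each channel whose bit is set
+ * in the ISR.
+ */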
+ efx_for_each_channel(channel, efx) {
+ if (queues & 1)
+ efx_schedule_channel_irq(channel);
+ queues >>= 1;
+ }
+ }
+
+ netif_vdbg(efx, intr, efx->net_dev,
+ "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
+ irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
+
+ return IRQ_HANDLED;
+}
+
+static void efx_ef10_irq_test_generate(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_TRIGGER_INTERRUPT_IN_LEN);
+
+ BUILD_BUG_ON(MC_CMD_TRIGGER_INTERRUPT_OUT_LEN != 0);
+
+ MCDI_SET_DWORD(inbuf, TRIGGER_INTERRUPT_IN_INTR_LEVEL, efx->irq_level);
+ (void) efx_mcdi_rpc(efx, MC_CMD_TRIGGER_INTERRUPT,
+ inbuf, sizeof(inbuf), NULL, 0, NULL);
+}
+
+static int efx_ef10_tx_probe(struct efx_tx_queue *tx_queue)
+{
+ return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd.buf,
+ (tx_queue->ptr_mask + 1) *
+ sizeof(efx_qword_t),
+ GFP_KERNEL);
+}
+
+/* This writes to the TX_DESC_WPTR and also pushes data */
+static inline void efx_ef10_push_tx_desc(struct efx_tx_queue *tx_queue,
+ const efx_qword_t *txd)
+{
+ unsigned int write_ptr;
+ efx_oword_t reg;
+
+ write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
+ EFX_POPULATE_OWORD_1(reg, ERF_DZ_TX_DESC_WPTR, write_ptr);
+ reg.qword[0] = *txd;
+ efx_writeo_page(tx_queue->efx, &reg,
+ ER_DZ_TX_DESC_UPD, tx_queue->queue);
+}
+
+static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
+ EFX_BUF_SIZE));
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_TXQ_OUT_LEN);
+ bool csum_offload = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
+ size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE;
+ struct efx_channel *channel = tx_queue->channel;
+ struct efx_nic *efx = tx_queue->efx;
+ size_t inlen, outlen;
+ dma_addr_t dma_addr;
+ efx_qword_t *txd;
+ int rc;
+ int i;
+
+ MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1);
+ MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel);
+ MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_LABEL, tx_queue->queue);
+ MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_INSTANCE, tx_queue->queue);
+ MCDI_POPULATE_DWORD_2(inbuf, INIT_TXQ_IN_FLAGS,
+ INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !csum_offload,
+ INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload);
+ MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0);
+ MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
+
+ dma_addr = tx_queue->txd.buf.dma_addr;
+
+ netif_dbg(efx, hw, efx->net_dev, "pushing TXQ %d. %zu entries (%llx)\n",
+ tx_queue->queue, entries, (u64)dma_addr);
+
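+ /* The descriptor ring is passed to the MC as a list of
+ * EFX_BUF_SIZE-sized DMA chunks.
+ */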
+ for (i = 0; i < entries; ++i) {
+ MCDI_SET_ARRAY_QWORD(inbuf, INIT_TXQ_IN_DMA_ADDR, i, dma_addr);
+ dma_addr += EFX_BUF_SIZE;
+ }
+
+ inlen = MC_CMD_INIT_TXQ_IN_LEN(entries);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_INIT_TXQ, inbuf, inlen,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ goto fail;
+
+ /* A previous user of this TX queue might have left a descriptor
+ * in the TX push collector without writing the doorbell. (Each
+ * collector belongs to a port, not a queue or function, so it
+ * cannot easily be reset.) We must attempt to push a no-op
+ * descriptor in its place.
+ */
+ tx_queue->buffer[0].flags = EFX_TX_BUF_OPTION;
+ tx_queue->insert_count = 1;
+ txd = efx_tx_desc(tx_queue, 0);
+ EFX_POPULATE_QWORD_4(*txd,
+ ESF_DZ_TX_DESC_IS_OPT, true,
+ ESF_DZ_TX_OPTION_TYPE,
+ ESE_DZ_TX_OPTION_DESC_CRC_CSUM,
+ ESF_DZ_TX_OPTION_UDP_TCP_CSUM, csum_offload,
+ ESF_DZ_TX_OPTION_IP_CSUM, csum_offload);
+ tx_queue->write_count = 1;
+ wmb();
+ efx_ef10_push_tx_desc(tx_queue, txd);
+
+ return;
+
+fail:
+ netdev_WARN(efx->net_dev, "failed to initialise TXQ %d\n",
+ tx_queue->queue);
+}
+
+static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_TXQ_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_TXQ_OUT_LEN);
+ struct efx_nic *efx = tx_queue->efx;
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, FINI_TXQ_IN_INSTANCE,
+ tx_queue->queue);
+
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+
+ if (rc && rc != -EALREADY)
+ goto fail;
+
+ return;
+
+fail:
+ efx_mcdi_display_error(efx, MC_CMD_FINI_TXQ, MC_CMD_FINI_TXQ_IN_LEN,
+ outbuf, outlen, rc);
+}
+
+static void efx_ef10_tx_remove(struct efx_tx_queue *tx_queue)
+{
+ efx_nic_free_buffer(tx_queue->efx, &tx_queue->txd.buf);
+}
+
+/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
+static inline void efx_ef10_notify_tx_desc(struct efx_tx_queue *tx_queue)
+{
+ unsigned int write_ptr;
+ efx_dword_t reg;
+
+ write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
+ EFX_POPULATE_DWORD_1(reg, ERF_DZ_TX_DESC_WPTR_DWORD, write_ptr);
+ efx_writed_page(tx_queue->efx, &reg,
+ ER_DZ_TX_DESC_UPD_DWORD, tx_queue->queue);
+}
+
+static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue)
+{
+ unsigned int old_write_count = tx_queue->write_count;
+ struct efx_tx_buffer *buffer;
+ unsigned int write_ptr;
+ efx_qword_t *txd;
+
+ BUG_ON(tx_queue->write_count == tx_queue->insert_count);
+
+ do {
+ write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
+ buffer = &tx_queue->buffer[write_ptr];
+ txd = efx_tx_desc(tx_queue, write_ptr);
+ ++tx_queue->write_count;
+
+ /* Create TX descriptor ring entry */
+ if (buffer->flags & EFX_TX_BUF_OPTION) {
+ *txd = buffer->option;
+ } else {
+ BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
+ EFX_POPULATE_QWORD_3(
+ *txd,
+ ESF_DZ_TX_KER_CONT,
+ buffer->flags & EFX_TX_BUF_CONT,
+ ESF_DZ_TX_KER_BYTE_CNT, buffer->len,
+ ESF_DZ_TX_KER_BUF_ADDR, buffer->dma_addr);
+ }
+ } while (tx_queue->write_count != tx_queue->insert_count);
+
+ wmb(); /* Ensure descriptors are written before they are fetched */
+
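+ /* If permitted (see efx_nic_may_push_tx_desc()), push the first
+ * new descriptor inline with the doorbell write; otherwise just
+ * notify the new write pointer.
+ */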
+ if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) {
+ txd = efx_tx_desc(tx_queue,
+ old_write_count & tx_queue->ptr_mask);
+ efx_ef10_push_tx_desc(tx_queue, txd);
+ ++tx_queue->pushes;
+ } else {
+ efx_ef10_notify_tx_desc(tx_queue);
+ }
+}
+
+static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN);
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID,
+ EVB_PORT_ID_ASSIGNED);
+ MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE,
+ MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE);
+ MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES,
+ EFX_MAX_CHANNELS);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_ALLOC, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc != 0)
+ return rc;
+
+ if (outlen < MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN)
+ return -EIO;
+
+ *context = MCDI_DWORD(outbuf, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID);
+
+ return 0;
+}
+
+static void efx_ef10_free_rss_context(struct efx_nic *efx, u32 context)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_FREE_IN_LEN);
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID,
+ context);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_FREE, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ WARN_ON(rc != 0);
+}
+
+static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context)
+{
+ MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN);
+ MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN);
+ int i, rc;
+
+ MCDI_SET_DWORD(tablebuf, RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID,
+ context);
+ BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
+ MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN);
+
+ for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); ++i)
+ MCDI_PTR(tablebuf,
+ RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE)[i] =
+ (u8) efx->rx_indir_table[i];
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_TABLE, tablebuf,
+ sizeof(tablebuf), NULL, 0, NULL);
+ if (rc != 0)
+ return rc;
+
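+ /* Now push the Toeplitz hash key for the same RSS context */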
+ MCDI_SET_DWORD(keybuf, RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID,
+ context);
+ BUILD_BUG_ON(ARRAY_SIZE(efx->rx_hash_key) !=
+ MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
+ for (i = 0; i < ARRAY_SIZE(efx->rx_hash_key); ++i)
+ MCDI_PTR(keybuf, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY)[i] =
+ efx->rx_hash_key[i];
+
+ return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_KEY, keybuf,
+ sizeof(keybuf), NULL, 0, NULL);
+}
+
+static void efx_ef10_rx_free_indir_table(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ if (nic_data->rx_rss_context != EFX_EF10_RSS_CONTEXT_INVALID)
+ efx_ef10_free_rss_context(efx, nic_data->rx_rss_context);
+ nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
+}
+
+static void efx_ef10_rx_push_rss_config(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ netif_dbg(efx, drv, efx->net_dev, "pushing RSS config\n");
+
+ if (nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID) {
+ rc = efx_ef10_alloc_rss_context(efx, &nic_data->rx_rss_context);
+ if (rc != 0)
+ goto fail;
+ }
+
+ rc = efx_ef10_populate_rss_table(efx, nic_data->rx_rss_context);
+ if (rc != 0)
+ goto fail;
+
+ return;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+}
+
+static int efx_ef10_rx_probe(struct efx_rx_queue *rx_queue)
+{
+ return efx_nic_alloc_buffer(rx_queue->efx, &rx_queue->rxd.buf,
+ (rx_queue->ptr_mask + 1) *
+ sizeof(efx_qword_t),
+ GFP_KERNEL);
+}
+
+static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue)
+{
+ MCDI_DECLARE_BUF(inbuf,
+ MC_CMD_INIT_RXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
+ EFX_BUF_SIZE));
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_RXQ_OUT_LEN);
+ struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
+ size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE;
+ struct efx_nic *efx = rx_queue->efx;
+ size_t inlen, outlen;
+ dma_addr_t dma_addr;
+ int rc;
+ int i;
+
+ rx_queue->scatter_n = 0;
+ rx_queue->scatter_len = 0;
+
+ MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_SIZE, rx_queue->ptr_mask + 1);
+ MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_TARGET_EVQ, channel->channel);
+ MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_LABEL, efx_rx_queue_index(rx_queue));
+ MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_INSTANCE,
+ efx_rx_queue_index(rx_queue));
+ MCDI_POPULATE_DWORD_2(inbuf, INIT_RXQ_IN_FLAGS,
+ INIT_RXQ_IN_FLAG_PREFIX, 1,
+ INIT_RXQ_IN_FLAG_TIMESTAMP, 1);
+ MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0);
+ MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
+
+ dma_addr = rx_queue->rxd.buf.dma_addr;
+
+ netif_dbg(efx, hw, efx->net_dev, "pushing RXQ %d. %zu entries (%llx)\n",
+ efx_rx_queue_index(rx_queue), entries, (u64)dma_addr);
+
+ for (i = 0; i < entries; ++i) {
+ MCDI_SET_ARRAY_QWORD(inbuf, INIT_RXQ_IN_DMA_ADDR, i, dma_addr);
+ dma_addr += EFX_BUF_SIZE;
+ }
+
+ inlen = MC_CMD_INIT_RXQ_IN_LEN(entries);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, inlen,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ netdev_WARN(efx->net_dev, "failed to initialise RXQ %d\n",
+ efx_rx_queue_index(rx_queue));
+}
+
+static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_RXQ_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_RXQ_OUT_LEN);
+ struct efx_nic *efx = rx_queue->efx;
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, FINI_RXQ_IN_INSTANCE,
+ efx_rx_queue_index(rx_queue));
+
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_RXQ, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+
+ if (rc && rc != -EALREADY)
+ goto fail;
+
+ return;
+
+fail:
+ efx_mcdi_display_error(efx, MC_CMD_FINI_RXQ, MC_CMD_FINI_RXQ_IN_LEN,
+ outbuf, outlen, rc);
+}
+
+static void efx_ef10_rx_remove(struct efx_rx_queue *rx_queue)
+{
+ efx_nic_free_buffer(rx_queue->efx, &rx_queue->rxd.buf);
+}
+
+/* This creates an entry in the RX descriptor queue */
+static inline void
+efx_ef10_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
+{
+ struct efx_rx_buffer *rx_buf;
+ efx_qword_t *rxd;
+
+ rxd = efx_rx_desc(rx_queue, index);
+ rx_buf = efx_rx_buffer(rx_queue, index);
+ EFX_POPULATE_QWORD_2(*rxd,
+ ESF_DZ_RX_KER_BYTE_CNT, rx_buf->len,
+ ESF_DZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
+}
+
+static void efx_ef10_rx_write(struct efx_rx_queue *rx_queue)
+{
+ struct efx_nic *efx = rx_queue->efx;
+ unsigned int write_count;
+ efx_dword_t reg;
+
+ /* Firmware requires that RX_DESC_WPTR be a multiple of 8 */
+ write_count = rx_queue->added_count & ~7;
+ if (rx_queue->notified_count == write_count)
+ return;
+
+ do
+ efx_ef10_build_rx_desc(
+ rx_queue,
+ rx_queue->notified_count & rx_queue->ptr_mask);
+ while (++rx_queue->notified_count != write_count);
+
+ wmb();
+ EFX_POPULATE_DWORD_1(reg, ERF_DZ_RX_DESC_WPTR,
+ write_count & rx_queue->ptr_mask);
+ efx_writed_page(efx, &reg, ER_DZ_RX_DESC_UPD,
+ efx_rx_queue_index(rx_queue));
+}
+
+static efx_mcdi_async_completer efx_ef10_rx_defer_refill_complete;
+
+static void efx_ef10_rx_defer_refill(struct efx_rx_queue *rx_queue)
+{
+ struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN);
+ efx_qword_t event;
+
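+ /* Post a driver-generated REFILL event to this channel's event
+ * queue; the refill itself is done when the event is processed.
+ */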
+ EFX_POPULATE_QWORD_2(event,
+ ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV,
+ ESF_DZ_EV_DATA, EFX_EF10_REFILL);
+
+ MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel);
+
+ /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has
+ * already swapped the data to little-endian order.
+ */
+ memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0],
+ sizeof(efx_qword_t));
+
+ efx_mcdi_rpc_async(channel->efx, MC_CMD_DRIVER_EVENT,
+ inbuf, sizeof(inbuf), 0,
+ efx_ef10_rx_defer_refill_complete, 0);
+}
+
+static void
+efx_ef10_rx_defer_refill_complete(struct efx_nic *efx, unsigned long cookie,
+ int rc, efx_dword_t *outbuf,
+ size_t outlen_actual)
+{
+ /* nothing to do */
+}
+
+static int efx_ef10_ev_probe(struct efx_channel *channel)
+{
+ return efx_nic_alloc_buffer(channel->efx, &channel->eventq.buf,
+ (channel->eventq_mask + 1) *
+ sizeof(efx_qword_t),
+ GFP_KERNEL);
+}
+
+static int efx_ef10_ev_init(struct efx_channel *channel)
+{
+ MCDI_DECLARE_BUF(inbuf,
+ MC_CMD_INIT_EVQ_IN_LEN(EFX_MAX_EVQ_SIZE * 8 /
+ EFX_BUF_SIZE));
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_OUT_LEN);
+ size_t entries = channel->eventq.buf.len / EFX_BUF_SIZE;
+ struct efx_nic *efx = channel->efx;
+ struct efx_ef10_nic_data *nic_data;
+ bool supports_rx_merge;
+ size_t inlen, outlen;
+ dma_addr_t dma_addr;
+ int rc;
+ int i;
+
+ nic_data = efx->nic_data;
+ supports_rx_merge =
+ !!(nic_data->datapath_caps &
+ 1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN);
+
+ /* Fill event queue with all ones (i.e. empty events) */
+ memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);
+
+ MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_SIZE, channel->eventq_mask + 1);
+ MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_INSTANCE, channel->channel);
+ /* INIT_EVQ expects index in vector table, not absolute */
+ MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_IRQ_NUM, channel->channel);
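+ /* Enable event cut-through only when the datapath firmware does
+ * not support RX event merging.
+ */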
+ MCDI_POPULATE_DWORD_4(inbuf, INIT_EVQ_IN_FLAGS,
+ INIT_EVQ_IN_FLAG_INTERRUPTING, 1,
+ INIT_EVQ_IN_FLAG_RX_MERGE, 1,
+ INIT_EVQ_IN_FLAG_TX_MERGE, 1,
+ INIT_EVQ_IN_FLAG_CUT_THRU, !supports_rx_merge);
+ MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_MODE,
+ MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
+ MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_LOAD, 0);
+ MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_RELOAD, 0);
+ MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_MODE,
+ MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
+ MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_THRSHLD, 0);
+
+ dma_addr = channel->eventq.buf.dma_addr;
+ for (i = 0; i < entries; ++i) {
+ MCDI_SET_ARRAY_QWORD(inbuf, INIT_EVQ_IN_DMA_ADDR, i, dma_addr);
+ dma_addr += EFX_BUF_SIZE;
+ }
+
+ inlen = MC_CMD_INIT_EVQ_IN_LEN(entries);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen,
+ outbuf, sizeof(outbuf), &outlen);
+ /* The IRQ number returned in the INIT_EVQ response is not used */
+ return rc;
+}
+
+static void efx_ef10_ev_fini(struct efx_channel *channel)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_EVQ_OUT_LEN);
+ struct efx_nic *efx = channel->efx;
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel);
+
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+
+ if (rc && rc != -EALREADY)
+ goto fail;
+
+ return;
+
+fail:
+ efx_mcdi_display_error(efx, MC_CMD_FINI_EVQ, MC_CMD_FINI_EVQ_IN_LEN,
+ outbuf, outlen, rc);
+}
+
+static void efx_ef10_ev_remove(struct efx_channel *channel)
+{
+ efx_nic_free_buffer(channel->efx, &channel->eventq.buf);
+}
+
+static void efx_ef10_handle_rx_wrong_queue(struct efx_rx_queue *rx_queue,
+ unsigned int rx_queue_label)
+{
+ struct efx_nic *efx = rx_queue->efx;
+
+ netif_info(efx, hw, efx->net_dev,
+ "rx event arrived on queue %d labeled as queue %u\n",
+ efx_rx_queue_index(rx_queue), rx_queue_label);
+
+ efx_schedule_reset(efx, RESET_TYPE_DISABLE);
+}
+
+static void
+efx_ef10_handle_rx_bad_lbits(struct efx_rx_queue *rx_queue,
+ unsigned int actual, unsigned int expected)
+{
+ unsigned int dropped = (actual - expected) & rx_queue->ptr_mask;
+ struct efx_nic *efx = rx_queue->efx;
+
+ netif_info(efx, hw, efx->net_dev,
+ "dropped %d events (index=%d expected=%d)\n",
+ dropped, actual, expected);
+
+ efx_schedule_reset(efx, RESET_TYPE_DISABLE);
+}
+
+/* A partially received scattered RX packet was aborted; clean up. */
+static void efx_ef10_handle_rx_abort(struct efx_rx_queue *rx_queue)
+{
+ unsigned int rx_desc_ptr;
+
+ netif_dbg(rx_queue->efx, hw, rx_queue->efx->net_dev,
+ "scattered RX aborted (dropping %u buffers)\n",
+ rx_queue->scatter_n);
+
+ rx_desc_ptr = rx_queue->removed_count & rx_queue->ptr_mask;
+
+ efx_rx_packet(rx_queue, rx_desc_ptr, rx_queue->scatter_n,
+ 0, EFX_RX_PKT_DISCARD);
+
+ rx_queue->removed_count += rx_queue->scatter_n;
+ rx_queue->scatter_n = 0;
+ rx_queue->scatter_len = 0;
+ ++efx_rx_queue_channel(rx_queue)->n_rx_nodesc_trunc;
+}
+
+static int efx_ef10_handle_rx_event(struct efx_channel *channel,
+ const efx_qword_t *event)
+{
+ unsigned int rx_bytes, next_ptr_lbits, rx_queue_label, rx_l4_class;
+ unsigned int n_descs, n_packets, i;
+ struct efx_nic *efx = channel->efx;
+ struct efx_rx_queue *rx_queue;
+ bool rx_cont;
+ u16 flags = 0;
+
+ if (unlikely(ACCESS_ONCE(efx->reset_pending)))
+ return 0;
+
+ /* Basic packet information */
+ rx_bytes = EFX_QWORD_FIELD(*event, ESF_DZ_RX_BYTES);
+ next_ptr_lbits = EFX_QWORD_FIELD(*event, ESF_DZ_RX_DSC_PTR_LBITS);
+ rx_queue_label = EFX_QWORD_FIELD(*event, ESF_DZ_RX_QLABEL);
+ rx_l4_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L4_CLASS);
+ rx_cont = EFX_QWORD_FIELD(*event, ESF_DZ_RX_CONT);
+
+ if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_DROP_EVENT))
+ netdev_WARN(efx->net_dev, "saw RX_DROP_EVENT: event="
+ EFX_QWORD_FMT "\n",
+ EFX_QWORD_VAL(*event));
+
+ rx_queue = efx_channel_get_rx_queue(channel);
+
+ if (unlikely(rx_queue_label != efx_rx_queue_index(rx_queue)))
+ efx_ef10_handle_rx_wrong_queue(rx_queue, rx_queue_label);
+
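+ /* Work out how many descriptors this event covers from the low
+ * bits of the RX descriptor pointer.
+ */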
+ n_descs = ((next_ptr_lbits - rx_queue->removed_count) &
+ ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1));
+
+ if (n_descs != rx_queue->scatter_n + 1) {
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ /* detect rx abort */
+ if (unlikely(n_descs == rx_queue->scatter_n)) {
+ if (rx_queue->scatter_n == 0 || rx_bytes != 0)
+ netdev_WARN(efx->net_dev,
+ "invalid RX abort: scatter_n=%u event="
+ EFX_QWORD_FMT "\n",
+ rx_queue->scatter_n,
+ EFX_QWORD_VAL(*event));
+ efx_ef10_handle_rx_abort(rx_queue);
+ return 0;
+ }
+
+ /* Check that RX completion merging is valid, i.e.
+ * the current firmware supports it and this is a
+ * non-scattered packet.
+ */
+ if (!(nic_data->datapath_caps &
+ (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN)) ||
+ rx_queue->scatter_n != 0 || rx_cont) {
+ efx_ef10_handle_rx_bad_lbits(
+ rx_queue, next_ptr_lbits,
+ (rx_queue->removed_count +
+ rx_queue->scatter_n + 1) &
+ ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1));
+ return 0;
+ }
+
+ /* Merged completion for multiple non-scattered packets */
+ rx_queue->scatter_n = 1;
+ rx_queue->scatter_len = 0;
+ n_packets = n_descs;
+ ++channel->n_rx_merge_events;
+ channel->n_rx_merge_packets += n_packets;
+ flags |= EFX_RX_PKT_PREFIX_LEN;
+ } else {
+ ++rx_queue->scatter_n;
+ rx_queue->scatter_len += rx_bytes;
+ if (rx_cont)
+ return 0;
+ n_packets = 1;
+ }
+
+ if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_RX_ECRC_ERR)))
+ flags |= EFX_RX_PKT_DISCARD;
+
+ if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_RX_IPCKSUM_ERR))) {
+ channel->n_rx_ip_hdr_chksum_err += n_packets;
+ } else if (unlikely(EFX_QWORD_FIELD(*event,
+ ESF_DZ_RX_TCPUDP_CKSUM_ERR))) {
+ channel->n_rx_tcp_udp_chksum_err += n_packets;
+ } else if (rx_l4_class == ESE_DZ_L4_CLASS_TCP ||
+ rx_l4_class == ESE_DZ_L4_CLASS_UDP) {
+ flags |= EFX_RX_PKT_CSUMMED;
+ }
+
+ if (rx_l4_class == ESE_DZ_L4_CLASS_TCP)
+ flags |= EFX_RX_PKT_TCP;
+
+ channel->irq_mod_score += 2 * n_packets;
+
+ /* Handle received packet(s) */
+ for (i = 0; i < n_packets; i++) {
+ efx_rx_packet(rx_queue,
+ rx_queue->removed_count & rx_queue->ptr_mask,
+ rx_queue->scatter_n, rx_queue->scatter_len,
+ flags);
+ rx_queue->removed_count += rx_queue->scatter_n;
+ }
+
+ rx_queue->scatter_n = 0;
+ rx_queue->scatter_len = 0;
+
+ return n_packets;
+}
+
+static int
+efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
+{
+ struct efx_nic *efx = channel->efx;
+ struct efx_tx_queue *tx_queue;
+ unsigned int tx_ev_desc_ptr;
+ unsigned int tx_ev_q_label;
+ int tx_descs = 0;
+
+ if (unlikely(ACCESS_ONCE(efx->reset_pending)))
+ return 0;
+
+ if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT)))
+ return 0;
+
+ /* Transmit completion */
+ tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, ESF_DZ_TX_DESCR_INDX);
+ tx_ev_q_label = EFX_QWORD_FIELD(*event, ESF_DZ_TX_QLABEL);
+ tx_queue = efx_channel_get_tx_queue(channel,
+ tx_ev_q_label % EFX_TXQ_TYPES);
+ tx_descs = ((tx_ev_desc_ptr + 1 - tx_queue->read_count) &
+ tx_queue->ptr_mask);
+ efx_xmit_done(tx_queue, tx_ev_desc_ptr & tx_queue->ptr_mask);
+
+ return tx_descs;
+}
+
+static void
+efx_ef10_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
+{
+ struct efx_nic *efx = channel->efx;
+ int subcode;
+
+ subcode = EFX_QWORD_FIELD(*event, ESF_DZ_DRV_SUB_CODE);
+
+ switch (subcode) {
+ case ESE_DZ_DRV_TIMER_EV:
+ case ESE_DZ_DRV_WAKE_UP_EV:
+ break;
+ case ESE_DZ_DRV_START_UP_EV:
+ /* Event queue initialisation complete; nothing more to do. */
+ break;
+ default:
+ netif_err(efx, hw, efx->net_dev,
+ "channel %d unknown driver event type %d"
+ " (data " EFX_QWORD_FMT ")\n",
+ channel->channel, subcode,
+ EFX_QWORD_VAL(*event));
+
+ }
+}
+
+static void efx_ef10_handle_driver_generated_event(struct efx_channel *channel,
+ efx_qword_t *event)
+{
+ struct efx_nic *efx = channel->efx;
+ u32 subcode;
+
+ subcode = EFX_QWORD_FIELD(*event, EFX_DWORD_0);
+
+ switch (subcode) {
+ case EFX_EF10_TEST:
+ channel->event_test_cpu = raw_smp_processor_id();
+ break;
+ case EFX_EF10_REFILL:
+ /* The queue must be empty, so we won't receive any RX
+ * events and efx_process_channel() won't refill the
+ * queue. Refill it here.
+ */
+ efx_fast_push_rx_descriptors(&channel->rx_queue, true);
+ break;
+ default:
+ netif_err(efx, hw, efx->net_dev,
+ "channel %d unknown driver event type %u"
+ " (data " EFX_QWORD_FMT ")\n",
+ channel->channel, (unsigned) subcode,
+ EFX_QWORD_VAL(*event));
+ }
+}
+
+static int efx_ef10_ev_process(struct efx_channel *channel, int quota)
+{
+ struct efx_nic *efx = channel->efx;
+ efx_qword_t event, *p_event;
+ unsigned int read_ptr;
+ int ev_code;
+ int tx_descs = 0;
+ int spent = 0;
+
+ if (quota <= 0)
+ return spent;
+
+ read_ptr = channel->eventq_read_ptr;
+
+ for (;;) {
+ p_event = efx_event(channel, read_ptr);
+ event = *p_event;
+
+ if (!efx_event_present(&event))
+ break;
+
+ EFX_SET_QWORD(*p_event);
+
+ ++read_ptr;
+
+ ev_code = EFX_QWORD_FIELD(event, ESF_DZ_EV_CODE);
+
+ netif_vdbg(efx, drv, efx->net_dev,
+ "processing event on %d " EFX_QWORD_FMT "\n",
+ channel->channel, EFX_QWORD_VAL(event));
+
+ switch (ev_code) {
+ case ESE_DZ_EV_CODE_MCDI_EV:
+ efx_mcdi_process_event(channel, &event);
+ break;
+ case ESE_DZ_EV_CODE_RX_EV:
+ spent += efx_ef10_handle_rx_event(channel, &event);
+ if (spent >= quota) {
+ /* XXX can we split a merged event to
+ * avoid going over-quota?
+ */
+ spent = quota;
+ goto out;
+ }
+ break;
+ case ESE_DZ_EV_CODE_TX_EV:
+ tx_descs += efx_ef10_handle_tx_event(channel, &event);
+ if (tx_descs > efx->txq_entries) {
+ spent = quota;
+ goto out;
+ } else if (++spent == quota) {
+ goto out;
+ }
+ break;
+ case ESE_DZ_EV_CODE_DRIVER_EV:
+ efx_ef10_handle_driver_event(channel, &event);
+ if (++spent == quota)
+ goto out;
+ break;
+ case EFX_EF10_DRVGEN_EV:
+ efx_ef10_handle_driver_generated_event(channel, &event);
+ break;
+ default:
+ netif_err(efx, hw, efx->net_dev,
+ "channel %d unknown event type %d"
+ " (data " EFX_QWORD_FMT ")\n",
+ channel->channel, ev_code,
+ EFX_QWORD_VAL(event));
+ }
+ }
+
+out:
+ channel->eventq_read_ptr = read_ptr;
+ return spent;
+}
+
+static void efx_ef10_ev_read_ack(struct efx_channel *channel)
+{
+ struct efx_nic *efx = channel->efx;
+ efx_dword_t rptr;
+
+ if (EFX_EF10_WORKAROUND_35388(efx)) {
+ BUILD_BUG_ON(EFX_MIN_EVQ_SIZE <
+ (1 << ERF_DD_EVQ_IND_RPTR_WIDTH));
+ BUILD_BUG_ON(EFX_MAX_EVQ_SIZE >
+ (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH));
+
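+ /* Write the read pointer in two halves through the indirect
+ * register: high bits first, then low bits.
+ */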
+ EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS,
+ EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH,
+ ERF_DD_EVQ_IND_RPTR,
+ (channel->eventq_read_ptr &
+ channel->eventq_mask) >>
+ ERF_DD_EVQ_IND_RPTR_WIDTH);
+ efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT,
+ channel->channel);
+ EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS,
+ EFE_DD_EVQ_IND_RPTR_FLAGS_LOW,
+ ERF_DD_EVQ_IND_RPTR,
+ channel->eventq_read_ptr &
+ ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
+ efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT,
+ channel->channel);
+ } else {
+ EFX_POPULATE_DWORD_1(rptr, ERF_DZ_EVQ_RPTR,
+ channel->eventq_read_ptr &
+ channel->eventq_mask);
+ efx_writed_page(efx, &rptr, ER_DZ_EVQ_RPTR, channel->channel);
+ }
+}
+
+static void efx_ef10_ev_test_generate(struct efx_channel *channel)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN);
+ struct efx_nic *efx = channel->efx;
+ efx_qword_t event;
+ int rc;
+
+ EFX_POPULATE_QWORD_2(event,
+ ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV,
+ ESF_DZ_EV_DATA, EFX_EF10_TEST);
+
+ MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel);
+
+ /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has
+ * already swapped the data to little-endian order.
+ */
+ memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0],
+ sizeof(efx_qword_t));
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_DRIVER_EVENT, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ if (rc != 0)
+ goto fail;
+
+ return;
+
+fail:
+ WARN_ON(true);
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+}
+
+void efx_ef10_handle_drain_event(struct efx_nic *efx)
+{
+ if (atomic_dec_and_test(&efx->active_queues))
+ wake_up(&efx->flush_wq);
+
+ WARN_ON(atomic_read(&efx->active_queues) < 0);
+}
+
+static int efx_ef10_fini_dmaq(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct efx_channel *channel;
+ struct efx_tx_queue *tx_queue;
+ struct efx_rx_queue *rx_queue;
+ int pending;
+
+ /* If the MC has just rebooted, the TX/RX queues will have already been
+ * torn down, but efx->active_queues needs to be set to zero.
+ */
+ if (nic_data->must_realloc_vis) {
+ atomic_set(&efx->active_queues, 0);
+ return 0;
+ }
+
+ /* Do not attempt to write to the NIC during EEH recovery */
+ if (efx->state != STATE_RECOVERY) {
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_rx_queue(rx_queue, channel)
+ efx_ef10_rx_fini(rx_queue);
+ efx_for_each_channel_tx_queue(tx_queue, channel)
+ efx_ef10_tx_fini(tx_queue);
+ }
+
+ wait_event_timeout(efx->flush_wq,
+ atomic_read(&efx->active_queues) == 0,
+ msecs_to_jiffies(EFX_MAX_FLUSH_TIME));
+ pending = atomic_read(&efx->active_queues);
+ if (pending) {
+ netif_err(efx, hw, efx->net_dev, "failed to flush %d queues\n",
+ pending);
+ return -ETIMEDOUT;
+ }
+ }
+
+ return 0;
+}
+
+static void efx_ef10_prepare_flr(struct efx_nic *efx)
+{
+ atomic_set(&efx->active_queues, 0);
+}
+
+static bool efx_ef10_filter_equal(const struct efx_filter_spec *left,
+ const struct efx_filter_spec *right)
+{
+ if ((left->match_flags ^ right->match_flags) |
+ ((left->flags ^ right->flags) &
+ (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
+ return false;
+
+ return memcmp(&left->outer_vid, &right->outer_vid,
+ sizeof(struct efx_filter_spec) -
+ offsetof(struct efx_filter_spec, outer_vid)) == 0;
+}
+
+static unsigned int efx_ef10_filter_hash(const struct efx_filter_spec *spec)
+{
+ BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
+ return jhash2((const u32 *)&spec->outer_vid,
+ (sizeof(struct efx_filter_spec) -
+ offsetof(struct efx_filter_spec, outer_vid)) / 4,
+ 0);
+ /* XXX should we randomise the initval? */
+}
+
+/* Decide whether a filter should be exclusive or else should allow
+ * delivery to additional recipients. Currently we decide that
+ * filters for specific local unicast MAC and IP addresses are
+ * exclusive.
+ */
+static bool efx_ef10_filter_is_exclusive(const struct efx_filter_spec *spec)
+{
+ if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC &&
+ !is_multicast_ether_addr(spec->loc_mac))
+ return true;
+
+ if ((spec->match_flags &
+ (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
+ (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
+ if (spec->ether_type == htons(ETH_P_IP) &&
+ !ipv4_is_multicast(spec->loc_host[0]))
+ return true;
+ if (spec->ether_type == htons(ETH_P_IPV6) &&
+ ((const u8 *)spec->loc_host)[0] != 0xff)
+ return true;
+ }
+
+ return false;
+}
+
+static struct efx_filter_spec *
+efx_ef10_filter_entry_spec(const struct efx_ef10_filter_table *table,
+ unsigned int filter_idx)
+{
+ return (struct efx_filter_spec *)(table->entry[filter_idx].spec &
+ ~EFX_EF10_FILTER_FLAGS);
+}
+
+static unsigned int
+efx_ef10_filter_entry_flags(const struct efx_ef10_filter_table *table,
+ unsigned int filter_idx)
+{
+ return table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAGS;
+}
+
+static void
+efx_ef10_filter_set_entry(struct efx_ef10_filter_table *table,
+ unsigned int filter_idx,
+ const struct efx_filter_spec *spec,
+ unsigned int flags)
+{
+ table->entry[filter_idx].spec = (unsigned long)spec | flags;
+}
+
+static void efx_ef10_filter_push_prep(struct efx_nic *efx,
+ const struct efx_filter_spec *spec,
+ efx_dword_t *inbuf, u64 handle,
+ bool replacing)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ memset(inbuf, 0, MC_CMD_FILTER_OP_IN_LEN);
+
+ if (replacing) {
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
+ MC_CMD_FILTER_OP_IN_OP_REPLACE);
+ MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, handle);
+ } else {
+ u32 match_fields = 0;
+
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
+ efx_ef10_filter_is_exclusive(spec) ?
+ MC_CMD_FILTER_OP_IN_OP_INSERT :
+ MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE);
+
+ /* Convert match flags and values. Unlike almost
+ * everything else in MCDI, these fields are in
+ * network byte order.
+ */
+ if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC_IG)
+ match_fields |=
+ is_multicast_ether_addr(spec->loc_mac) ?
+ 1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN :
+ 1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN;
+#define COPY_FIELD(gen_flag, gen_field, mcdi_field) \
+ if (spec->match_flags & EFX_FILTER_MATCH_ ## gen_flag) { \
+ match_fields |= \
+ 1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \
+ mcdi_field ## _LBN; \
+ BUILD_BUG_ON( \
+ MC_CMD_FILTER_OP_IN_ ## mcdi_field ## _LEN < \
+ sizeof(spec->gen_field)); \
+ memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_ ## mcdi_field), \
+ &spec->gen_field, sizeof(spec->gen_field)); \
+ }
+ COPY_FIELD(REM_HOST, rem_host, SRC_IP);
+ COPY_FIELD(LOC_HOST, loc_host, DST_IP);
+ COPY_FIELD(REM_MAC, rem_mac, SRC_MAC);
+ COPY_FIELD(REM_PORT, rem_port, SRC_PORT);
+ COPY_FIELD(LOC_MAC, loc_mac, DST_MAC);
+ COPY_FIELD(LOC_PORT, loc_port, DST_PORT);
+ COPY_FIELD(ETHER_TYPE, ether_type, ETHER_TYPE);
+ COPY_FIELD(INNER_VID, inner_vid, INNER_VLAN);
+ COPY_FIELD(OUTER_VID, outer_vid, OUTER_VLAN);
+ COPY_FIELD(IP_PROTO, ip_proto, IP_PROTO);
+#undef COPY_FIELD
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_MATCH_FIELDS,
+ match_fields);
+ }
+
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST,
+ spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
+ MC_CMD_FILTER_OP_IN_RX_DEST_DROP :
+ MC_CMD_FILTER_OP_IN_RX_DEST_HOST);
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DEST,
+ MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT);
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE,
+ spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
+ 0 : spec->dmaq_id);
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE,
+ (spec->flags & EFX_FILTER_FLAG_RX_RSS) ?
+ MC_CMD_FILTER_OP_IN_RX_MODE_RSS :
+ MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE);
+ if (spec->flags & EFX_FILTER_FLAG_RX_RSS)
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_CONTEXT,
+ spec->rss_context !=
+ EFX_FILTER_RSS_CONTEXT_DEFAULT ?
+ spec->rss_context : nic_data->rx_rss_context);
+}
+
+static int efx_ef10_filter_push(struct efx_nic *efx,
+ const struct efx_filter_spec *spec,
+ u64 *handle, bool replacing)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_OUT_LEN);
+ int rc;
+
+ efx_ef10_filter_push_prep(efx, spec, inbuf, *handle, replacing);
+ rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), NULL);
+ if (rc == 0)
+ *handle = MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE);
+ if (rc == -ENOSPC)
+ rc = -EBUSY; /* to match efx_farch_filter_insert() */
+ return rc;
+}
+
+static int efx_ef10_filter_rx_match_pri(struct efx_ef10_filter_table *table,
+ enum efx_filter_match_flags match_flags)
+{
+ unsigned int match_pri;
+
+ for (match_pri = 0;
+ match_pri < table->rx_match_count;
+ match_pri++)
+ if (table->rx_match_flags[match_pri] == match_flags)
+ return match_pri;
+
+ return -EPROTONOSUPPORT;
+}
+
+static s32 efx_ef10_filter_insert(struct efx_nic *efx,
+ struct efx_filter_spec *spec,
+ bool replace_equal)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
+ struct efx_filter_spec *saved_spec;
+ unsigned int match_pri, hash;
+ unsigned int priv_flags;
+ bool replacing = false;
+ int ins_index = -1;
+ DEFINE_WAIT(wait);
+ bool is_mc_recip;
+ s32 rc;
+
+ /* For now, only support RX filters */
+ if ((spec->flags & (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)) !=
+ EFX_FILTER_FLAG_RX)
+ return -EINVAL;
+
+ rc = efx_ef10_filter_rx_match_pri(table, spec->match_flags);
+ if (rc < 0)
+ return rc;
+ match_pri = rc;
+
+ hash = efx_ef10_filter_hash(spec);
+ is_mc_recip = efx_filter_is_mc_recipient(spec);
+ if (is_mc_recip)
+ bitmap_zero(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
+
+ /* Find any existing filters with the same match tuple or
+ * else a free slot to insert at. If any of them are busy,
+ * we have to wait and retry.
+ */
+ for (;;) {
+ unsigned int depth = 1;
+ unsigned int i;
+
+ spin_lock_bh(&efx->filter_lock);
+
+ for (;;) {
+ i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
+ saved_spec = efx_ef10_filter_entry_spec(table, i);
+
+ if (!saved_spec) {
+ if (ins_index < 0)
+ ins_index = i;
+ } else if (efx_ef10_filter_equal(spec, saved_spec)) {
+ if (table->entry[i].spec &
+ EFX_EF10_FILTER_FLAG_BUSY)
+ break;
+ if (spec->priority < saved_spec->priority &&
+ spec->priority != EFX_FILTER_PRI_AUTO) {
+ rc = -EPERM;
+ goto out_unlock;
+ }
+ if (!is_mc_recip) {
+ /* This is the only one */
+ if (spec->priority ==
+ saved_spec->priority &&
+ !replace_equal) {
+ rc = -EEXIST;
+ goto out_unlock;
+ }
+ ins_index = i;
+ goto found;
+ } else if (spec->priority >
+ saved_spec->priority ||
+ (spec->priority ==
+ saved_spec->priority &&
+ replace_equal)) {
+ if (ins_index < 0)
+ ins_index = i;
+ else
+ __set_bit(depth, mc_rem_map);
+ }
+ }
+
+ /* Once we reach the maximum search depth, use
+ * the first suitable slot or return -EBUSY if
+ * there was none
+ */
+ if (depth == EFX_EF10_FILTER_SEARCH_LIMIT) {
+ if (ins_index < 0) {
+ rc = -EBUSY;
+ goto out_unlock;
+ }
+ goto found;
+ }
+
+ ++depth;
+ }
+
+ prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE);
+ spin_unlock_bh(&efx->filter_lock);
+ schedule();
+ }
+
+found:
+ /* Create a software table entry if necessary, and mark it
+ * busy. We might yet fail to insert, but any attempt to
+ * insert a conflicting filter while we're waiting for the
+ * firmware must find the busy entry.
+ */
+ saved_spec = efx_ef10_filter_entry_spec(table, ins_index);
+ if (saved_spec) {
+ if (spec->priority == EFX_FILTER_PRI_AUTO &&
+ saved_spec->priority >= EFX_FILTER_PRI_AUTO) {
+ /* Just make sure it won't be removed */
+ if (saved_spec->priority > EFX_FILTER_PRI_AUTO)
+ saved_spec->flags |= EFX_FILTER_FLAG_RX_OVER_AUTO;
+ table->entry[ins_index].spec &=
+ ~EFX_EF10_FILTER_FLAG_AUTO_OLD;
+ rc = ins_index;
+ goto out_unlock;
+ }
+ replacing = true;
+ priv_flags = efx_ef10_filter_entry_flags(table, ins_index);
+ } else {
+ saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC);
+ if (!saved_spec) {
+ rc = -ENOMEM;
+ goto out_unlock;
+ }
+ *saved_spec = *spec;
+ priv_flags = 0;
+ }
+ efx_ef10_filter_set_entry(table, ins_index, saved_spec,
+ priv_flags | EFX_EF10_FILTER_FLAG_BUSY);
+
+ /* Mark lower-priority multicast recipients busy prior to removal */
+ if (is_mc_recip) {
+ unsigned int depth, i;
+
+ for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
+ i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
+ if (test_bit(depth, mc_rem_map))
+ table->entry[i].spec |=
+ EFX_EF10_FILTER_FLAG_BUSY;
+ }
+ }
+
+ spin_unlock_bh(&efx->filter_lock);
+
+ rc = efx_ef10_filter_push(efx, spec, &table->entry[ins_index].handle,
+ replacing);
+
+ /* Finalise the software table entry */
+ spin_lock_bh(&efx->filter_lock);
+ if (rc == 0) {
+ if (replacing) {
+ /* Update the fields that may differ */
+ if (saved_spec->priority == EFX_FILTER_PRI_AUTO)
+ saved_spec->flags |=
+ EFX_FILTER_FLAG_RX_OVER_AUTO;
+ saved_spec->priority = spec->priority;
+ saved_spec->flags &= EFX_FILTER_FLAG_RX_OVER_AUTO;
+ saved_spec->flags |= spec->flags;
+ saved_spec->rss_context = spec->rss_context;
+ saved_spec->dmaq_id = spec->dmaq_id;
+ }
+ } else if (!replacing) {
+ kfree(saved_spec);
+ saved_spec = NULL;
+ }
+ efx_ef10_filter_set_entry(table, ins_index, saved_spec, priv_flags);
+
+ /* Remove and finalise entries for lower-priority multicast
+ * recipients
+ */
+ if (is_mc_recip) {
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
+ unsigned int depth, i;
+
+ memset(inbuf, 0, sizeof(inbuf));
+
+ for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
+ if (!test_bit(depth, mc_rem_map))
+ continue;
+
+ i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
+ saved_spec = efx_ef10_filter_entry_spec(table, i);
+ priv_flags = efx_ef10_filter_entry_flags(table, i);
+
+ if (rc == 0) {
+ spin_unlock_bh(&efx->filter_lock);
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
+ MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
+ MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
+ table->entry[i].handle);
+ rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP,
+ inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ spin_lock_bh(&efx->filter_lock);
+ }
+
+ if (rc == 0) {
+ kfree(saved_spec);
+ saved_spec = NULL;
+ priv_flags = 0;
+ } else {
+ priv_flags &= ~EFX_EF10_FILTER_FLAG_BUSY;
+ }
+ efx_ef10_filter_set_entry(table, i, saved_spec,
+ priv_flags);
+ }
+ }
+
+ /* If successful, return the inserted filter ID */
+ if (rc == 0)
+ rc = match_pri * HUNT_FILTER_TBL_ROWS + ins_index;
+
+ wake_up_all(&table->waitq);
+out_unlock:
+ spin_unlock_bh(&efx->filter_lock);
+ finish_wait(&table->waitq, &wait);
+ return rc;
+}
+
+static void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx)
+{
+ /* no need to do anything here on EF10 */
+}
+
+/* Remove a filter.
+ * If !by_index, remove by ID
+ * If by_index, remove by index
+ * Filter ID may come from userland and must be range-checked.
+ */
+static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
+ unsigned int priority_mask,
+ u32 filter_id, bool by_index)
+{
+ unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS;
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ MCDI_DECLARE_BUF(inbuf,
+ MC_CMD_FILTER_OP_IN_HANDLE_OFST +
+ MC_CMD_FILTER_OP_IN_HANDLE_LEN);
+ struct efx_filter_spec *spec;
+ DEFINE_WAIT(wait);
+ int rc;
+
+ /* Find the software table entry and mark it busy. Don't
+ * remove it yet; any attempt to update while we're waiting
+ * for the firmware must find the busy entry.
+ */
+ for (;;) {
+ spin_lock_bh(&efx->filter_lock);
+ if (!(table->entry[filter_idx].spec &
+ EFX_EF10_FILTER_FLAG_BUSY))
+ break;
+ prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE);
+ spin_unlock_bh(&efx->filter_lock);
+ schedule();
+ }
+
+ spec = efx_ef10_filter_entry_spec(table, filter_idx);
+ if (!spec ||
+ (!by_index &&
+ efx_ef10_filter_rx_match_pri(table, spec->match_flags) !=
+ filter_id / HUNT_FILTER_TBL_ROWS)) {
+ rc = -ENOENT;
+ goto out_unlock;
+ }
+
+ if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO &&
+ priority_mask == (1U << EFX_FILTER_PRI_AUTO)) {
+ /* Just remove flags */
+ spec->flags &= ~EFX_FILTER_FLAG_RX_OVER_AUTO;
+ table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_AUTO_OLD;
+ rc = 0;
+ goto out_unlock;
+ }
+
+ if (!(priority_mask & (1U << spec->priority))) {
+ rc = -ENOENT;
+ goto out_unlock;
+ }
+
+ table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
+ spin_unlock_bh(&efx->filter_lock);
+
+ if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) {
+ /* Reset to an automatic filter */
+
+ struct efx_filter_spec new_spec = *spec;
+
+ new_spec.priority = EFX_FILTER_PRI_AUTO;
+ new_spec.flags = (EFX_FILTER_FLAG_RX |
+ EFX_FILTER_FLAG_RX_RSS);
+ new_spec.dmaq_id = 0;
+ new_spec.rss_context = EFX_FILTER_RSS_CONTEXT_DEFAULT;
+ rc = efx_ef10_filter_push(efx, &new_spec,
+ &table->entry[filter_idx].handle,
+ true);
+
+ spin_lock_bh(&efx->filter_lock);
+ if (rc == 0)
+ *spec = new_spec;
+ } else {
+ /* Really remove the filter */
+
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
+ efx_ef10_filter_is_exclusive(spec) ?
+ MC_CMD_FILTER_OP_IN_OP_REMOVE :
+ MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
+ MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
+ table->entry[filter_idx].handle);
+ rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP,
+ inbuf, sizeof(inbuf), NULL, 0, NULL);
+
+ spin_lock_bh(&efx->filter_lock);
+ if (rc == 0) {
+ kfree(spec);
+ efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
+ }
+ }
+
+ table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY;
+ wake_up_all(&table->waitq);
+out_unlock:
+ spin_unlock_bh(&efx->filter_lock);
+ finish_wait(&table->waitq, &wait);
+ return rc;
+}
+
+static int efx_ef10_filter_remove_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id)
+{
+ return efx_ef10_filter_remove_internal(efx, 1U << priority,
+ filter_id, false);
+}
+
+static int efx_ef10_filter_get_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id, struct efx_filter_spec *spec)
+{
+ unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS;
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ const struct efx_filter_spec *saved_spec;
+ int rc;
+
+ spin_lock_bh(&efx->filter_lock);
+ saved_spec = efx_ef10_filter_entry_spec(table, filter_idx);
+ if (saved_spec && saved_spec->priority == priority &&
+ efx_ef10_filter_rx_match_pri(table, saved_spec->match_flags) ==
+ filter_id / HUNT_FILTER_TBL_ROWS) {
+ *spec = *saved_spec;
+ rc = 0;
+ } else {
+ rc = -ENOENT;
+ }
+ spin_unlock_bh(&efx->filter_lock);
+ return rc;
+}
+
+static int efx_ef10_filter_clear_rx(struct efx_nic *efx,
+ enum efx_filter_priority priority)
+{
+ unsigned int priority_mask;
+ unsigned int i;
+ int rc;
+
+ priority_mask = (((1U << (priority + 1)) - 1) &
+ ~(1U << EFX_FILTER_PRI_AUTO));
+
+ for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
+ rc = efx_ef10_filter_remove_internal(efx, priority_mask,
+ i, true);
+ if (rc && rc != -ENOENT)
+ return rc;
+ }
+
+ return 0;
+}
+
+static u32 efx_ef10_filter_count_rx_used(struct efx_nic *efx,
+ enum efx_filter_priority priority)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ unsigned int filter_idx;
+ s32 count = 0;
+
+ spin_lock_bh(&efx->filter_lock);
+ for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
+ if (table->entry[filter_idx].spec &&
+ efx_ef10_filter_entry_spec(table, filter_idx)->priority ==
+ priority)
+ ++count;
+ }
+ spin_unlock_bh(&efx->filter_lock);
+ return count;
+}
+
+static u32 efx_ef10_filter_get_rx_id_limit(struct efx_nic *efx)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+
+ return table->rx_match_count * HUNT_FILTER_TBL_ROWS;
+}
+
+static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 *buf, u32 size)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ struct efx_filter_spec *spec;
+ unsigned int filter_idx;
+ s32 count = 0;
+
+ spin_lock_bh(&efx->filter_lock);
+ for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
+ spec = efx_ef10_filter_entry_spec(table, filter_idx);
+ if (spec && spec->priority == priority) {
+ if (count == size) {
+ count = -EMSGSIZE;
+ break;
+ }
+ buf[count++] = (efx_ef10_filter_rx_match_pri(
+ table, spec->match_flags) *
+ HUNT_FILTER_TBL_ROWS +
+ filter_idx);
+ }
+ }
+ spin_unlock_bh(&efx->filter_lock);
+ return count;
+}
+
+#ifdef CONFIG_RFS_ACCEL
+
+static efx_mcdi_async_completer efx_ef10_filter_rfs_insert_complete;
+
+static s32 efx_ef10_filter_rfs_insert(struct efx_nic *efx,
+ struct efx_filter_spec *spec)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
+ struct efx_filter_spec *saved_spec;
+ unsigned int hash, i, depth = 1;
+ bool replacing = false;
+ int ins_index = -1;
+ u64 cookie;
+ s32 rc;
+
+ /* Must be an RX filter without RSS and not for a multicast
+ * destination address (RFS only works for connected sockets).
+ * These restrictions allow us to pass only a tiny amount of
+ * data through to the completion function.
+ */
+ EFX_WARN_ON_PARANOID(spec->flags !=
+ (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_SCATTER));
+ EFX_WARN_ON_PARANOID(spec->priority != EFX_FILTER_PRI_HINT);
+ EFX_WARN_ON_PARANOID(efx_filter_is_mc_recipient(spec));
+
+ hash = efx_ef10_filter_hash(spec);
+
+ spin_lock_bh(&efx->filter_lock);
+
+ /* Find any existing filter with the same match tuple or else
+ * a free slot to insert at. If an existing filter is busy,
+ * we have to give up.
+ */
+ for (;;) {
+ i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
+ saved_spec = efx_ef10_filter_entry_spec(table, i);
+
+ if (!saved_spec) {
+ if (ins_index < 0)
+ ins_index = i;
+ } else if (efx_ef10_filter_equal(spec, saved_spec)) {
+ if (table->entry[i].spec & EFX_EF10_FILTER_FLAG_BUSY) {
+ rc = -EBUSY;
+ goto fail_unlock;
+ }
+ if (spec->priority < saved_spec->priority) {
+ rc = -EPERM;
+ goto fail_unlock;
+ }
+ ins_index = i;
+ break;
+ }
+
+ /* Once we reach the maximum search depth, use the
+ * first suitable slot or return -EBUSY if there was
+ * none
+ */
+ if (depth == EFX_EF10_FILTER_SEARCH_LIMIT) {
+ if (ins_index < 0) {
+ rc = -EBUSY;
+ goto fail_unlock;
+ }
+ break;
+ }
+
+ ++depth;
+ }
+
+ /* Create a software table entry if necessary, and mark it
+ * busy. We might yet fail to insert, but any attempt to
+ * insert a conflicting filter while we're waiting for the
+ * firmware must find the busy entry.
+ */
+ saved_spec = efx_ef10_filter_entry_spec(table, ins_index);
+ if (saved_spec) {
+ replacing = true;
+ } else {
+ saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC);
+ if (!saved_spec) {
+ rc = -ENOMEM;
+ goto fail_unlock;
+ }
+ *saved_spec = *spec;
+ }
+ efx_ef10_filter_set_entry(table, ins_index, saved_spec,
+ EFX_EF10_FILTER_FLAG_BUSY);
+
+ spin_unlock_bh(&efx->filter_lock);
+
+ /* Pack up the variables needed on completion */
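+ /* Cookie layout: bit 31 = replacing, bits 30-16 = table index,
+ * bits 15-0 = RX queue; unpacked in the completion handler.
+ */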
+ cookie = replacing << 31 | ins_index << 16 | spec->dmaq_id;
+
+ efx_ef10_filter_push_prep(efx, spec, inbuf,
+ table->entry[ins_index].handle, replacing);
+ efx_mcdi_rpc_async(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
+ MC_CMD_FILTER_OP_OUT_LEN,
+ efx_ef10_filter_rfs_insert_complete, cookie);
+
+ return ins_index;
+
+fail_unlock:
+ spin_unlock_bh(&efx->filter_lock);
+ return rc;
+}
+
+static void
+efx_ef10_filter_rfs_insert_complete(struct efx_nic *efx, unsigned long cookie,
+ int rc, efx_dword_t *outbuf,
+ size_t outlen_actual)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ unsigned int ins_index, dmaq_id;
+ struct efx_filter_spec *spec;
+ bool replacing;
+
+ /* Unpack the cookie */
+ replacing = cookie >> 31;
+ ins_index = (cookie >> 16) & (HUNT_FILTER_TBL_ROWS - 1);
+ dmaq_id = cookie & 0xffff;
+
+ spin_lock_bh(&efx->filter_lock);
+ spec = efx_ef10_filter_entry_spec(table, ins_index);
+ if (rc == 0) {
+ table->entry[ins_index].handle =
+ MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE);
+ if (replacing)
+ spec->dmaq_id = dmaq_id;
+ } else if (!replacing) {
+ kfree(spec);
+ spec = NULL;
+ }
+ efx_ef10_filter_set_entry(table, ins_index, spec, 0);
+ spin_unlock_bh(&efx->filter_lock);
+
+ wake_up_all(&table->waitq);
+}
+
+static void
+efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx,
+ unsigned long filter_idx,
+ int rc, efx_dword_t *outbuf,
+ size_t outlen_actual);
+
+static bool efx_ef10_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
+ unsigned int filter_idx)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ struct efx_filter_spec *spec =
+ efx_ef10_filter_entry_spec(table, filter_idx);
+ MCDI_DECLARE_BUF(inbuf,
+ MC_CMD_FILTER_OP_IN_HANDLE_OFST +
+ MC_CMD_FILTER_OP_IN_HANDLE_LEN);
+
+ if (!spec ||
+ (table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAG_BUSY) ||
+ spec->priority != EFX_FILTER_PRI_HINT ||
+ !rps_may_expire_flow(efx->net_dev, spec->dmaq_id,
+ flow_id, filter_idx))
+ return false;
+
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
+ MC_CMD_FILTER_OP_IN_OP_REMOVE);
+ MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
+ table->entry[filter_idx].handle);
+ if (efx_mcdi_rpc_async(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), 0,
+ efx_ef10_filter_rfs_expire_complete, filter_idx))
+ return false;
+
+ table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
+ return true;
+}
+
+static void
+efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx,
+ unsigned long filter_idx,
+ int rc, efx_dword_t *outbuf,
+ size_t outlen_actual)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ struct efx_filter_spec *spec =
+ efx_ef10_filter_entry_spec(table, filter_idx);
+
+ spin_lock_bh(&efx->filter_lock);
+ if (rc == 0) {
+ kfree(spec);
+ efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
+ }
+ table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY;
+ wake_up_all(&table->waitq);
+ spin_unlock_bh(&efx->filter_lock);
+}
+
+#endif /* CONFIG_RFS_ACCEL */
+
+static int efx_ef10_filter_match_flags_from_mcdi(u32 mcdi_flags)
+{
+ int match_flags = 0;
+
+#define MAP_FLAG(gen_flag, mcdi_field) { \
+ u32 old_mcdi_flags = mcdi_flags; \
+ mcdi_flags &= ~(1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \
+ mcdi_field ## _LBN); \
+ if (mcdi_flags != old_mcdi_flags) \
+ match_flags |= EFX_FILTER_MATCH_ ## gen_flag; \
+ }
+ MAP_FLAG(LOC_MAC_IG, UNKNOWN_UCAST_DST);
+ MAP_FLAG(LOC_MAC_IG, UNKNOWN_MCAST_DST);
+ MAP_FLAG(REM_HOST, SRC_IP);
+ MAP_FLAG(LOC_HOST, DST_IP);
+ MAP_FLAG(REM_MAC, SRC_MAC);
+ MAP_FLAG(REM_PORT, SRC_PORT);
+ MAP_FLAG(LOC_MAC, DST_MAC);
+ MAP_FLAG(LOC_PORT, DST_PORT);
+ MAP_FLAG(ETHER_TYPE, ETHER_TYPE);
+ MAP_FLAG(INNER_VID, INNER_VLAN);
+ MAP_FLAG(OUTER_VID, OUTER_VLAN);
+ MAP_FLAG(IP_PROTO, IP_PROTO);
+#undef MAP_FLAG
+
+ /* Did we map them all? */
+ if (mcdi_flags)
+ return -EINVAL;
+
+ return match_flags;
+}
+
+static int efx_ef10_filter_table_probe(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PARSER_DISP_INFO_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX);
+ unsigned int pd_match_pri, pd_match_count;
+ struct efx_ef10_filter_table *table;
+ size_t outlen;
+ int rc;
+
+ table = kzalloc(sizeof(*table), GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+ /* Find out which RX filter types are supported, and their priorities */
+ MCDI_SET_DWORD(inbuf, GET_PARSER_DISP_INFO_IN_OP,
+ MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES);
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_PARSER_DISP_INFO,
+ inbuf, sizeof(inbuf), outbuf, sizeof(outbuf),
+ &outlen);
+ if (rc)
+ goto fail;
+ pd_match_count = MCDI_VAR_ARRAY_LEN(
+ outlen, GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES);
+ table->rx_match_count = 0;
+
+ for (pd_match_pri = 0; pd_match_pri < pd_match_count; pd_match_pri++) {
+ u32 mcdi_flags =
+ MCDI_ARRAY_DWORD(
+ outbuf,
+ GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES,
+ pd_match_pri);
+ rc = efx_ef10_filter_match_flags_from_mcdi(mcdi_flags);
+ if (rc < 0) {
+ netif_dbg(efx, probe, efx->net_dev,
+ "%s: fw flags %#x pri %u not supported in driver\n",
+ __func__, mcdi_flags, pd_match_pri);
+ } else {
+ netif_dbg(efx, probe, efx->net_dev,
+ "%s: fw flags %#x pri %u supported as driver flags %#x pri %u\n",
+ __func__, mcdi_flags, pd_match_pri,
+ rc, table->rx_match_count);
+ table->rx_match_flags[table->rx_match_count++] = rc;
+ }
+ }
+
+ table->entry = vzalloc(HUNT_FILTER_TBL_ROWS * sizeof(*table->entry));
+ if (!table->entry) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+
+ efx->filter_state = table;
+ init_waitqueue_head(&table->waitq);
+ return 0;
+
+fail:
+ kfree(table);
+ return rc;
+}
+
+static void efx_ef10_filter_table_restore(struct efx_nic *efx)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct efx_filter_spec *spec;
+ unsigned int filter_idx;
+ bool failed = false;
+ int rc;
+
+ if (!nic_data->must_restore_filters)
+ return;
+
+ spin_lock_bh(&efx->filter_lock);
+
+ for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
+ spec = efx_ef10_filter_entry_spec(table, filter_idx);
+ if (!spec)
+ continue;
+
+ table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
+ spin_unlock_bh(&efx->filter_lock);
+
+ rc = efx_ef10_filter_push(efx, spec,
+ &table->entry[filter_idx].handle,
+ false);
+ if (rc)
+ failed = true;
+
+ spin_lock_bh(&efx->filter_lock);
+ if (rc) {
+ kfree(spec);
+ efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
+ } else {
+ table->entry[filter_idx].spec &=
+ ~EFX_EF10_FILTER_FLAG_BUSY;
+ }
+ }
+
+ spin_unlock_bh(&efx->filter_lock);
+
+ if (failed)
+ netif_err(efx, hw, efx->net_dev,
+ "unable to restore all filters\n");
+ else
+ nic_data->must_restore_filters = false;
+}
+
+static void efx_ef10_filter_table_remove(struct efx_nic *efx)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
+ struct efx_filter_spec *spec;
+ unsigned int filter_idx;
+ int rc;
+
+ for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
+ spec = efx_ef10_filter_entry_spec(table, filter_idx);
+ if (!spec)
+ continue;
+
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
+ efx_ef10_filter_is_exclusive(spec) ?
+ MC_CMD_FILTER_OP_IN_OP_REMOVE :
+ MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
+ MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
+ table->entry[filter_idx].handle);
+ rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ if (rc)
+ netdev_WARN(efx->net_dev,
+ "filter_idx=%#x handle=%#llx\n",
+ filter_idx,
+ table->entry[filter_idx].handle);
+ kfree(spec);
+ }
+
+ vfree(table->entry);
+ kfree(table);
+}
+
+static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ struct net_device *net_dev = efx->net_dev;
+ struct efx_filter_spec spec;
+ bool remove_failed = false;
+ struct netdev_hw_addr *uc;
+ struct netdev_hw_addr *mc;
+ unsigned int filter_idx;
+ int i, n, rc;
+
+ if (!efx_dev_registered(efx))
+ return;
+
+ /* Mark old filters that may need to be removed */
+ spin_lock_bh(&efx->filter_lock);
+ n = table->dev_uc_count < 0 ? 1 : table->dev_uc_count;
+ for (i = 0; i < n; i++) {
+ filter_idx = table->dev_uc_list[i].id % HUNT_FILTER_TBL_ROWS;
+ table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD;
+ }
+ n = table->dev_mc_count < 0 ? 1 : table->dev_mc_count;
+ for (i = 0; i < n; i++) {
+ filter_idx = table->dev_mc_list[i].id % HUNT_FILTER_TBL_ROWS;
+ table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD;
+ }
+ spin_unlock_bh(&efx->filter_lock);
+
+ /* Copy/convert the address lists; add the primary station
+ * address and broadcast address
+ */
+ netif_addr_lock_bh(net_dev);
+ if (net_dev->flags & IFF_PROMISC ||
+ netdev_uc_count(net_dev) >= EFX_EF10_FILTER_DEV_UC_MAX) {
+ table->dev_uc_count = -1;
+ } else {
+ table->dev_uc_count = 1 + netdev_uc_count(net_dev);
+ ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr);
+ i = 1;
+ netdev_for_each_uc_addr(uc, net_dev) {
+ ether_addr_copy(table->dev_uc_list[i].addr, uc->addr);
+ i++;
+ }
+ }
+ if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI) ||
+ netdev_mc_count(net_dev) >= EFX_EF10_FILTER_DEV_MC_MAX) {
+ table->dev_mc_count = -1;
+ } else {
+ table->dev_mc_count = 1 + netdev_mc_count(net_dev);
+ eth_broadcast_addr(table->dev_mc_list[0].addr);
+ i = 1;
+ netdev_for_each_mc_addr(mc, net_dev) {
+ ether_addr_copy(table->dev_mc_list[i].addr, mc->addr);
+ i++;
+ }
+ }
+ netif_addr_unlock_bh(net_dev);
+
+ /* Insert/renew unicast filters */
+ if (table->dev_uc_count >= 0) {
+ for (i = 0; i < table->dev_uc_count; i++) {
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
+ EFX_FILTER_FLAG_RX_RSS,
+ 0);
+ efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
+ table->dev_uc_list[i].addr);
+ rc = efx_ef10_filter_insert(efx, &spec, true);
+ if (rc < 0) {
+ /* Fall back to unicast-promisc */
+ while (i--)
+ efx_ef10_filter_remove_safe(
+ efx, EFX_FILTER_PRI_AUTO,
+ table->dev_uc_list[i].id);
+ table->dev_uc_count = -1;
+ break;
+ }
+ table->dev_uc_list[i].id = rc;
+ }
+ }
+ if (table->dev_uc_count < 0) {
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
+ EFX_FILTER_FLAG_RX_RSS,
+ 0);
+ efx_filter_set_uc_def(&spec);
+ rc = efx_ef10_filter_insert(efx, &spec, true);
+ if (rc < 0) {
+ WARN_ON(1);
+ table->dev_uc_count = 0;
+ } else {
+ table->dev_uc_list[0].id = rc;
+ }
+ }
+
+ /* Insert/renew multicast filters */
+ if (table->dev_mc_count >= 0) {
+ for (i = 0; i < table->dev_mc_count; i++) {
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
+ EFX_FILTER_FLAG_RX_RSS,
+ 0);
+ efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
+ table->dev_mc_list[i].addr);
+ rc = efx_ef10_filter_insert(efx, &spec, true);
+ if (rc < 0) {
+ /* Fall back to multicast-promisc */
+ while (i--)
+ efx_ef10_filter_remove_safe(
+ efx, EFX_FILTER_PRI_AUTO,
+ table->dev_mc_list[i].id);
+ table->dev_mc_count = -1;
+ break;
+ }
+ table->dev_mc_list[i].id = rc;
+ }
+ }
+ if (table->dev_mc_count < 0) {
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
+ EFX_FILTER_FLAG_RX_RSS,
+ 0);
+ efx_filter_set_mc_def(&spec);
+ rc = efx_ef10_filter_insert(efx, &spec, true);
+ if (rc < 0) {
+ WARN_ON(1);
+ table->dev_mc_count = 0;
+ } else {
+ table->dev_mc_list[0].id = rc;
+ }
+ }
+
+ /* Remove filters that weren't renewed. Since nothing else
+ * changes the AUTO_OLD flag or removes these filters, we
+ * don't need to hold the filter_lock while scanning for
+ * these filters.
+ */
+ for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
+ if (ACCESS_ONCE(table->entry[i].spec) &
+ EFX_EF10_FILTER_FLAG_AUTO_OLD) {
+ if (efx_ef10_filter_remove_internal(
+ efx, 1U << EFX_FILTER_PRI_AUTO,
+ i, true) < 0)
+ remove_failed = true;
+ }
+ }
+ WARN_ON(remove_failed);
+}
+
+static int efx_ef10_mac_reconfigure(struct efx_nic *efx)
+{
+ efx_ef10_filter_sync_rx_mode(efx);
+
+ return efx_mcdi_set_mac(efx);
+}
+
+static int efx_ef10_start_bist(struct efx_nic *efx, u32 bist_type)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN);
+
+ MCDI_SET_DWORD(inbuf, START_BIST_IN_TYPE, bist_type);
+ return efx_mcdi_rpc(efx, MC_CMD_START_BIST, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+}
+
+/* MC BISTs follow a different poll mechanism to phy BISTs.
+ * The BIST is done in the poll handler on the MC, and the MCDI command
+ * will block until the BIST is done.
+ */
+static int efx_ef10_poll_bist(struct efx_nic *efx)
+{
+ int rc;
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_POLL_BIST_OUT_LEN);
+ size_t outlen;
+ u32 result;
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc != 0)
+ return rc;
+
+ if (outlen < MC_CMD_POLL_BIST_OUT_LEN)
+ return -EIO;
+
+ result = MCDI_DWORD(outbuf, POLL_BIST_OUT_RESULT);
+ switch (result) {
+ case MC_CMD_POLL_BIST_PASSED:
+ netif_dbg(efx, hw, efx->net_dev, "BIST passed.\n");
+ return 0;
+ case MC_CMD_POLL_BIST_TIMEOUT:
+ netif_err(efx, hw, efx->net_dev, "BIST timed out\n");
+ return -EIO;
+ case MC_CMD_POLL_BIST_FAILED:
+ netif_err(efx, hw, efx->net_dev, "BIST failed.\n");
+ return -EIO;
+ default:
+ netif_err(efx, hw, efx->net_dev,
+			  "BIST returned unknown result %u\n", result);
+ return -EIO;
+ }
+}
+
+static int efx_ef10_run_bist(struct efx_nic *efx, u32 bist_type)
+{
+ int rc;
+
+ netif_dbg(efx, drv, efx->net_dev, "starting BIST type %u\n", bist_type);
+
+ rc = efx_ef10_start_bist(efx, bist_type);
+ if (rc != 0)
+ return rc;
+
+ return efx_ef10_poll_bist(efx);
+}
+
+static int
+efx_ef10_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
+{
+ int rc, rc2;
+
+ efx_reset_down(efx, RESET_TYPE_WORLD);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_ENABLE_OFFLINE_BIST,
+ NULL, 0, NULL, 0, NULL);
+ if (rc != 0)
+ goto out;
+
+ tests->memory = efx_ef10_run_bist(efx, MC_CMD_MC_MEM_BIST) ? -1 : 1;
+ tests->registers = efx_ef10_run_bist(efx, MC_CMD_REG_BIST) ? -1 : 1;
+
+ rc = efx_mcdi_reset(efx, RESET_TYPE_WORLD);
+
+out:
+ rc2 = efx_reset_up(efx, RESET_TYPE_WORLD, rc == 0);
+ return rc ? rc : rc2;
+}
+
+#ifdef CONFIG_SFC_MTD
+
+struct efx_ef10_nvram_type_info {
+ u16 type, type_mask;
+ u8 port;
+ const char *name;
+};
+
+static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = {
+ { NVRAM_PARTITION_TYPE_MC_FIRMWARE, 0, 0, "sfc_mcfw" },
+ { NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 0, 0, "sfc_mcfw_backup" },
+ { NVRAM_PARTITION_TYPE_EXPANSION_ROM, 0, 0, "sfc_exp_rom" },
+ { NVRAM_PARTITION_TYPE_STATIC_CONFIG, 0, 0, "sfc_static_cfg" },
+ { NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 0, 0, "sfc_dynamic_cfg" },
+ { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 0, 0, "sfc_exp_rom_cfg" },
+ { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1, 0, 1, "sfc_exp_rom_cfg" },
+ { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2, 0, 2, "sfc_exp_rom_cfg" },
+ { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 0, 3, "sfc_exp_rom_cfg" },
+ { NVRAM_PARTITION_TYPE_LICENSE, 0, 0, "sfc_license" },
+ { NVRAM_PARTITION_TYPE_PHY_MIN, 0xff, 0, "sfc_phy_fw" },
+};
+
+static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
+ struct efx_mcdi_mtd_partition *part,
+ unsigned int type)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_METADATA_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_METADATA_OUT_LENMAX);
+ const struct efx_ef10_nvram_type_info *info;
+ size_t size, erase_size, outlen;
+ bool protected;
+ int rc;
+
+ for (info = efx_ef10_nvram_types; ; info++) {
+ if (info ==
+ efx_ef10_nvram_types + ARRAY_SIZE(efx_ef10_nvram_types))
+ return -ENODEV;
+ if ((type & ~info->type_mask) == info->type)
+ break;
+ }
+ if (info->port != efx_port_num(efx))
+ return -ENODEV;
+
+ rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected);
+ if (rc)
+ return rc;
+ if (protected)
+ return -ENODEV; /* hide it */
+
+ part->nvram_type = type;
+
+ MCDI_SET_DWORD(inbuf, NVRAM_METADATA_IN_TYPE, type);
+ rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_METADATA, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < MC_CMD_NVRAM_METADATA_OUT_LENMIN)
+ return -EIO;
+ if (MCDI_DWORD(outbuf, NVRAM_METADATA_OUT_FLAGS) &
+ (1 << MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN))
+ part->fw_subtype = MCDI_DWORD(outbuf,
+ NVRAM_METADATA_OUT_SUBTYPE);
+
+ part->common.dev_type_name = "EF10 NVRAM manager";
+ part->common.type_name = info->name;
+
+ part->common.mtd.type = MTD_NORFLASH;
+ part->common.mtd.flags = MTD_CAP_NORFLASH;
+ part->common.mtd.size = size;
+ part->common.mtd.erasesize = erase_size;
+
+ return 0;
+}
+
+static int efx_ef10_mtd_probe(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX);
+ struct efx_mcdi_mtd_partition *parts;
+ size_t outlen, n_parts_total, i, n_parts;
+ unsigned int type;
+ int rc;
+
+ ASSERT_RTNL();
+
+ BUILD_BUG_ON(MC_CMD_NVRAM_PARTITIONS_IN_LEN != 0);
+ rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_PARTITIONS, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN)
+ return -EIO;
+
+ n_parts_total = MCDI_DWORD(outbuf, NVRAM_PARTITIONS_OUT_NUM_PARTITIONS);
+ if (n_parts_total >
+ MCDI_VAR_ARRAY_LEN(outlen, NVRAM_PARTITIONS_OUT_TYPE_ID))
+ return -EIO;
+
+ parts = kcalloc(n_parts_total, sizeof(*parts), GFP_KERNEL);
+ if (!parts)
+ return -ENOMEM;
+
+ n_parts = 0;
+ for (i = 0; i < n_parts_total; i++) {
+ type = MCDI_ARRAY_DWORD(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID,
+ i);
+ rc = efx_ef10_mtd_probe_partition(efx, &parts[n_parts], type);
+ if (rc == 0)
+ n_parts++;
+ else if (rc != -ENODEV)
+ goto fail;
+ }
+
+ rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
+fail:
+ if (rc)
+ kfree(parts);
+ return rc;
+}
+
+#endif /* CONFIG_SFC_MTD */
+
+static void efx_ef10_ptp_write_host_time(struct efx_nic *efx, u32 host_time)
+{
+ _efx_writed(efx, cpu_to_le32(host_time), ER_DZ_MC_DB_LWRD);
+}
+
+static int efx_ef10_rx_enable_timestamping(struct efx_channel *channel,
+ bool temp)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN);
+ int rc;
+
+ if (channel->sync_events_state == SYNC_EVENTS_REQUESTED ||
+ channel->sync_events_state == SYNC_EVENTS_VALID ||
+ (temp && channel->sync_events_state == SYNC_EVENTS_DISABLED))
+ return 0;
+ channel->sync_events_state = SYNC_EVENTS_REQUESTED;
+
+ MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE);
+ MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
+ MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE,
+ channel->channel);
+
+ rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP,
+ inbuf, sizeof(inbuf), NULL, 0, NULL);
+
+ if (rc != 0)
+ channel->sync_events_state = temp ? SYNC_EVENTS_QUIESCENT :
+ SYNC_EVENTS_DISABLED;
+
+ return rc;
+}
+
+static int efx_ef10_rx_disable_timestamping(struct efx_channel *channel,
+ bool temp)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN);
+ int rc;
+
+ if (channel->sync_events_state == SYNC_EVENTS_DISABLED ||
+ (temp && channel->sync_events_state == SYNC_EVENTS_QUIESCENT))
+ return 0;
+ if (channel->sync_events_state == SYNC_EVENTS_QUIESCENT) {
+ channel->sync_events_state = SYNC_EVENTS_DISABLED;
+ return 0;
+ }
+ channel->sync_events_state = temp ? SYNC_EVENTS_QUIESCENT :
+ SYNC_EVENTS_DISABLED;
+
+ MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_TIME_EVENT_UNSUBSCRIBE);
+ MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
+ MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL,
+ MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_SINGLE);
+ MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE,
+ channel->channel);
+
+ rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP,
+ inbuf, sizeof(inbuf), NULL, 0, NULL);
+
+ return rc;
+}
+
+static int efx_ef10_ptp_set_ts_sync_events(struct efx_nic *efx, bool en,
+ bool temp)
+{
+ int (*set)(struct efx_channel *channel, bool temp);
+ struct efx_channel *channel;
+
+ set = en ?
+ efx_ef10_rx_enable_timestamping :
+ efx_ef10_rx_disable_timestamping;
+
+ efx_for_each_channel(channel, efx) {
+ int rc = set(channel, temp);
+ if (en && rc != 0) {
+ efx_ef10_ptp_set_ts_sync_events(efx, false, temp);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static int efx_ef10_ptp_set_ts_config(struct efx_nic *efx,
+ struct hwtstamp_config *init)
+{
+ int rc;
+
+ switch (init->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ efx_ef10_ptp_set_ts_sync_events(efx, false, false);
+ /* if TX timestamping is still requested then leave PTP on */
+ return efx_ptp_change_mode(efx,
+ init->tx_type != HWTSTAMP_TX_OFF, 0);
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ init->rx_filter = HWTSTAMP_FILTER_ALL;
+ rc = efx_ptp_change_mode(efx, true, 0);
+ if (!rc)
+ rc = efx_ef10_ptp_set_ts_sync_events(efx, true, false);
+ if (rc)
+ efx_ptp_change_mode(efx, false, 0);
+ return rc;
+ default:
+ return -ERANGE;
+ }
+}
+
+const struct efx_nic_type efx_hunt_a0_nic_type = {
+ .mem_map_size = efx_ef10_mem_map_size,
+ .probe = efx_ef10_probe,
+ .remove = efx_ef10_remove,
+ .dimension_resources = efx_ef10_dimension_resources,
+ .init = efx_ef10_init_nic,
+ .fini = efx_port_dummy_op_void,
+ .map_reset_reason = efx_mcdi_map_reset_reason,
+ .map_reset_flags = efx_ef10_map_reset_flags,
+ .reset = efx_ef10_reset,
+ .probe_port = efx_mcdi_port_probe,
+ .remove_port = efx_mcdi_port_remove,
+ .fini_dmaq = efx_ef10_fini_dmaq,
+ .prepare_flr = efx_ef10_prepare_flr,
+ .finish_flr = efx_port_dummy_op_void,
+ .describe_stats = efx_ef10_describe_stats,
+ .update_stats = efx_ef10_update_stats,
+ .start_stats = efx_mcdi_mac_start_stats,
+ .pull_stats = efx_mcdi_mac_pull_stats,
+ .stop_stats = efx_mcdi_mac_stop_stats,
+ .set_id_led = efx_mcdi_set_id_led,
+ .push_irq_moderation = efx_ef10_push_irq_moderation,
+ .reconfigure_mac = efx_ef10_mac_reconfigure,
+ .check_mac_fault = efx_mcdi_mac_check_fault,
+ .reconfigure_port = efx_mcdi_port_reconfigure,
+ .get_wol = efx_ef10_get_wol,
+ .set_wol = efx_ef10_set_wol,
+ .resume_wol = efx_port_dummy_op_void,
+ .test_chip = efx_ef10_test_chip,
+ .test_nvram = efx_mcdi_nvram_test_all,
+ .mcdi_request = efx_ef10_mcdi_request,
+ .mcdi_poll_response = efx_ef10_mcdi_poll_response,
+ .mcdi_read_response = efx_ef10_mcdi_read_response,
+ .mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot,
+ .irq_enable_master = efx_port_dummy_op_void,
+ .irq_test_generate = efx_ef10_irq_test_generate,
+ .irq_disable_non_ev = efx_port_dummy_op_void,
+ .irq_handle_msi = efx_ef10_msi_interrupt,
+ .irq_handle_legacy = efx_ef10_legacy_interrupt,
+ .tx_probe = efx_ef10_tx_probe,
+ .tx_init = efx_ef10_tx_init,
+ .tx_remove = efx_ef10_tx_remove,
+ .tx_write = efx_ef10_tx_write,
+ .rx_push_rss_config = efx_ef10_rx_push_rss_config,
+ .rx_probe = efx_ef10_rx_probe,
+ .rx_init = efx_ef10_rx_init,
+ .rx_remove = efx_ef10_rx_remove,
+ .rx_write = efx_ef10_rx_write,
+ .rx_defer_refill = efx_ef10_rx_defer_refill,
+ .ev_probe = efx_ef10_ev_probe,
+ .ev_init = efx_ef10_ev_init,
+ .ev_fini = efx_ef10_ev_fini,
+ .ev_remove = efx_ef10_ev_remove,
+ .ev_process = efx_ef10_ev_process,
+ .ev_read_ack = efx_ef10_ev_read_ack,
+ .ev_test_generate = efx_ef10_ev_test_generate,
+ .filter_table_probe = efx_ef10_filter_table_probe,
+ .filter_table_restore = efx_ef10_filter_table_restore,
+ .filter_table_remove = efx_ef10_filter_table_remove,
+ .filter_update_rx_scatter = efx_ef10_filter_update_rx_scatter,
+ .filter_insert = efx_ef10_filter_insert,
+ .filter_remove_safe = efx_ef10_filter_remove_safe,
+ .filter_get_safe = efx_ef10_filter_get_safe,
+ .filter_clear_rx = efx_ef10_filter_clear_rx,
+ .filter_count_rx_used = efx_ef10_filter_count_rx_used,
+ .filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit,
+ .filter_get_rx_ids = efx_ef10_filter_get_rx_ids,
+#ifdef CONFIG_RFS_ACCEL
+ .filter_rfs_insert = efx_ef10_filter_rfs_insert,
+ .filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one,
+#endif
+#ifdef CONFIG_SFC_MTD
+ .mtd_probe = efx_ef10_mtd_probe,
+ .mtd_rename = efx_mcdi_mtd_rename,
+ .mtd_read = efx_mcdi_mtd_read,
+ .mtd_erase = efx_mcdi_mtd_erase,
+ .mtd_write = efx_mcdi_mtd_write,
+ .mtd_sync = efx_mcdi_mtd_sync,
+#endif
+ .ptp_write_host_time = efx_ef10_ptp_write_host_time,
+ .ptp_set_ts_sync_events = efx_ef10_ptp_set_ts_sync_events,
+ .ptp_set_ts_config = efx_ef10_ptp_set_ts_config,
+ .sriov_init = efx_ef10_sriov_init,
+ .sriov_fini = efx_ef10_sriov_fini,
+ .sriov_mac_address_changed = efx_ef10_sriov_mac_address_changed,
+ .sriov_wanted = efx_ef10_sriov_wanted,
+ .sriov_reset = efx_ef10_sriov_reset,
+
+ .revision = EFX_REV_HUNT_A0,
+ .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
+ .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE,
+ .rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST,
+ .rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST,
+ .can_rx_scatter = true,
+ .always_rx_scatter = true,
+ .max_interrupt_mode = EFX_INT_MODE_MSIX,
+ .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
+ .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_RXHASH | NETIF_F_NTUPLE),
+ .mcdi_max_ver = 2,
+ .max_rx_ip_filters = HUNT_FILTER_TBL_ROWS,
+ .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
+ 1 << HWTSTAMP_FILTER_ALL,
+};
diff --git a/kernel/drivers/net/ethernet/sfc/ef10_regs.h b/kernel/drivers/net/ethernet/sfc/ef10_regs.h
new file mode 100644
index 000000000..62a55dde6
--- /dev/null
+++ b/kernel/drivers/net/ethernet/sfc/ef10_regs.h
@@ -0,0 +1,355 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2012-2013 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_EF10_REGS_H
+#define EFX_EF10_REGS_H
+
+/* EF10 hardware architecture definitions have a name prefix following
+ * the format:
+ *
+ * E<type>_<min-rev><max-rev>_
+ *
+ * The following <type> strings are used:
+ *
+ * MMIO register Host memory structure
+ * -------------------------------------------------------------
+ * Address R
+ * Bitfield RF SF
+ * Enumerator FE SE
+ *
+ * <min-rev> is the first revision to which the definition applies:
+ *
+ * D: Huntington A0
+ *
+ * If the definition has been changed or removed in later revisions
+ * then <max-rev> is the last revision to which the definition applies;
+ * otherwise it is "Z".
+ */
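+
+/* For example (using definitions found later in this file):
+ * ERF_DZ_EVQ_RPTR_VLD is an MMIO register bitfield ("RF") introduced in
+ * Huntington A0 ("D") and unchanged in later revisions ("Z"), while
+ * ESE_DZ_EV_CODE_RX_EV is an enumerator ("SE") within a host memory
+ * structure (the event entry).
+ */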
+
+/**************************************************************************
+ *
+ * EF10 registers and descriptors
+ *
+ **************************************************************************
+ */
+
+/* BIU_HW_REV_ID_REG: */
+#define ER_DZ_BIU_HW_REV_ID 0x00000000
+#define ERF_DZ_HW_REV_ID_LBN 0
+#define ERF_DZ_HW_REV_ID_WIDTH 32
+
+/* BIU_MC_SFT_STATUS_REG: */
+#define ER_DZ_BIU_MC_SFT_STATUS 0x00000010
+#define ER_DZ_BIU_MC_SFT_STATUS_STEP 4
+#define ER_DZ_BIU_MC_SFT_STATUS_ROWS 8
+#define ERF_DZ_MC_SFT_STATUS_LBN 0
+#define ERF_DZ_MC_SFT_STATUS_WIDTH 32
+
+/* BIU_INT_ISR_REG: */
+#define ER_DZ_BIU_INT_ISR 0x00000090
+#define ERF_DZ_ISR_REG_LBN 0
+#define ERF_DZ_ISR_REG_WIDTH 32
+
+/* MC_DB_LWRD_REG: */
+#define ER_DZ_MC_DB_LWRD 0x00000200
+#define ERF_DZ_MC_DOORBELL_L_LBN 0
+#define ERF_DZ_MC_DOORBELL_L_WIDTH 32
+
+/* MC_DB_HWRD_REG: */
+#define ER_DZ_MC_DB_HWRD 0x00000204
+#define ERF_DZ_MC_DOORBELL_H_LBN 0
+#define ERF_DZ_MC_DOORBELL_H_WIDTH 32
+
+/* EVQ_RPTR_REG: */
+#define ER_DZ_EVQ_RPTR 0x00000400
+#define ER_DZ_EVQ_RPTR_STEP 8192
+#define ER_DZ_EVQ_RPTR_ROWS 2048
+#define ERF_DZ_EVQ_RPTR_VLD_LBN 15
+#define ERF_DZ_EVQ_RPTR_VLD_WIDTH 1
+#define ERF_DZ_EVQ_RPTR_LBN 0
+#define ERF_DZ_EVQ_RPTR_WIDTH 15
+
+/* EVQ_TMR_REG: */
+#define ER_DZ_EVQ_TMR 0x00000420
+#define ER_DZ_EVQ_TMR_STEP 8192
+#define ER_DZ_EVQ_TMR_ROWS 2048
+#define ERF_DZ_TC_TIMER_MODE_LBN 14
+#define ERF_DZ_TC_TIMER_MODE_WIDTH 2
+#define ERF_DZ_TC_TIMER_VAL_LBN 0
+#define ERF_DZ_TC_TIMER_VAL_WIDTH 14
+
+/* RX_DESC_UPD_REG: */
+#define ER_DZ_RX_DESC_UPD 0x00000830
+#define ER_DZ_RX_DESC_UPD_STEP 8192
+#define ER_DZ_RX_DESC_UPD_ROWS 2048
+#define ERF_DZ_RX_DESC_WPTR_LBN 0
+#define ERF_DZ_RX_DESC_WPTR_WIDTH 12
+
+/* TX_DESC_UPD_REG: */
+#define ER_DZ_TX_DESC_UPD 0x00000a10
+#define ER_DZ_TX_DESC_UPD_STEP 8192
+#define ER_DZ_TX_DESC_UPD_ROWS 2048
+#define ERF_DZ_RSVD_LBN 76
+#define ERF_DZ_RSVD_WIDTH 20
+#define ERF_DZ_TX_DESC_WPTR_LBN 64
+#define ERF_DZ_TX_DESC_WPTR_WIDTH 12
+#define ERF_DZ_TX_DESC_HWORD_LBN 32
+#define ERF_DZ_TX_DESC_HWORD_WIDTH 32
+#define ERF_DZ_TX_DESC_LWORD_LBN 0
+#define ERF_DZ_TX_DESC_LWORD_WIDTH 32
+
+/* DRIVER_EV */
+#define ESF_DZ_DRV_CODE_LBN 60
+#define ESF_DZ_DRV_CODE_WIDTH 4
+#define ESF_DZ_DRV_SUB_CODE_LBN 56
+#define ESF_DZ_DRV_SUB_CODE_WIDTH 4
+#define ESE_DZ_DRV_TIMER_EV 3
+#define ESE_DZ_DRV_START_UP_EV 2
+#define ESE_DZ_DRV_WAKE_UP_EV 1
+#define ESF_DZ_DRV_SUB_DATA_LBN 0
+#define ESF_DZ_DRV_SUB_DATA_WIDTH 56
+#define ESF_DZ_DRV_EVQ_ID_LBN 0
+#define ESF_DZ_DRV_EVQ_ID_WIDTH 14
+#define ESF_DZ_DRV_TMR_ID_LBN 0
+#define ESF_DZ_DRV_TMR_ID_WIDTH 14
+
+/* EVENT_ENTRY */
+#define ESF_DZ_EV_CODE_LBN 60
+#define ESF_DZ_EV_CODE_WIDTH 4
+#define ESE_DZ_EV_CODE_MCDI_EV 12
+#define ESE_DZ_EV_CODE_DRIVER_EV 5
+#define ESE_DZ_EV_CODE_TX_EV 2
+#define ESE_DZ_EV_CODE_RX_EV 0
+#define ESE_DZ_OTHER other
+#define ESF_DZ_EV_DATA_LBN 0
+#define ESF_DZ_EV_DATA_WIDTH 60
+
+/* MC_EVENT */
+#define ESF_DZ_MC_CODE_LBN 60
+#define ESF_DZ_MC_CODE_WIDTH 4
+#define ESF_DZ_MC_OVERRIDE_HOLDOFF_LBN 59
+#define ESF_DZ_MC_OVERRIDE_HOLDOFF_WIDTH 1
+#define ESF_DZ_MC_DROP_EVENT_LBN 58
+#define ESF_DZ_MC_DROP_EVENT_WIDTH 1
+#define ESF_DZ_MC_SOFT_LBN 0
+#define ESF_DZ_MC_SOFT_WIDTH 58
+
+/* RX_EVENT */
+#define ESF_DZ_RX_CODE_LBN 60
+#define ESF_DZ_RX_CODE_WIDTH 4
+#define ESF_DZ_RX_OVERRIDE_HOLDOFF_LBN 59
+#define ESF_DZ_RX_OVERRIDE_HOLDOFF_WIDTH 1
+#define ESF_DZ_RX_DROP_EVENT_LBN 58
+#define ESF_DZ_RX_DROP_EVENT_WIDTH 1
+#define ESF_DZ_RX_EV_RSVD2_LBN 54
+#define ESF_DZ_RX_EV_RSVD2_WIDTH 4
+#define ESF_DZ_RX_EV_SOFT2_LBN 52
+#define ESF_DZ_RX_EV_SOFT2_WIDTH 2
+#define ESF_DZ_RX_DSC_PTR_LBITS_LBN 48
+#define ESF_DZ_RX_DSC_PTR_LBITS_WIDTH 4
+#define ESF_DZ_RX_L4_CLASS_LBN 45
+#define ESF_DZ_RX_L4_CLASS_WIDTH 3
+#define ESE_DZ_L4_CLASS_RSVD7 7
+#define ESE_DZ_L4_CLASS_RSVD6 6
+#define ESE_DZ_L4_CLASS_RSVD5 5
+#define ESE_DZ_L4_CLASS_RSVD4 4
+#define ESE_DZ_L4_CLASS_RSVD3 3
+#define ESE_DZ_L4_CLASS_UDP 2
+#define ESE_DZ_L4_CLASS_TCP 1
+#define ESE_DZ_L4_CLASS_UNKNOWN 0
+#define ESF_DZ_RX_L3_CLASS_LBN 42
+#define ESF_DZ_RX_L3_CLASS_WIDTH 3
+#define ESE_DZ_L3_CLASS_RSVD7 7
+#define ESE_DZ_L3_CLASS_IP6_FRAG 6
+#define ESE_DZ_L3_CLASS_ARP 5
+#define ESE_DZ_L3_CLASS_IP4_FRAG 4
+#define ESE_DZ_L3_CLASS_FCOE 3
+#define ESE_DZ_L3_CLASS_IP6 2
+#define ESE_DZ_L3_CLASS_IP4 1
+#define ESE_DZ_L3_CLASS_UNKNOWN 0
+#define ESF_DZ_RX_ETH_TAG_CLASS_LBN 39
+#define ESF_DZ_RX_ETH_TAG_CLASS_WIDTH 3
+#define ESE_DZ_ETH_TAG_CLASS_RSVD7 7
+#define ESE_DZ_ETH_TAG_CLASS_RSVD6 6
+#define ESE_DZ_ETH_TAG_CLASS_RSVD5 5
+#define ESE_DZ_ETH_TAG_CLASS_RSVD4 4
+#define ESE_DZ_ETH_TAG_CLASS_RSVD3 3
+#define ESE_DZ_ETH_TAG_CLASS_VLAN2 2
+#define ESE_DZ_ETH_TAG_CLASS_VLAN1 1
+#define ESE_DZ_ETH_TAG_CLASS_NONE 0
+#define ESF_DZ_RX_ETH_BASE_CLASS_LBN 36
+#define ESF_DZ_RX_ETH_BASE_CLASS_WIDTH 3
+#define ESE_DZ_ETH_BASE_CLASS_LLC_SNAP 2
+#define ESE_DZ_ETH_BASE_CLASS_LLC 1
+#define ESE_DZ_ETH_BASE_CLASS_ETH2 0
+#define ESF_DZ_RX_MAC_CLASS_LBN 35
+#define ESF_DZ_RX_MAC_CLASS_WIDTH 1
+#define ESE_DZ_MAC_CLASS_MCAST 1
+#define ESE_DZ_MAC_CLASS_UCAST 0
+#define ESF_DZ_RX_EV_SOFT1_LBN 32
+#define ESF_DZ_RX_EV_SOFT1_WIDTH 3
+#define ESF_DZ_RX_EV_RSVD1_LBN 31
+#define ESF_DZ_RX_EV_RSVD1_WIDTH 1
+#define ESF_DZ_RX_ABORT_LBN 30
+#define ESF_DZ_RX_ABORT_WIDTH 1
+#define ESF_DZ_RX_ECC_ERR_LBN 29
+#define ESF_DZ_RX_ECC_ERR_WIDTH 1
+#define ESF_DZ_RX_CRC1_ERR_LBN 28
+#define ESF_DZ_RX_CRC1_ERR_WIDTH 1
+#define ESF_DZ_RX_CRC0_ERR_LBN 27
+#define ESF_DZ_RX_CRC0_ERR_WIDTH 1
+#define ESF_DZ_RX_TCPUDP_CKSUM_ERR_LBN 26
+#define ESF_DZ_RX_TCPUDP_CKSUM_ERR_WIDTH 1
+#define ESF_DZ_RX_IPCKSUM_ERR_LBN 25
+#define ESF_DZ_RX_IPCKSUM_ERR_WIDTH 1
+#define ESF_DZ_RX_ECRC_ERR_LBN 24
+#define ESF_DZ_RX_ECRC_ERR_WIDTH 1
+#define ESF_DZ_RX_QLABEL_LBN 16
+#define ESF_DZ_RX_QLABEL_WIDTH 5
+#define ESF_DZ_RX_PARSE_INCOMPLETE_LBN 15
+#define ESF_DZ_RX_PARSE_INCOMPLETE_WIDTH 1
+#define ESF_DZ_RX_CONT_LBN 14
+#define ESF_DZ_RX_CONT_WIDTH 1
+#define ESF_DZ_RX_BYTES_LBN 0
+#define ESF_DZ_RX_BYTES_WIDTH 14
+
+/* RX_KER_DESC */
+#define ESF_DZ_RX_KER_RESERVED_LBN 62
+#define ESF_DZ_RX_KER_RESERVED_WIDTH 2
+#define ESF_DZ_RX_KER_BYTE_CNT_LBN 48
+#define ESF_DZ_RX_KER_BYTE_CNT_WIDTH 14
+#define ESF_DZ_RX_KER_BUF_ADDR_LBN 0
+#define ESF_DZ_RX_KER_BUF_ADDR_WIDTH 48
+
+/* TX_CSUM_TSTAMP_DESC */
+#define ESF_DZ_TX_DESC_IS_OPT_LBN 63
+#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
+#define ESF_DZ_TX_OPTION_TYPE_LBN 60
+#define ESF_DZ_TX_OPTION_TYPE_WIDTH 3
+#define ESE_DZ_TX_OPTION_DESC_TSO 7
+#define ESE_DZ_TX_OPTION_DESC_VLAN 6
+#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0
+#define ESF_DZ_TX_TIMESTAMP_LBN 5
+#define ESF_DZ_TX_TIMESTAMP_WIDTH 1
+#define ESF_DZ_TX_OPTION_CRC_MODE_LBN 2
+#define ESF_DZ_TX_OPTION_CRC_MODE_WIDTH 3
+#define ESE_DZ_TX_OPTION_CRC_FCOIP_MPA 5
+#define ESE_DZ_TX_OPTION_CRC_FCOIP_FCOE 4
+#define ESE_DZ_TX_OPTION_CRC_ISCSI_HDR_AND_PYLD 3
+#define ESE_DZ_TX_OPTION_CRC_ISCSI_HDR 2
+#define ESE_DZ_TX_OPTION_CRC_FCOE 1
+#define ESE_DZ_TX_OPTION_CRC_OFF 0
+#define ESF_DZ_TX_OPTION_UDP_TCP_CSUM_LBN 1
+#define ESF_DZ_TX_OPTION_UDP_TCP_CSUM_WIDTH 1
+#define ESF_DZ_TX_OPTION_IP_CSUM_LBN 0
+#define ESF_DZ_TX_OPTION_IP_CSUM_WIDTH 1
+
+/* TX_EVENT */
+#define ESF_DZ_TX_CODE_LBN 60
+#define ESF_DZ_TX_CODE_WIDTH 4
+#define ESF_DZ_TX_OVERRIDE_HOLDOFF_LBN 59
+#define ESF_DZ_TX_OVERRIDE_HOLDOFF_WIDTH 1
+#define ESF_DZ_TX_DROP_EVENT_LBN 58
+#define ESF_DZ_TX_DROP_EVENT_WIDTH 1
+#define ESF_DZ_TX_EV_RSVD_LBN 48
+#define ESF_DZ_TX_EV_RSVD_WIDTH 10
+#define ESF_DZ_TX_SOFT2_LBN 32
+#define ESF_DZ_TX_SOFT2_WIDTH 16
+#define ESF_DZ_TX_CAN_MERGE_LBN 31
+#define ESF_DZ_TX_CAN_MERGE_WIDTH 1
+#define ESF_DZ_TX_SOFT1_LBN 24
+#define ESF_DZ_TX_SOFT1_WIDTH 7
+#define ESF_DZ_TX_QLABEL_LBN 16
+#define ESF_DZ_TX_QLABEL_WIDTH 5
+#define ESF_DZ_TX_DESCR_INDX_LBN 0
+#define ESF_DZ_TX_DESCR_INDX_WIDTH 16
+
+/* TX_KER_DESC */
+#define ESF_DZ_TX_KER_TYPE_LBN 63
+#define ESF_DZ_TX_KER_TYPE_WIDTH 1
+#define ESF_DZ_TX_KER_CONT_LBN 62
+#define ESF_DZ_TX_KER_CONT_WIDTH 1
+#define ESF_DZ_TX_KER_BYTE_CNT_LBN 48
+#define ESF_DZ_TX_KER_BYTE_CNT_WIDTH 14
+#define ESF_DZ_TX_KER_BUF_ADDR_LBN 0
+#define ESF_DZ_TX_KER_BUF_ADDR_WIDTH 48
+
+/* TX_PIO_DESC */
+#define ESF_DZ_TX_PIO_TYPE_LBN 63
+#define ESF_DZ_TX_PIO_TYPE_WIDTH 1
+#define ESF_DZ_TX_PIO_OPT_LBN 60
+#define ESF_DZ_TX_PIO_OPT_WIDTH 3
+#define ESE_DZ_TX_OPTION_DESC_PIO 1
+#define ESF_DZ_TX_PIO_CONT_LBN 59
+#define ESF_DZ_TX_PIO_CONT_WIDTH 1
+#define ESF_DZ_TX_PIO_BYTE_CNT_LBN 32
+#define ESF_DZ_TX_PIO_BYTE_CNT_WIDTH 12
+#define ESF_DZ_TX_PIO_BUF_ADDR_LBN 0
+#define ESF_DZ_TX_PIO_BUF_ADDR_WIDTH 12
+
+/* TX_TSO_DESC */
+#define ESF_DZ_TX_DESC_IS_OPT_LBN 63
+#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
+#define ESF_DZ_TX_OPTION_TYPE_LBN 60
+#define ESF_DZ_TX_OPTION_TYPE_WIDTH 3
+#define ESE_DZ_TX_OPTION_DESC_TSO 7
+#define ESE_DZ_TX_OPTION_DESC_VLAN 6
+#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0
+#define ESF_DZ_TX_TSO_TCP_FLAGS_LBN 48
+#define ESF_DZ_TX_TSO_TCP_FLAGS_WIDTH 8
+#define ESF_DZ_TX_TSO_IP_ID_LBN 32
+#define ESF_DZ_TX_TSO_IP_ID_WIDTH 16
+#define ESF_DZ_TX_TSO_TCP_SEQNO_LBN 0
+#define ESF_DZ_TX_TSO_TCP_SEQNO_WIDTH 32
+
+/*************************************************************************/
+
+/* TX_DESC_UPD_REG: Transmit descriptor update register.
+ * We may write just one dword of these registers.
+ */
+#define ER_DZ_TX_DESC_UPD_DWORD (ER_DZ_TX_DESC_UPD + 2 * 4)
+#define ERF_DZ_TX_DESC_WPTR_DWORD_LBN (ERF_DZ_TX_DESC_WPTR_LBN - 2 * 32)
+#define ERF_DZ_TX_DESC_WPTR_DWORD_WIDTH ERF_DZ_TX_DESC_WPTR_WIDTH
+
+/* The workaround for bug 35388 requires multiplexing writes through
+ * the TX_DESC_UPD_DWORD address.
+ * TX_DESC_UPD: 0ppppppppppp (bit 11 lost)
+ * EVQ_RPTR: 1000hhhhhhhh, 1001llllllll (split into high and low bits)
+ * EVQ_TMR: 11mmvvvvvvvv (bits 8:13 of value lost)
+ */
+#define ER_DD_EVQ_INDIRECT ER_DZ_TX_DESC_UPD_DWORD
+#define ERF_DD_EVQ_IND_RPTR_FLAGS_LBN 8
+#define ERF_DD_EVQ_IND_RPTR_FLAGS_WIDTH 4
+#define EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH 8
+#define EFE_DD_EVQ_IND_RPTR_FLAGS_LOW 9
+#define ERF_DD_EVQ_IND_RPTR_LBN 0
+#define ERF_DD_EVQ_IND_RPTR_WIDTH 8
+#define ERF_DD_EVQ_IND_TIMER_FLAGS_LBN 10
+#define ERF_DD_EVQ_IND_TIMER_FLAGS_WIDTH 2
+#define EFE_DD_EVQ_IND_TIMER_FLAGS 3
+#define ERF_DD_EVQ_IND_TIMER_MODE_LBN 8
+#define ERF_DD_EVQ_IND_TIMER_MODE_WIDTH 2
+#define ERF_DD_EVQ_IND_TIMER_VAL_LBN 0
+#define ERF_DD_EVQ_IND_TIMER_VAL_WIDTH 8
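+
+/* Illustrative sketch (not part of the register definitions above): per
+ * the bug 35388 layout, a 15-bit EVQ read pointer "rptr" would be pushed
+ * through ER_DD_EVQ_INDIRECT as two dword writes:
+ *   (EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH << ERF_DD_EVQ_IND_RPTR_FLAGS_LBN) |
+ *       (rptr >> ERF_DD_EVQ_IND_RPTR_WIDTH)
+ * followed by
+ *   (EFE_DD_EVQ_IND_RPTR_FLAGS_LOW << ERF_DD_EVQ_IND_RPTR_FLAGS_LBN) |
+ *       (rptr & ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1))
+ */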
+
+/* TX_PIOBUF
+ * PIO buffer aperture (paged)
+ */
+#define ER_DZ_TX_PIOBUF 4096
+#define ER_DZ_TX_PIOBUF_SIZE 2048
+
+/* RX packet prefix */
+#define ES_DZ_RX_PREFIX_HASH_OFST 0
+#define ES_DZ_RX_PREFIX_VLAN1_OFST 4
+#define ES_DZ_RX_PREFIX_VLAN2_OFST 6
+#define ES_DZ_RX_PREFIX_PKTLEN_OFST 8
+#define ES_DZ_RX_PREFIX_TSTAMP_OFST 10
+#define ES_DZ_RX_PREFIX_SIZE 14
+
+#endif /* EFX_EF10_REGS_H */
diff --git a/kernel/drivers/net/ethernet/sfc/efx.c b/kernel/drivers/net/ethernet/sfc/efx.c
new file mode 100644
index 000000000..4b00545a3
--- /dev/null
+++ b/kernel/drivers/net/ethernet/sfc/efx.c
@@ -0,0 +1,3349 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2005-2013 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/notifier.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/in.h>
+#include <linux/ethtool.h>
+#include <linux/topology.h>
+#include <linux/gfp.h>
+#include <linux/aer.h>
+#include <linux/interrupt.h>
+#include "net_driver.h"
+#include "efx.h"
+#include "nic.h"
+#include "selftest.h"
+
+#include "mcdi.h"
+#include "workarounds.h"
+
+/**************************************************************************
+ *
+ * Type name strings
+ *
+ **************************************************************************
+ */
+
+/* Loopback mode names (see LOOPBACK_MODE()) */
+const unsigned int efx_loopback_mode_max = LOOPBACK_MAX;
+const char *const efx_loopback_mode_names[] = {
+ [LOOPBACK_NONE] = "NONE",
+ [LOOPBACK_DATA] = "DATAPATH",
+ [LOOPBACK_GMAC] = "GMAC",
+ [LOOPBACK_XGMII] = "XGMII",
+ [LOOPBACK_XGXS] = "XGXS",
+ [LOOPBACK_XAUI] = "XAUI",
+ [LOOPBACK_GMII] = "GMII",
+ [LOOPBACK_SGMII] = "SGMII",
+ [LOOPBACK_XGBR] = "XGBR",
+ [LOOPBACK_XFI] = "XFI",
+ [LOOPBACK_XAUI_FAR] = "XAUI_FAR",
+ [LOOPBACK_GMII_FAR] = "GMII_FAR",
+ [LOOPBACK_SGMII_FAR] = "SGMII_FAR",
+ [LOOPBACK_XFI_FAR] = "XFI_FAR",
+ [LOOPBACK_GPHY] = "GPHY",
+ [LOOPBACK_PHYXS] = "PHYXS",
+ [LOOPBACK_PCS] = "PCS",
+ [LOOPBACK_PMAPMD] = "PMA/PMD",
+ [LOOPBACK_XPORT] = "XPORT",
+ [LOOPBACK_XGMII_WS] = "XGMII_WS",
+ [LOOPBACK_XAUI_WS] = "XAUI_WS",
+ [LOOPBACK_XAUI_WS_FAR] = "XAUI_WS_FAR",
+ [LOOPBACK_XAUI_WS_NEAR] = "XAUI_WS_NEAR",
+ [LOOPBACK_GMII_WS] = "GMII_WS",
+ [LOOPBACK_XFI_WS] = "XFI_WS",
+ [LOOPBACK_XFI_WS_FAR] = "XFI_WS_FAR",
+ [LOOPBACK_PHYXS_WS] = "PHYXS_WS",
+};
+
+const unsigned int efx_reset_type_max = RESET_TYPE_MAX;
+const char *const efx_reset_type_names[] = {
+ [RESET_TYPE_INVISIBLE] = "INVISIBLE",
+ [RESET_TYPE_ALL] = "ALL",
+ [RESET_TYPE_RECOVER_OR_ALL] = "RECOVER_OR_ALL",
+ [RESET_TYPE_WORLD] = "WORLD",
+ [RESET_TYPE_RECOVER_OR_DISABLE] = "RECOVER_OR_DISABLE",
+ [RESET_TYPE_MC_BIST] = "MC_BIST",
+ [RESET_TYPE_DISABLE] = "DISABLE",
+ [RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG",
+ [RESET_TYPE_INT_ERROR] = "INT_ERROR",
+ [RESET_TYPE_RX_RECOVERY] = "RX_RECOVERY",
+ [RESET_TYPE_DMA_ERROR] = "DMA_ERROR",
+ [RESET_TYPE_TX_SKIP] = "TX_SKIP",
+ [RESET_TYPE_MC_FAILURE] = "MC_FAILURE",
+ [RESET_TYPE_MCDI_TIMEOUT] = "MCDI_TIMEOUT (FLR)",
+};
+
+/* Reset workqueue. If any NIC has a hardware failure then a reset will be
+ * queued onto this work queue. This is not a per-nic work queue, because
+ * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised.
+ */
+static struct workqueue_struct *reset_workqueue;
+
+/* How often and how many times to poll for a reset while waiting for a
+ * BIST that another function started to complete.
+ */
+#define BIST_WAIT_DELAY_MS 100
+#define BIST_WAIT_DELAY_COUNT 100
+
+/**************************************************************************
+ *
+ * Configurable values
+ *
+ *************************************************************************/
+
+/*
+ * Use separate channels for TX and RX events
+ *
+ * Set this to 1 to use separate channels for TX and RX. It allows us
+ * to control interrupt affinity separately for TX and RX.
+ *
+ * This is only used in MSI-X interrupt mode
+ */
+static bool separate_tx_channels;
+module_param(separate_tx_channels, bool, 0444);
+MODULE_PARM_DESC(separate_tx_channels,
+ "Use separate channels for TX and RX");
+
+/* This is the weight assigned to each of the (per-channel) virtual
+ * NAPI devices.
+ */
+static int napi_weight = 64;
+
+/* This is the time (in jiffies) between invocations of the hardware
+ * monitor.
+ * On Falcon-based NICs, this will:
+ * - Check the on-board hardware monitor;
+ * - Poll the link state and reconfigure the hardware as necessary.
+ * On Siena-based NICs for power systems with EEH support, this will give EEH a
+ * chance to start.
+ */
+static unsigned int efx_monitor_interval = 1 * HZ;
+
+/* Initial interrupt moderation settings. They can be modified after
+ * module load with ethtool.
+ *
+ * The default for RX should strike a balance between increasing the
+ * round-trip latency and reducing overhead.
+ */
+static unsigned int rx_irq_mod_usec = 60;
+
+/* Initial interrupt moderation settings. They can be modified after
+ * module load with ethtool.
+ *
+ * This default is chosen to ensure that a 10G link does not go idle
+ * while a TX queue is stopped after it has become full. A queue is
+ * restarted when it drops below half full. The time this takes (assuming
+ * worst case 3 descriptors per packet and 1024 descriptors, i.e. 512
+ * descriptors to drain, at roughly 1.2 usec wire time per full-sized
+ * frame on a 10G link) is 512 / 3 * 1.2 = 205 usec.
+ */
+static unsigned int tx_irq_mod_usec = 150;
+
+/* This is the first interrupt mode to try out of:
+ * 0 => MSI-X
+ * 1 => MSI
+ * 2 => legacy
+ */
+static unsigned int interrupt_mode;
+
+/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
+ * i.e. the number of CPUs among which we may distribute simultaneous
+ * interrupt handling.
+ *
+ * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
+ * The default (0) means to assign an interrupt to each core.
+ */
+static unsigned int rss_cpus;
+module_param(rss_cpus, uint, 0444);
+MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");
+
+static bool phy_flash_cfg;
+module_param(phy_flash_cfg, bool, 0644);
+MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially");
+
+static unsigned irq_adapt_low_thresh = 8000;
+module_param(irq_adapt_low_thresh, uint, 0644);
+MODULE_PARM_DESC(irq_adapt_low_thresh,
+ "Threshold score for reducing IRQ moderation");
+
+static unsigned irq_adapt_high_thresh = 16000;
+module_param(irq_adapt_high_thresh, uint, 0644);
+MODULE_PARM_DESC(irq_adapt_high_thresh,
+ "Threshold score for increasing IRQ moderation");
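+
+/* These thresholds drive the adaptive scheme in efx_poll() below: every
+ * 1000 interrupts the channel's irq_mod_score is compared against them
+ * and the channel's IRQ moderation value is stepped down (score below
+ * irq_adapt_low_thresh) or up (score above irq_adapt_high_thresh) by one.
+ */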
+
+static unsigned debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
+ NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
+ NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
+ NETIF_MSG_TX_ERR | NETIF_MSG_HW);
+module_param(debug, uint, 0);
+MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
+
+/**************************************************************************
+ *
+ * Utility functions and prototypes
+ *
+ *************************************************************************/
+
+static int efx_soft_enable_interrupts(struct efx_nic *efx);
+static void efx_soft_disable_interrupts(struct efx_nic *efx);
+static void efx_remove_channel(struct efx_channel *channel);
+static void efx_remove_channels(struct efx_nic *efx);
+static const struct efx_channel_type efx_default_channel_type;
+static void efx_remove_port(struct efx_nic *efx);
+static void efx_init_napi_channel(struct efx_channel *channel);
+static void efx_fini_napi(struct efx_nic *efx);
+static void efx_fini_napi_channel(struct efx_channel *channel);
+static void efx_fini_struct(struct efx_nic *efx);
+static void efx_start_all(struct efx_nic *efx);
+static void efx_stop_all(struct efx_nic *efx);
+
+#define EFX_ASSERT_RESET_SERIALISED(efx) \
+ do { \
+ if ((efx->state == STATE_READY) || \
+ (efx->state == STATE_RECOVERY) || \
+ (efx->state == STATE_DISABLED)) \
+ ASSERT_RTNL(); \
+ } while (0)
+
+static int efx_check_disabled(struct efx_nic *efx)
+{
+ if (efx->state == STATE_DISABLED || efx->state == STATE_RECOVERY) {
+ netif_err(efx, drv, efx->net_dev,
+ "device is disabled due to earlier errors\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+/**************************************************************************
+ *
+ * Event queue processing
+ *
+ *************************************************************************/
+
+/* Process channel's event queue
+ *
+ * This function is responsible for processing the event queue of a
+ * single channel. The caller must guarantee that this function will
+ * never be concurrently called more than once on the same channel,
+ * though different channels may be being processed concurrently.
+ */
+static int efx_process_channel(struct efx_channel *channel, int budget)
+{
+ int spent;
+
+ if (unlikely(!channel->enabled))
+ return 0;
+
+ spent = efx_nic_process_eventq(channel, budget);
+ if (spent && efx_channel_has_rx_queue(channel)) {
+ struct efx_rx_queue *rx_queue =
+ efx_channel_get_rx_queue(channel);
+
+ efx_rx_flush_packet(channel);
+ efx_fast_push_rx_descriptors(rx_queue, true);
+ }
+
+ return spent;
+}
+
+/* NAPI poll handler
+ *
+ * NAPI guarantees serialisation of polls of the same device, which
+ * provides the guarantee required by efx_process_channel().
+ */
+static int efx_poll(struct napi_struct *napi, int budget)
+{
+ struct efx_channel *channel =
+ container_of(napi, struct efx_channel, napi_str);
+ struct efx_nic *efx = channel->efx;
+ int spent;
+
+ if (!efx_channel_lock_napi(channel))
+ return budget;
+
+ netif_vdbg(efx, intr, efx->net_dev,
+ "channel %d NAPI poll executing on CPU %d\n",
+ channel->channel, raw_smp_processor_id());
+
+ spent = efx_process_channel(channel, budget);
+
+ if (spent < budget) {
+ if (efx_channel_has_rx_queue(channel) &&
+ efx->irq_rx_adaptive &&
+ unlikely(++channel->irq_count == 1000)) {
+ if (unlikely(channel->irq_mod_score <
+ irq_adapt_low_thresh)) {
+ if (channel->irq_moderation > 1) {
+ channel->irq_moderation -= 1;
+ efx->type->push_irq_moderation(channel);
+ }
+ } else if (unlikely(channel->irq_mod_score >
+ irq_adapt_high_thresh)) {
+ if (channel->irq_moderation <
+ efx->irq_rx_moderation) {
+ channel->irq_moderation += 1;
+ efx->type->push_irq_moderation(channel);
+ }
+ }
+ channel->irq_count = 0;
+ channel->irq_mod_score = 0;
+ }
+
+ efx_filter_rfs_expire(channel);
+
+ /* There is no race here; although napi_disable() will
+ * only wait for napi_complete(), this isn't a problem
+ * since efx_nic_eventq_read_ack() will have no effect if
+ * interrupts have already been disabled.
+ */
+ napi_complete(napi);
+ efx_nic_eventq_read_ack(channel);
+ }
+
+ efx_channel_unlock_napi(channel);
+ return spent;
+}
+
+/* Create event queue
+ * Event queue memory allocations are done only once. If the channel
+ * is reset, the memory buffer will be reused; this guards against
+ * errors during channel reset and also simplifies interrupt handling.
+ */
+static int efx_probe_eventq(struct efx_channel *channel)
+{
+ struct efx_nic *efx = channel->efx;
+ unsigned long entries;
+
+ netif_dbg(efx, probe, efx->net_dev,
+ "chan %d create event queue\n", channel->channel);
+
+ /* Build an event queue with room for one event per tx and rx buffer,
+ * plus some extra for link state events and MCDI completions. */
+ entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
+ EFX_BUG_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE);
+ channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1;
+
+ return efx_nic_probe_eventq(channel);
+}
+
+/* Prepare channel's event queue */
+static int efx_init_eventq(struct efx_channel *channel)
+{
+ struct efx_nic *efx = channel->efx;
+ int rc;
+
+ EFX_WARN_ON_PARANOID(channel->eventq_init);
+
+ netif_dbg(efx, drv, efx->net_dev,
+ "chan %d init event queue\n", channel->channel);
+
+ rc = efx_nic_init_eventq(channel);
+ if (rc == 0) {
+ efx->type->push_irq_moderation(channel);
+ channel->eventq_read_ptr = 0;
+ channel->eventq_init = true;
+ }
+ return rc;
+}
+
+/* Enable event queue processing and NAPI */
+void efx_start_eventq(struct efx_channel *channel)
+{
+ netif_dbg(channel->efx, ifup, channel->efx->net_dev,
+ "chan %d start event queue\n", channel->channel);
+
+ /* Make sure the NAPI handler sees the enabled flag set */
+ channel->enabled = true;
+ smp_wmb();
+
+ efx_channel_enable(channel);
+ napi_enable(&channel->napi_str);
+ efx_nic_eventq_read_ack(channel);
+}
+
+/* Disable event queue processing and NAPI */
+void efx_stop_eventq(struct efx_channel *channel)
+{
+ if (!channel->enabled)
+ return;
+
+ napi_disable(&channel->napi_str);
+ while (!efx_channel_disable(channel))
+ usleep_range(1000, 20000);
+ channel->enabled = false;
+}
+
+static void efx_fini_eventq(struct efx_channel *channel)
+{
+ if (!channel->eventq_init)
+ return;
+
+ netif_dbg(channel->efx, drv, channel->efx->net_dev,
+ "chan %d fini event queue\n", channel->channel);
+
+ efx_nic_fini_eventq(channel);
+ channel->eventq_init = false;
+}
+
+static void efx_remove_eventq(struct efx_channel *channel)
+{
+ netif_dbg(channel->efx, drv, channel->efx->net_dev,
+ "chan %d remove event queue\n", channel->channel);
+
+ efx_nic_remove_eventq(channel);
+}
+
+/**************************************************************************
+ *
+ * Channel handling
+ *
+ *************************************************************************/
+
+/* Allocate and initialise a channel structure. */
+static struct efx_channel *
+efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
+{
+ struct efx_channel *channel;
+ struct efx_rx_queue *rx_queue;
+ struct efx_tx_queue *tx_queue;
+ int j;
+
+ channel = kzalloc(sizeof(*channel), GFP_KERNEL);
+ if (!channel)
+ return NULL;
+
+ channel->efx = efx;
+ channel->channel = i;
+ channel->type = &efx_default_channel_type;
+
+ for (j = 0; j < EFX_TXQ_TYPES; j++) {
+ tx_queue = &channel->tx_queue[j];
+ tx_queue->efx = efx;
+ tx_queue->queue = i * EFX_TXQ_TYPES + j;
+ tx_queue->channel = channel;
+ }
+
+ rx_queue = &channel->rx_queue;
+ rx_queue->efx = efx;
+ setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
+ (unsigned long)rx_queue);
+
+ return channel;
+}
+
+/* Allocate and initialise a channel structure, copying parameters
+ * (but not resources) from an old channel structure.
+ */
+static struct efx_channel *
+efx_copy_channel(const struct efx_channel *old_channel)
+{
+ struct efx_channel *channel;
+ struct efx_rx_queue *rx_queue;
+ struct efx_tx_queue *tx_queue;
+ int j;
+
+ channel = kmalloc(sizeof(*channel), GFP_KERNEL);
+ if (!channel)
+ return NULL;
+
+ *channel = *old_channel;
+
+ channel->napi_dev = NULL;
+ memset(&channel->eventq, 0, sizeof(channel->eventq));
+
+ for (j = 0; j < EFX_TXQ_TYPES; j++) {
+ tx_queue = &channel->tx_queue[j];
+ if (tx_queue->channel)
+ tx_queue->channel = channel;
+ tx_queue->buffer = NULL;
+ memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
+ }
+
+ rx_queue = &channel->rx_queue;
+ rx_queue->buffer = NULL;
+ memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
+ setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
+ (unsigned long)rx_queue);
+
+ return channel;
+}
+
+static int efx_probe_channel(struct efx_channel *channel)
+{
+ struct efx_tx_queue *tx_queue;
+ struct efx_rx_queue *rx_queue;
+ int rc;
+
+ netif_dbg(channel->efx, probe, channel->efx->net_dev,
+ "creating channel %d\n", channel->channel);
+
+ rc = channel->type->pre_probe(channel);
+ if (rc)
+ goto fail;
+
+ rc = efx_probe_eventq(channel);
+ if (rc)
+ goto fail;
+
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ rc = efx_probe_tx_queue(tx_queue);
+ if (rc)
+ goto fail;
+ }
+
+ efx_for_each_channel_rx_queue(rx_queue, channel) {
+ rc = efx_probe_rx_queue(rx_queue);
+ if (rc)
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ efx_remove_channel(channel);
+ return rc;
+}
+
+static void
+efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len)
+{
+ struct efx_nic *efx = channel->efx;
+ const char *type;
+ int number;
+
+ number = channel->channel;
+ if (efx->tx_channel_offset == 0) {
+ type = "";
+ } else if (channel->channel < efx->tx_channel_offset) {
+ type = "-rx";
+ } else {
+ type = "-tx";
+ number -= efx->tx_channel_offset;
+ }
+ snprintf(buf, len, "%s%s-%d", efx->name, type, number);
+}
+
+static void efx_set_channel_names(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ efx_for_each_channel(channel, efx)
+ channel->type->get_name(channel,
+ efx->msi_context[channel->channel].name,
+ sizeof(efx->msi_context[0].name));
+}
+
+static int efx_probe_channels(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+ int rc;
+
+ /* Restart special buffer allocation */
+ efx->next_buffer_table = 0;
+
+ /* Probe channels in reverse, so that any 'extra' channels
+ * use the start of the buffer table. This allows the traffic
+ * channels to be resized without moving them or wasting the
+ * entries before them.
+ */
+ efx_for_each_channel_rev(channel, efx) {
+ rc = efx_probe_channel(channel);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "failed to create channel %d\n",
+ channel->channel);
+ goto fail;
+ }
+ }
+ efx_set_channel_names(efx);
+
+ return 0;
+
+fail:
+ efx_remove_channels(efx);
+ return rc;
+}
+
+/* Channels are shutdown and reinitialised whilst the NIC is running
+ * to propagate configuration changes (mtu, checksum offload), or
+ * to clear hardware error conditions
+ */
+static void efx_start_datapath(struct efx_nic *efx)
+{
+ bool old_rx_scatter = efx->rx_scatter;
+ struct efx_tx_queue *tx_queue;
+ struct efx_rx_queue *rx_queue;
+ struct efx_channel *channel;
+ size_t rx_buf_len;
+
+ /* Calculate the rx buffer allocation parameters required to
+ * support the current MTU, including padding for header
+ * alignment and overruns.
+ */
+ efx->rx_dma_len = (efx->rx_prefix_size +
+ EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
+ efx->type->rx_buffer_padding);
+ rx_buf_len = (sizeof(struct efx_rx_page_state) +
+ efx->rx_ip_align + efx->rx_dma_len);
+ if (rx_buf_len <= PAGE_SIZE) {
+ efx->rx_scatter = efx->type->always_rx_scatter;
+ efx->rx_buffer_order = 0;
+ } else if (efx->type->can_rx_scatter) {
+ BUILD_BUG_ON(EFX_RX_USR_BUF_SIZE % L1_CACHE_BYTES);
+ BUILD_BUG_ON(sizeof(struct efx_rx_page_state) +
+ 2 * ALIGN(NET_IP_ALIGN + EFX_RX_USR_BUF_SIZE,
+ EFX_RX_BUF_ALIGNMENT) >
+ PAGE_SIZE);
+ efx->rx_scatter = true;
+ efx->rx_dma_len = EFX_RX_USR_BUF_SIZE;
+ efx->rx_buffer_order = 0;
+ } else {
+ efx->rx_scatter = false;
+ efx->rx_buffer_order = get_order(rx_buf_len);
+ }
+
+ efx_rx_config_page_split(efx);
+ if (efx->rx_buffer_order)
+ netif_dbg(efx, drv, efx->net_dev,
+ "RX buf len=%u; page order=%u batch=%u\n",
+ efx->rx_dma_len, efx->rx_buffer_order,
+ efx->rx_pages_per_batch);
+ else
+ netif_dbg(efx, drv, efx->net_dev,
+ "RX buf len=%u step=%u bpp=%u; page batch=%u\n",
+ efx->rx_dma_len, efx->rx_page_buf_step,
+ efx->rx_bufs_per_page, efx->rx_pages_per_batch);
+
+ /* RX filters may also have scatter-enabled flags */
+ if (efx->rx_scatter != old_rx_scatter)
+ efx->type->filter_update_rx_scatter(efx);
+
+ /* We must keep at least one descriptor in a TX ring empty.
+ * We could avoid this when the queue size does not exactly
+ * match the hardware ring size, but it's not that important.
+ * Therefore we stop the queue when one more skb might fill
+ * the ring completely. We wake it when half way back to
+ * empty.
+ */
+ efx->txq_stop_thresh = efx->txq_entries - efx_tx_max_skb_descs(efx);
+ efx->txq_wake_thresh = efx->txq_stop_thresh / 2;
+
+ /* Initialise the channels */
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ efx_init_tx_queue(tx_queue);
+ atomic_inc(&efx->active_queues);
+ }
+
+ efx_for_each_channel_rx_queue(rx_queue, channel) {
+ efx_init_rx_queue(rx_queue);
+ atomic_inc(&efx->active_queues);
+ efx_stop_eventq(channel);
+ efx_fast_push_rx_descriptors(rx_queue, false);
+ efx_start_eventq(channel);
+ }
+
+ WARN_ON(channel->rx_pkt_n_frags);
+ }
+
+ efx_ptp_start_datapath(efx);
+
+ if (netif_device_present(efx->net_dev))
+ netif_tx_wake_all_queues(efx->net_dev);
+}
+
+static void efx_stop_datapath(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+ struct efx_tx_queue *tx_queue;
+ struct efx_rx_queue *rx_queue;
+ int rc;
+
+ EFX_ASSERT_RESET_SERIALISED(efx);
+ BUG_ON(efx->port_enabled);
+
+ efx_ptp_stop_datapath(efx);
+
+ /* Stop RX refill */
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_rx_queue(rx_queue, channel)
+ rx_queue->refill_enabled = false;
+ }
+
+ efx_for_each_channel(channel, efx) {
+ /* RX packet processing is pipelined, so wait for the
+ * NAPI handler to complete. At least event queue 0
+ * might be kept active by non-data events, so don't
+ * use napi_synchronize() but actually disable NAPI
+ * temporarily.
+ */
+ if (efx_channel_has_rx_queue(channel)) {
+ efx_stop_eventq(channel);
+ efx_start_eventq(channel);
+ }
+ }
+
+ rc = efx->type->fini_dmaq(efx);
+ if (rc && EFX_WORKAROUND_7803(efx)) {
+ /* Schedule a reset to recover from the flush failure. The
+ * descriptor caches reference memory we're about to free,
+ * but falcon_reconfigure_mac_wrapper() won't reconnect
+ * the MACs because of the pending reset.
+ */
+ netif_err(efx, drv, efx->net_dev,
+ "Resetting to recover from flush failure\n");
+ efx_schedule_reset(efx, RESET_TYPE_ALL);
+ } else if (rc) {
+ netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
+ } else {
+ netif_dbg(efx, drv, efx->net_dev,
+ "successfully flushed all queues\n");
+ }
+
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_rx_queue(rx_queue, channel)
+ efx_fini_rx_queue(rx_queue);
+ efx_for_each_possible_channel_tx_queue(tx_queue, channel)
+ efx_fini_tx_queue(tx_queue);
+ }
+}
+
+static void efx_remove_channel(struct efx_channel *channel)
+{
+ struct efx_tx_queue *tx_queue;
+ struct efx_rx_queue *rx_queue;
+
+ netif_dbg(channel->efx, drv, channel->efx->net_dev,
+ "destroy chan %d\n", channel->channel);
+
+ efx_for_each_channel_rx_queue(rx_queue, channel)
+ efx_remove_rx_queue(rx_queue);
+ efx_for_each_possible_channel_tx_queue(tx_queue, channel)
+ efx_remove_tx_queue(tx_queue);
+ efx_remove_eventq(channel);
+ channel->type->post_remove(channel);
+}
+
+static void efx_remove_channels(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ efx_for_each_channel(channel, efx)
+ efx_remove_channel(channel);
+}
+
+int
+efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
+{
+ struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
+ u32 old_rxq_entries, old_txq_entries;
+ unsigned i, next_buffer_table = 0;
+ int rc, rc2;
+
+ rc = efx_check_disabled(efx);
+ if (rc)
+ return rc;
+
+ /* Not all channels should be reallocated. We must avoid
+ * reallocating their buffer table entries.
+ */
+ efx_for_each_channel(channel, efx) {
+ struct efx_rx_queue *rx_queue;
+ struct efx_tx_queue *tx_queue;
+
+ if (channel->type->copy)
+ continue;
+ next_buffer_table = max(next_buffer_table,
+ channel->eventq.index +
+ channel->eventq.entries);
+ efx_for_each_channel_rx_queue(rx_queue, channel)
+ next_buffer_table = max(next_buffer_table,
+ rx_queue->rxd.index +
+ rx_queue->rxd.entries);
+ efx_for_each_channel_tx_queue(tx_queue, channel)
+ next_buffer_table = max(next_buffer_table,
+ tx_queue->txd.index +
+ tx_queue->txd.entries);
+ }
+
+ efx_device_detach_sync(efx);
+ efx_stop_all(efx);
+ efx_soft_disable_interrupts(efx);
+
+ /* Clone channels (where possible) */
+ memset(other_channel, 0, sizeof(other_channel));
+ for (i = 0; i < efx->n_channels; i++) {
+ channel = efx->channel[i];
+ if (channel->type->copy)
+ channel = channel->type->copy(channel);
+ if (!channel) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ other_channel[i] = channel;
+ }
+
+ /* Swap entry counts and channel pointers */
+ old_rxq_entries = efx->rxq_entries;
+ old_txq_entries = efx->txq_entries;
+ efx->rxq_entries = rxq_entries;
+ efx->txq_entries = txq_entries;
+ for (i = 0; i < efx->n_channels; i++) {
+ channel = efx->channel[i];
+ efx->channel[i] = other_channel[i];
+ other_channel[i] = channel;
+ }
+
+ /* Restart buffer table allocation */
+ efx->next_buffer_table = next_buffer_table;
+
+ for (i = 0; i < efx->n_channels; i++) {
+ channel = efx->channel[i];
+ if (!channel->type->copy)
+ continue;
+ rc = efx_probe_channel(channel);
+ if (rc)
+ goto rollback;
+ efx_init_napi_channel(efx->channel[i]);
+ }
+
+out:
+ /* Destroy unused channel structures */
+ for (i = 0; i < efx->n_channels; i++) {
+ channel = other_channel[i];
+ if (channel && channel->type->copy) {
+ efx_fini_napi_channel(channel);
+ efx_remove_channel(channel);
+ kfree(channel);
+ }
+ }
+
+ rc2 = efx_soft_enable_interrupts(efx);
+ if (rc2) {
+ rc = rc ? rc : rc2;
+ netif_err(efx, drv, efx->net_dev,
+ "unable to restart interrupts on channel reallocation\n");
+ efx_schedule_reset(efx, RESET_TYPE_DISABLE);
+ } else {
+ efx_start_all(efx);
+ netif_device_attach(efx->net_dev);
+ }
+ return rc;
+
+rollback:
+ /* Swap back */
+ efx->rxq_entries = old_rxq_entries;
+ efx->txq_entries = old_txq_entries;
+ for (i = 0; i < efx->n_channels; i++) {
+ channel = efx->channel[i];
+ efx->channel[i] = other_channel[i];
+ other_channel[i] = channel;
+ }
+ goto out;
+}
+
+void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
+{
+ mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100));
+}
+
+static const struct efx_channel_type efx_default_channel_type = {
+ .pre_probe = efx_channel_dummy_op_int,
+ .post_remove = efx_channel_dummy_op_void,
+ .get_name = efx_get_channel_name,
+ .copy = efx_copy_channel,
+ .keep_eventq = false,
+};
+
+int efx_channel_dummy_op_int(struct efx_channel *channel)
+{
+ return 0;
+}
+
+void efx_channel_dummy_op_void(struct efx_channel *channel)
+{
+}
+
+/**************************************************************************
+ *
+ * Port handling
+ *
+ **************************************************************************/
+
+/* This ensures that the kernel is kept informed (via
+ * netif_carrier_on/off) of the link status, and also maintains the
+ * link status's stop on the port's TX queue.
+ */
+void efx_link_status_changed(struct efx_nic *efx)
+{
+ struct efx_link_state *link_state = &efx->link_state;
+
+ /* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
+ * that no events are triggered between unregister_netdev() and the
+ * driver unloading. A more general condition is that NETDEV_CHANGE
+ * can only be generated between NETDEV_UP and NETDEV_DOWN */
+ if (!netif_running(efx->net_dev))
+ return;
+
+ if (link_state->up != netif_carrier_ok(efx->net_dev)) {
+ efx->n_link_state_changes++;
+
+ if (link_state->up)
+ netif_carrier_on(efx->net_dev);
+ else
+ netif_carrier_off(efx->net_dev);
+ }
+
+ /* Status message for kernel log */
+ if (link_state->up)
+ netif_info(efx, link, efx->net_dev,
+ "link up at %uMbps %s-duplex (MTU %d)\n",
+ link_state->speed, link_state->fd ? "full" : "half",
+ efx->net_dev->mtu);
+ else
+ netif_info(efx, link, efx->net_dev, "link down\n");
+}
+
+void efx_link_set_advertising(struct efx_nic *efx, u32 advertising)
+{
+ efx->link_advertising = advertising;
+ if (advertising) {
+ if (advertising & ADVERTISED_Pause)
+ efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX);
+ else
+ efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
+ if (advertising & ADVERTISED_Asym_Pause)
+ efx->wanted_fc ^= EFX_FC_TX;
+ }
+}
+
+void efx_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc)
+{
+ efx->wanted_fc = wanted_fc;
+ if (efx->link_advertising) {
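+		/* Standard pause advertisement encoding: RX only is
+		 * Pause|Asym_Pause, TX only is Asym_Pause, and symmetric
+		 * (RX+TX) is Pause alone, hence the XOR of Asym_Pause
+		 * below when TX flow control is wanted.
+		 */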
+ if (wanted_fc & EFX_FC_RX)
+ efx->link_advertising |= (ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause);
+ else
+ efx->link_advertising &= ~(ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause);
+ if (wanted_fc & EFX_FC_TX)
+ efx->link_advertising ^= ADVERTISED_Asym_Pause;
+ }
+}
+
+static void efx_fini_port(struct efx_nic *efx);
+
+/* Push loopback/power/transmit disable settings to the PHY, and reconfigure
+ * the MAC appropriately. All other PHY configuration changes are pushed
+ * through phy_op->set_settings(), and pushed asynchronously to the MAC
+ * through efx_monitor().
+ *
+ * Callers must hold the mac_lock
+ */
+int __efx_reconfigure_port(struct efx_nic *efx)
+{
+ enum efx_phy_mode phy_mode;
+ int rc;
+
+ WARN_ON(!mutex_is_locked(&efx->mac_lock));
+
+ /* Disable PHY transmit in mac level loopbacks */
+ phy_mode = efx->phy_mode;
+ if (LOOPBACK_INTERNAL(efx))
+ efx->phy_mode |= PHY_MODE_TX_DISABLED;
+ else
+ efx->phy_mode &= ~PHY_MODE_TX_DISABLED;
+
+ rc = efx->type->reconfigure_port(efx);
+
+ if (rc)
+ efx->phy_mode = phy_mode;
+
+ return rc;
+}
+
+/* Reinitialise the MAC to pick up new PHY settings, even if the port is
+ * disabled. */
+int efx_reconfigure_port(struct efx_nic *efx)
+{
+ int rc;
+
+ EFX_ASSERT_RESET_SERIALISED(efx);
+
+ mutex_lock(&efx->mac_lock);
+ rc = __efx_reconfigure_port(efx);
+ mutex_unlock(&efx->mac_lock);
+
+ return rc;
+}
+
+/* Asynchronous work item for changing MAC promiscuity and multicast
+ * hash. Avoid a drain/rx_ingress enable by reconfiguring the current
+ * MAC directly. */
+static void efx_mac_work(struct work_struct *data)
+{
+ struct efx_nic *efx = container_of(data, struct efx_nic, mac_work);
+
+ mutex_lock(&efx->mac_lock);
+ if (efx->port_enabled)
+ efx->type->reconfigure_mac(efx);
+ mutex_unlock(&efx->mac_lock);
+}
+
+static int efx_probe_port(struct efx_nic *efx)
+{
+ int rc;
+
+ netif_dbg(efx, probe, efx->net_dev, "create port\n");
+
+ if (phy_flash_cfg)
+ efx->phy_mode = PHY_MODE_SPECIAL;
+
+ /* Connect up MAC/PHY operations table */
+ rc = efx->type->probe_port(efx);
+ if (rc)
+ return rc;
+
+ /* Initialise MAC address to permanent address */
+ ether_addr_copy(efx->net_dev->dev_addr, efx->net_dev->perm_addr);
+
+ return 0;
+}
+
+static int efx_init_port(struct efx_nic *efx)
+{
+ int rc;
+
+ netif_dbg(efx, drv, efx->net_dev, "init port\n");
+
+ mutex_lock(&efx->mac_lock);
+
+ rc = efx->phy_op->init(efx);
+ if (rc)
+ goto fail1;
+
+ efx->port_initialized = true;
+
+ /* Reconfigure the MAC before creating dma queues (required for
+ * Falcon/A1 where RX_INGR_EN/TX_DRAIN_EN isn't supported) */
+ efx->type->reconfigure_mac(efx);
+
+ /* Ensure the PHY advertises the correct flow control settings */
+ rc = efx->phy_op->reconfigure(efx);
+ if (rc)
+ goto fail2;
+
+ mutex_unlock(&efx->mac_lock);
+ return 0;
+
+fail2:
+ efx->phy_op->fini(efx);
+fail1:
+ mutex_unlock(&efx->mac_lock);
+ return rc;
+}
+
+static void efx_start_port(struct efx_nic *efx)
+{
+ netif_dbg(efx, ifup, efx->net_dev, "start port\n");
+ BUG_ON(efx->port_enabled);
+
+ mutex_lock(&efx->mac_lock);
+ efx->port_enabled = true;
+
+ /* Ensure MAC ingress/egress is enabled */
+ efx->type->reconfigure_mac(efx);
+
+ mutex_unlock(&efx->mac_lock);
+}
+
+/* Cancel work for MAC reconfiguration, periodic hardware monitoring
+ * and the async self-test, wait for them to finish and prevent them
+ * being scheduled again. This doesn't cover online resets, which
+ * should only be cancelled when removing the device.
+ */
+static void efx_stop_port(struct efx_nic *efx)
+{
+ netif_dbg(efx, ifdown, efx->net_dev, "stop port\n");
+
+ EFX_ASSERT_RESET_SERIALISED(efx);
+
+ mutex_lock(&efx->mac_lock);
+ efx->port_enabled = false;
+ mutex_unlock(&efx->mac_lock);
+
+ /* Serialise against efx_set_multicast_list() */
+ netif_addr_lock_bh(efx->net_dev);
+ netif_addr_unlock_bh(efx->net_dev);
+
+ cancel_delayed_work_sync(&efx->monitor_work);
+ efx_selftest_async_cancel(efx);
+ cancel_work_sync(&efx->mac_work);
+}
+
+static void efx_fini_port(struct efx_nic *efx)
+{
+ netif_dbg(efx, drv, efx->net_dev, "shut down port\n");
+
+ if (!efx->port_initialized)
+ return;
+
+ efx->phy_op->fini(efx);
+ efx->port_initialized = false;
+
+ efx->link_state.up = false;
+ efx_link_status_changed(efx);
+}
+
+static void efx_remove_port(struct efx_nic *efx)
+{
+ netif_dbg(efx, drv, efx->net_dev, "destroying port\n");
+
+ efx->type->remove_port(efx);
+}
+
+/**************************************************************************
+ *
+ * NIC handling
+ *
+ **************************************************************************/
+
+static LIST_HEAD(efx_primary_list);
+static LIST_HEAD(efx_unassociated_list);
+
+static bool efx_same_controller(struct efx_nic *left, struct efx_nic *right)
+{
+ return left->type == right->type &&
+ left->vpd_sn && right->vpd_sn &&
+ !strcmp(left->vpd_sn, right->vpd_sn);
+}
+
+static void efx_associate(struct efx_nic *efx)
+{
+ struct efx_nic *other, *next;
+
+ if (efx->primary == efx) {
+ /* Adding primary function; look for secondaries */
+
+ netif_dbg(efx, probe, efx->net_dev, "adding to primary list\n");
+ list_add_tail(&efx->node, &efx_primary_list);
+
+ list_for_each_entry_safe(other, next, &efx_unassociated_list,
+ node) {
+ if (efx_same_controller(efx, other)) {
+ list_del(&other->node);
+ netif_dbg(other, probe, other->net_dev,
+ "moving to secondary list of %s %s\n",
+ pci_name(efx->pci_dev),
+ efx->net_dev->name);
+ list_add_tail(&other->node,
+ &efx->secondary_list);
+ other->primary = efx;
+ }
+ }
+ } else {
+ /* Adding secondary function; look for primary */
+
+ list_for_each_entry(other, &efx_primary_list, node) {
+ if (efx_same_controller(efx, other)) {
+ netif_dbg(efx, probe, efx->net_dev,
+ "adding to secondary list of %s %s\n",
+ pci_name(other->pci_dev),
+ other->net_dev->name);
+ list_add_tail(&efx->node,
+ &other->secondary_list);
+ efx->primary = other;
+ return;
+ }
+ }
+
+ netif_dbg(efx, probe, efx->net_dev,
+ "adding to unassociated list\n");
+ list_add_tail(&efx->node, &efx_unassociated_list);
+ }
+}
+
+static void efx_dissociate(struct efx_nic *efx)
+{
+ struct efx_nic *other, *next;
+
+ list_del(&efx->node);
+ efx->primary = NULL;
+
+ list_for_each_entry_safe(other, next, &efx->secondary_list, node) {
+ list_del(&other->node);
+ netif_dbg(other, probe, other->net_dev,
+ "moving to unassociated list\n");
+ list_add_tail(&other->node, &efx_unassociated_list);
+ other->primary = NULL;
+ }
+}
+
+/* This configures the PCI device to enable I/O and DMA. */
+static int efx_init_io(struct efx_nic *efx)
+{
+ struct pci_dev *pci_dev = efx->pci_dev;
+ dma_addr_t dma_mask = efx->type->max_dma_mask;
+ unsigned int mem_map_size = efx->type->mem_map_size(efx);
+ int rc;
+
+ netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");
+
+ rc = pci_enable_device(pci_dev);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "failed to enable PCI device\n");
+ goto fail1;
+ }
+
+ pci_set_master(pci_dev);
+
+ /* Set the PCI DMA mask. Try all possibilities from our
+ * genuine mask down to 32 bits, because some architectures
+ * (e.g. x86_64 with iommu_sac_force set) will allow 40 bit
+	 * masks even though they reject 46 bit masks.
+ */
+ while (dma_mask > 0x7fffffffUL) {
+ if (dma_supported(&pci_dev->dev, dma_mask)) {
+ rc = dma_set_mask_and_coherent(&pci_dev->dev, dma_mask);
+ if (rc == 0)
+ break;
+ }
+ dma_mask >>= 1;
+ }
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "could not find a suitable DMA mask\n");
+ goto fail2;
+ }
+ netif_dbg(efx, probe, efx->net_dev,
+ "using DMA mask %llx\n", (unsigned long long) dma_mask);
+
+ efx->membase_phys = pci_resource_start(efx->pci_dev, EFX_MEM_BAR);
+ rc = pci_request_region(pci_dev, EFX_MEM_BAR, "sfc");
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "request for memory BAR failed\n");
+ rc = -EIO;
+ goto fail3;
+ }
+ efx->membase = ioremap_nocache(efx->membase_phys, mem_map_size);
+ if (!efx->membase) {
+ netif_err(efx, probe, efx->net_dev,
+ "could not map memory BAR at %llx+%x\n",
+ (unsigned long long)efx->membase_phys, mem_map_size);
+ rc = -ENOMEM;
+ goto fail4;
+ }
+ netif_dbg(efx, probe, efx->net_dev,
+ "memory BAR at %llx+%x (virtual %p)\n",
+ (unsigned long long)efx->membase_phys, mem_map_size,
+ efx->membase);
+
+ return 0;
+
+ fail4:
+ pci_release_region(efx->pci_dev, EFX_MEM_BAR);
+ fail3:
+ efx->membase_phys = 0;
+ fail2:
+ pci_disable_device(efx->pci_dev);
+ fail1:
+ return rc;
+}
+
+static void efx_fini_io(struct efx_nic *efx)
+{
+ netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n");
+
+ if (efx->membase) {
+ iounmap(efx->membase);
+ efx->membase = NULL;
+ }
+
+ if (efx->membase_phys) {
+ pci_release_region(efx->pci_dev, EFX_MEM_BAR);
+ efx->membase_phys = 0;
+ }
+
+ pci_disable_device(efx->pci_dev);
+}
+
+static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
+{
+ cpumask_var_t thread_mask;
+ unsigned int count;
+ int cpu;
+
+ if (rss_cpus) {
+ count = rss_cpus;
+ } else {
+ if (unlikely(!zalloc_cpumask_var(&thread_mask, GFP_KERNEL))) {
+ netif_warn(efx, probe, efx->net_dev,
+ "RSS disabled due to allocation failure\n");
+ return 1;
+ }
+
+ count = 0;
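+		/* Count one channel per physical core: OR-ing in each CPU's
+		 * sibling mask means hyperthread siblings are only counted
+		 * once.
+		 */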
+ for_each_online_cpu(cpu) {
+ if (!cpumask_test_cpu(cpu, thread_mask)) {
+ ++count;
+ cpumask_or(thread_mask, thread_mask,
+ topology_thread_cpumask(cpu));
+ }
+ }
+
+ free_cpumask_var(thread_mask);
+ }
+
+ /* If RSS is requested for the PF *and* VFs then we can't write RSS
+ * table entries that are inaccessible to VFs
+ */
+ if (efx->type->sriov_wanted(efx) && efx_vf_size(efx) > 1 &&
+ count > efx_vf_size(efx)) {
+ netif_warn(efx, probe, efx->net_dev,
+ "Reducing number of RSS channels from %u to %u for "
+ "VF support. Increase vf-msix-limit to use more "
+ "channels on the PF.\n",
+ count, efx_vf_size(efx));
+ count = efx_vf_size(efx);
+ }
+
+ return count;
+}
+
+/* Probe the number and type of interrupts we are able to obtain, and
+ * the resulting numbers of channels and RX queues.
+ */
+static int efx_probe_interrupts(struct efx_nic *efx)
+{
+ unsigned int extra_channels = 0;
+ unsigned int i, j;
+ int rc;
+
+ for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++)
+ if (efx->extra_channel_type[i])
+ ++extra_channels;
+
+ if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
+ struct msix_entry xentries[EFX_MAX_CHANNELS];
+ unsigned int n_channels;
+
+ n_channels = efx_wanted_parallelism(efx);
+ if (separate_tx_channels)
+ n_channels *= 2;
+ n_channels += extra_channels;
+ n_channels = min(n_channels, efx->max_channels);
+
+ for (i = 0; i < n_channels; i++)
+ xentries[i].entry = i;
+ rc = pci_enable_msix_range(efx->pci_dev,
+ xentries, 1, n_channels);
+ if (rc < 0) {
+ /* Fall back to single channel MSI */
+ efx->interrupt_mode = EFX_INT_MODE_MSI;
+ netif_err(efx, drv, efx->net_dev,
+ "could not enable MSI-X\n");
+ } else if (rc < n_channels) {
+ netif_err(efx, drv, efx->net_dev,
+ "WARNING: Insufficient MSI-X vectors"
+ " available (%d < %u).\n", rc, n_channels);
+ netif_err(efx, drv, efx->net_dev,
+ "WARNING: Performance may be reduced.\n");
+ n_channels = rc;
+ }
+
+ if (rc > 0) {
+ efx->n_channels = n_channels;
+ if (n_channels > extra_channels)
+ n_channels -= extra_channels;
+ if (separate_tx_channels) {
+ efx->n_tx_channels = max(n_channels / 2, 1U);
+ efx->n_rx_channels = max(n_channels -
+ efx->n_tx_channels,
+ 1U);
+ } else {
+ efx->n_tx_channels = n_channels;
+ efx->n_rx_channels = n_channels;
+ }
+ for (i = 0; i < efx->n_channels; i++)
+ efx_get_channel(efx, i)->irq =
+ xentries[i].vector;
+ }
+ }
+
+ /* Try single interrupt MSI */
+ if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
+ efx->n_channels = 1;
+ efx->n_rx_channels = 1;
+ efx->n_tx_channels = 1;
+ rc = pci_enable_msi(efx->pci_dev);
+ if (rc == 0) {
+ efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
+ } else {
+ netif_err(efx, drv, efx->net_dev,
+ "could not enable MSI\n");
+ efx->interrupt_mode = EFX_INT_MODE_LEGACY;
+ }
+ }
+
+ /* Assume legacy interrupts */
+ if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
+ efx->n_channels = 1 + (separate_tx_channels ? 1 : 0);
+ efx->n_rx_channels = 1;
+ efx->n_tx_channels = 1;
+ efx->legacy_irq = efx->pci_dev->irq;
+ }
+
+ /* Assign extra channels if possible */
+ j = efx->n_channels;
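+	/* Extra channel types are given the highest-numbered channels,
+	 * counting down from the end; if MSI-X is not in use or there are
+	 * not enough channels, the type's handle_no_channel() hook is
+	 * called instead.
+	 */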
+ for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) {
+ if (!efx->extra_channel_type[i])
+ continue;
+ if (efx->interrupt_mode != EFX_INT_MODE_MSIX ||
+ efx->n_channels <= extra_channels) {
+ efx->extra_channel_type[i]->handle_no_channel(efx);
+ } else {
+ --j;
+ efx_get_channel(efx, j)->type =
+ efx->extra_channel_type[i];
+ }
+ }
+
+ /* RSS might be usable on VFs even if it is disabled on the PF */
+
+ efx->rss_spread = ((efx->n_rx_channels > 1 ||
+ !efx->type->sriov_wanted(efx)) ?
+ efx->n_rx_channels : efx_vf_size(efx));
+
+ return 0;
+}
+
+static int efx_soft_enable_interrupts(struct efx_nic *efx)
+{
+ struct efx_channel *channel, *end_channel;
+ int rc;
+
+ BUG_ON(efx->state == STATE_DISABLED);
+
+ efx->irq_soft_enabled = true;
+ smp_wmb();
+
+ efx_for_each_channel(channel, efx) {
+ if (!channel->type->keep_eventq) {
+ rc = efx_init_eventq(channel);
+ if (rc)
+ goto fail;
+ }
+ efx_start_eventq(channel);
+ }
+
+ efx_mcdi_mode_event(efx);
+
+ return 0;
+fail:
+ end_channel = channel;
+ efx_for_each_channel(channel, efx) {
+ if (channel == end_channel)
+ break;
+ efx_stop_eventq(channel);
+ if (!channel->type->keep_eventq)
+ efx_fini_eventq(channel);
+ }
+
+ return rc;
+}
+
+static void efx_soft_disable_interrupts(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ if (efx->state == STATE_DISABLED)
+ return;
+
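+	/* Event processing is about to stop, so MCDI completions must be
+	 * detected by polling rather than by event from here on.
+	 */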
+ efx_mcdi_mode_poll(efx);
+
+ efx->irq_soft_enabled = false;
+ smp_wmb();
+
+ if (efx->legacy_irq)
+ synchronize_irq(efx->legacy_irq);
+
+ efx_for_each_channel(channel, efx) {
+ if (channel->irq)
+ synchronize_irq(channel->irq);
+
+ efx_stop_eventq(channel);
+ if (!channel->type->keep_eventq)
+ efx_fini_eventq(channel);
+ }
+
+ /* Flush the asynchronous MCDI request queue */
+ efx_mcdi_flush_async(efx);
+}
+
+static int efx_enable_interrupts(struct efx_nic *efx)
+{
+ struct efx_channel *channel, *end_channel;
+ int rc;
+
+ BUG_ON(efx->state == STATE_DISABLED);
+
+ if (efx->eeh_disabled_legacy_irq) {
+ enable_irq(efx->legacy_irq);
+ efx->eeh_disabled_legacy_irq = false;
+ }
+
+ efx->type->irq_enable_master(efx);
+
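+	/* Only event queues that persist across a soft disable
+	 * (keep_eventq) are initialised here; the others are handled in
+	 * efx_soft_enable_interrupts().
+	 */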
+ efx_for_each_channel(channel, efx) {
+ if (channel->type->keep_eventq) {
+ rc = efx_init_eventq(channel);
+ if (rc)
+ goto fail;
+ }
+ }
+
+ rc = efx_soft_enable_interrupts(efx);
+ if (rc)
+ goto fail;
+
+ return 0;
+
+fail:
+ end_channel = channel;
+ efx_for_each_channel(channel, efx) {
+ if (channel == end_channel)
+ break;
+ if (channel->type->keep_eventq)
+ efx_fini_eventq(channel);
+ }
+
+ efx->type->irq_disable_non_ev(efx);
+
+ return rc;
+}
+
+static void efx_disable_interrupts(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ efx_soft_disable_interrupts(efx);
+
+ efx_for_each_channel(channel, efx) {
+ if (channel->type->keep_eventq)
+ efx_fini_eventq(channel);
+ }
+
+ efx->type->irq_disable_non_ev(efx);
+}
+
+static void efx_remove_interrupts(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ /* Remove MSI/MSI-X interrupts */
+ efx_for_each_channel(channel, efx)
+ channel->irq = 0;
+ pci_disable_msi(efx->pci_dev);
+ pci_disable_msix(efx->pci_dev);
+
+ /* Remove legacy interrupt */
+ efx->legacy_irq = 0;
+}
+
+static void efx_set_channels(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+ struct efx_tx_queue *tx_queue;
+
+ efx->tx_channel_offset =
+ separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
+
+ /* We need to mark which channels really have RX and TX
+ * queues, and adjust the TX queue numbers if we have separate
+ * RX-only and TX-only channels.
+ */
+ efx_for_each_channel(channel, efx) {
+ if (channel->channel < efx->n_rx_channels)
+ channel->rx_queue.core_index = channel->channel;
+ else
+ channel->rx_queue.core_index = -1;
+
+ efx_for_each_channel_tx_queue(tx_queue, channel)
+ tx_queue->queue -= (efx->tx_channel_offset *
+ EFX_TXQ_TYPES);
+ }
+}
+
+static int efx_probe_nic(struct efx_nic *efx)
+{
+ size_t i;
+ int rc;
+
+ netif_dbg(efx, probe, efx->net_dev, "creating NIC\n");
+
+ /* Carry out hardware-type specific initialisation */
+ rc = efx->type->probe(efx);
+ if (rc)
+ return rc;
+
+ /* Determine the number of channels and queues by trying to hook
+ * in MSI-X interrupts. */
+ rc = efx_probe_interrupts(efx);
+ if (rc)
+ goto fail1;
+
+ efx_set_channels(efx);
+
+ rc = efx->type->dimension_resources(efx);
+ if (rc)
+ goto fail2;
+
+ if (efx->n_channels > 1)
+ netdev_rss_key_fill(&efx->rx_hash_key, sizeof(efx->rx_hash_key));
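+	/* Spread flows evenly across the available RX channels by default:
+	 * ethtool_rxfh_indir_default(i, n) simply maps entry i to channel
+	 * i % n.
+	 */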
+ for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
+ efx->rx_indir_table[i] =
+ ethtool_rxfh_indir_default(i, efx->rss_spread);
+
+ netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
+ netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);
+
+ /* Initialise the interrupt moderation settings */
+ efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true,
+ true);
+
+ return 0;
+
+fail2:
+ efx_remove_interrupts(efx);
+fail1:
+ efx->type->remove(efx);
+ return rc;
+}
+
+static void efx_remove_nic(struct efx_nic *efx)
+{
+ netif_dbg(efx, drv, efx->net_dev, "destroying NIC\n");
+
+ efx_remove_interrupts(efx);
+ efx->type->remove(efx);
+}
+
+static int efx_probe_filters(struct efx_nic *efx)
+{
+ int rc;
+
+ spin_lock_init(&efx->filter_lock);
+
+ rc = efx->type->filter_table_probe(efx);
+ if (rc)
+ return rc;
+
+#ifdef CONFIG_RFS_ACCEL
+ if (efx->type->offload_features & NETIF_F_NTUPLE) {
+ efx->rps_flow_id = kcalloc(efx->type->max_rx_ip_filters,
+ sizeof(*efx->rps_flow_id),
+ GFP_KERNEL);
+ if (!efx->rps_flow_id) {
+ efx->type->filter_table_remove(efx);
+ return -ENOMEM;
+ }
+ }
+#endif
+
+ return 0;
+}
+
+static void efx_remove_filters(struct efx_nic *efx)
+{
+#ifdef CONFIG_RFS_ACCEL
+ kfree(efx->rps_flow_id);
+#endif
+ efx->type->filter_table_remove(efx);
+}
+
+static void efx_restore_filters(struct efx_nic *efx)
+{
+ efx->type->filter_table_restore(efx);
+}
+
+/**************************************************************************
+ *
+ * NIC startup/shutdown
+ *
+ *************************************************************************/
+
+static int efx_probe_all(struct efx_nic *efx)
+{
+ int rc;
+
+ rc = efx_probe_nic(efx);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev, "failed to create NIC\n");
+ goto fail1;
+ }
+
+ rc = efx_probe_port(efx);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev, "failed to create port\n");
+ goto fail2;
+ }
+
+ BUILD_BUG_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_RXQ_MIN_ENT);
+ if (WARN_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_TXQ_MIN_ENT(efx))) {
+ rc = -EINVAL;
+ goto fail3;
+ }
+ efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE;
+
+ rc = efx_probe_filters(efx);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "failed to create filter tables\n");
+ goto fail3;
+ }
+
+ rc = efx_probe_channels(efx);
+ if (rc)
+ goto fail4;
+
+ return 0;
+
+ fail4:
+ efx_remove_filters(efx);
+ fail3:
+ efx_remove_port(efx);
+ fail2:
+ efx_remove_nic(efx);
+ fail1:
+ return rc;
+}
+
+/* If the interface is supposed to be running but is not, start
+ * the hardware and software data path, regular activity for the port
+ * (MAC statistics, link polling, etc.) and schedule the port to be
+ * reconfigured. Interrupts must already be enabled. This function
+ * is safe to call multiple times, so long as the NIC is not disabled.
+ * Requires the RTNL lock.
+ */
+static void efx_start_all(struct efx_nic *efx)
+{
+ EFX_ASSERT_RESET_SERIALISED(efx);
+ BUG_ON(efx->state == STATE_DISABLED);
+
+ /* Check that it is appropriate to restart the interface. All
+ * of these flags are safe to read under just the rtnl lock */
+ if (efx->port_enabled || !netif_running(efx->net_dev) ||
+ efx->reset_pending)
+ return;
+
+ efx_start_port(efx);
+ efx_start_datapath(efx);
+
+ /* Start the hardware monitor if there is one */
+ if (efx->type->monitor != NULL)
+ queue_delayed_work(efx->workqueue, &efx->monitor_work,
+ efx_monitor_interval);
+
+ /* If link state detection is normally event-driven, we have
+ * to poll now because we could have missed a change
+ */
+ if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
+ mutex_lock(&efx->mac_lock);
+ if (efx->phy_op->poll(efx))
+ efx_link_status_changed(efx);
+ mutex_unlock(&efx->mac_lock);
+ }
+
+ efx->type->start_stats(efx);
+ efx->type->pull_stats(efx);
+ spin_lock_bh(&efx->stats_lock);
+ efx->type->update_stats(efx, NULL, NULL);
+ spin_unlock_bh(&efx->stats_lock);
+}
+
+/* Quiesce the hardware and software data path, and regular activity
+ * for the port without bringing the link down. Safe to call multiple
+ * times with the NIC in almost any state, but interrupts should be
+ * enabled. Requires the RTNL lock.
+ */
+static void efx_stop_all(struct efx_nic *efx)
+{
+ EFX_ASSERT_RESET_SERIALISED(efx);
+
+ /* port_enabled can be read safely under the rtnl lock */
+ if (!efx->port_enabled)
+ return;
+
+ /* update stats before we go down so we can accurately count
+ * rx_nodesc_drops
+ */
+ efx->type->pull_stats(efx);
+ spin_lock_bh(&efx->stats_lock);
+ efx->type->update_stats(efx, NULL, NULL);
+ spin_unlock_bh(&efx->stats_lock);
+ efx->type->stop_stats(efx);
+ efx_stop_port(efx);
+
+ /* Stop the kernel transmit interface. This is only valid if
+ * the device is stopped or detached; otherwise the watchdog
+ * may fire immediately.
+ */
+ WARN_ON(netif_running(efx->net_dev) &&
+ netif_device_present(efx->net_dev));
+ netif_tx_disable(efx->net_dev);
+
+ efx_stop_datapath(efx);
+}
+
+static void efx_remove_all(struct efx_nic *efx)
+{
+ efx_remove_channels(efx);
+ efx_remove_filters(efx);
+ efx_remove_port(efx);
+ efx_remove_nic(efx);
+}
+
+/**************************************************************************
+ *
+ * Interrupt moderation
+ *
+ **************************************************************************/
+
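+/* Convert a requested moderation interval from microseconds to timer
+ * ticks, rounding down but never below one tick for a non-zero request.
+ * For example, with a (hypothetical) 5000 ns timer quantum, a 30 us
+ * request becomes 30000 / 5000 = 6 ticks.
+ */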
+static unsigned int irq_mod_ticks(unsigned int usecs, unsigned int quantum_ns)
+{
+ if (usecs == 0)
+ return 0;
+ if (usecs * 1000 < quantum_ns)
+ return 1; /* never round down to 0 */
+ return usecs * 1000 / quantum_ns;
+}
+
+/* Set interrupt moderation parameters */
+int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
+ unsigned int rx_usecs, bool rx_adaptive,
+ bool rx_may_override_tx)
+{
+ struct efx_channel *channel;
+ unsigned int irq_mod_max = DIV_ROUND_UP(efx->type->timer_period_max *
+ efx->timer_quantum_ns,
+ 1000);
+ unsigned int tx_ticks;
+ unsigned int rx_ticks;
+
+ EFX_ASSERT_RESET_SERIALISED(efx);
+
+ if (tx_usecs > irq_mod_max || rx_usecs > irq_mod_max)
+ return -EINVAL;
+
+ tx_ticks = irq_mod_ticks(tx_usecs, efx->timer_quantum_ns);
+ rx_ticks = irq_mod_ticks(rx_usecs, efx->timer_quantum_ns);
+
+ if (tx_ticks != rx_ticks && efx->tx_channel_offset == 0 &&
+ !rx_may_override_tx) {
+ netif_err(efx, drv, efx->net_dev, "Channels are shared. "
+ "RX and TX IRQ moderation must be equal\n");
+ return -EINVAL;
+ }
+
+ efx->irq_rx_adaptive = rx_adaptive;
+ efx->irq_rx_moderation = rx_ticks;
+ efx_for_each_channel(channel, efx) {
+ if (efx_channel_has_rx_queue(channel))
+ channel->irq_moderation = rx_ticks;
+ else if (efx_channel_has_tx_queues(channel))
+ channel->irq_moderation = tx_ticks;
+ }
+
+ return 0;
+}
+
+void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
+ unsigned int *rx_usecs, bool *rx_adaptive)
+{
+ /* We must round up when converting ticks to microseconds
+ * because we round down when converting the other way.
+ */
+
+ *rx_adaptive = efx->irq_rx_adaptive;
+ *rx_usecs = DIV_ROUND_UP(efx->irq_rx_moderation *
+ efx->timer_quantum_ns,
+ 1000);
+
+ /* If channels are shared between RX and TX, so is IRQ
+ * moderation. Otherwise, IRQ moderation is the same for all
+ * TX channels and is not adaptive.
+ */
+ if (efx->tx_channel_offset == 0)
+ *tx_usecs = *rx_usecs;
+ else
+ *tx_usecs = DIV_ROUND_UP(
+ efx->channel[efx->tx_channel_offset]->irq_moderation *
+ efx->timer_quantum_ns,
+ 1000);
+}
+
+/**************************************************************************
+ *
+ * Hardware monitor
+ *
+ **************************************************************************/
+
+/* Run periodically off the general workqueue */
+static void efx_monitor(struct work_struct *data)
+{
+ struct efx_nic *efx = container_of(data, struct efx_nic,
+ monitor_work.work);
+
+ netif_vdbg(efx, timer, efx->net_dev,
+ "hardware monitor executing on CPU %d\n",
+ raw_smp_processor_id());
+ BUG_ON(efx->type->monitor == NULL);
+
+	/* If the mac_lock is already held then a port reconfiguration is
+	 * probably already in progress and will do most of the work of
+	 * monitor() anyway. */
+ if (mutex_trylock(&efx->mac_lock)) {
+ if (efx->port_enabled)
+ efx->type->monitor(efx);
+ mutex_unlock(&efx->mac_lock);
+ }
+
+ queue_delayed_work(efx->workqueue, &efx->monitor_work,
+ efx_monitor_interval);
+}
+
+/**************************************************************************
+ *
+ * ioctls
+ *
+ *************************************************************************/
+
+/* Net device ioctl
+ * Context: process, rtnl_lock() held.
+ */
+static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct mii_ioctl_data *data = if_mii(ifr);
+
+ if (cmd == SIOCSHWTSTAMP)
+ return efx_ptp_set_ts_config(efx, ifr);
+ if (cmd == SIOCGHWTSTAMP)
+ return efx_ptp_get_ts_config(efx, ifr);
+
+ /* Convert phy_id from older PRTAD/DEVAD format */
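+	/* The older encoding marked clause-45 addresses with bit 0x0400;
+	 * the XOR below clears that bit and sets MDIO_PHY_ID_C45 instead,
+	 * as expected by mdio_mii_ioctl().
+	 */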
+ if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
+ (data->phy_id & 0xfc00) == 0x0400)
+ data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400;
+
+ return mdio_mii_ioctl(&efx->mdio, data, cmd);
+}
+
+/**************************************************************************
+ *
+ * NAPI interface
+ *
+ **************************************************************************/
+
+static void efx_init_napi_channel(struct efx_channel *channel)
+{
+ struct efx_nic *efx = channel->efx;
+
+ channel->napi_dev = efx->net_dev;
+ netif_napi_add(channel->napi_dev, &channel->napi_str,
+ efx_poll, napi_weight);
+ napi_hash_add(&channel->napi_str);
+ efx_channel_init_lock(channel);
+}
+
+static void efx_init_napi(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ efx_for_each_channel(channel, efx)
+ efx_init_napi_channel(channel);
+}
+
+static void efx_fini_napi_channel(struct efx_channel *channel)
+{
+ if (channel->napi_dev) {
+ netif_napi_del(&channel->napi_str);
+ napi_hash_del(&channel->napi_str);
+ }
+ channel->napi_dev = NULL;
+}
+
+static void efx_fini_napi(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ efx_for_each_channel(channel, efx)
+ efx_fini_napi_channel(channel);
+}
+
+/**************************************************************************
+ *
+ * Kernel netpoll interface
+ *
+ *************************************************************************/
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+
+/* Although in the common case interrupts will be disabled, this is not
+ * guaranteed. However, all our work happens inside the NAPI callback,
+ * so no locking is required.
+ */
+static void efx_netpoll(struct net_device *net_dev)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_channel *channel;
+
+ efx_for_each_channel(channel, efx)
+ efx_schedule_channel(channel);
+}
+
+#endif
+
+#ifdef CONFIG_NET_RX_BUSY_POLL
+static int efx_busy_poll(struct napi_struct *napi)
+{
+ struct efx_channel *channel =
+ container_of(napi, struct efx_channel, napi_str);
+ struct efx_nic *efx = channel->efx;
+ int budget = 4;
+ int old_rx_packets, rx_packets;
+
+ if (!netif_running(efx->net_dev))
+ return LL_FLUSH_FAILED;
+
+ if (!efx_channel_lock_poll(channel))
+ return LL_FLUSH_BUSY;
+
+ old_rx_packets = channel->rx_queue.rx_packets;
+ efx_process_channel(channel, budget);
+
+ rx_packets = channel->rx_queue.rx_packets - old_rx_packets;
+
+ /* There is no race condition with NAPI here.
+ * NAPI will automatically be rescheduled if it yielded during busy
+ * polling, because it was not able to take the lock and thus returned
+ * the full budget.
+ */
+ efx_channel_unlock_poll(channel);
+
+ return rx_packets;
+}
+#endif
+
+/**************************************************************************
+ *
+ * Kernel net device interface
+ *
+ *************************************************************************/
+
+/* Context: process, rtnl_lock() held. */
+static int efx_net_open(struct net_device *net_dev)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ int rc;
+
+ netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n",
+ raw_smp_processor_id());
+
+ rc = efx_check_disabled(efx);
+ if (rc)
+ return rc;
+ if (efx->phy_mode & PHY_MODE_SPECIAL)
+ return -EBUSY;
+ if (efx_mcdi_poll_reboot(efx) && efx_reset(efx, RESET_TYPE_ALL))
+ return -EIO;
+
+ /* Notify the kernel of the link state polled during driver load,
+ * before the monitor starts running */
+ efx_link_status_changed(efx);
+
+ efx_start_all(efx);
+ efx_selftest_async_start(efx);
+ return 0;
+}
+
+/* Context: process, rtnl_lock() held.
+ * Note that the kernel will ignore our return code; this method
+ * should really be a void.
+ */
+static int efx_net_stop(struct net_device *net_dev)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
+ raw_smp_processor_id());
+
+ /* Stop the device and flush all the channels */
+ efx_stop_all(efx);
+
+ return 0;
+}
+
+/* Context: process, dev_base_lock or RTNL held, non-blocking. */
+static struct rtnl_link_stats64 *efx_net_stats(struct net_device *net_dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ spin_lock_bh(&efx->stats_lock);
+ efx->type->update_stats(efx, NULL, stats);
+ spin_unlock_bh(&efx->stats_lock);
+
+ return stats;
+}
+
+/* Context: netif_tx_lock held, BHs disabled. */
+static void efx_watchdog(struct net_device *net_dev)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ netif_err(efx, tx_err, efx->net_dev,
+ "TX stuck with port_enabled=%d: resetting channels\n",
+ efx->port_enabled);
+
+ efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
+}
+
+
+/* Context: process, rtnl_lock() held. */
+static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ int rc;
+
+ rc = efx_check_disabled(efx);
+ if (rc)
+ return rc;
+ if (new_mtu > EFX_MAX_MTU)
+ return -EINVAL;
+
+ netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
+
+ efx_device_detach_sync(efx);
+ efx_stop_all(efx);
+
+ mutex_lock(&efx->mac_lock);
+ net_dev->mtu = new_mtu;
+ efx->type->reconfigure_mac(efx);
+ mutex_unlock(&efx->mac_lock);
+
+ efx_start_all(efx);
+ netif_device_attach(efx->net_dev);
+ return 0;
+}
+
+static int efx_set_mac_address(struct net_device *net_dev, void *data)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct sockaddr *addr = data;
+ u8 *new_addr = addr->sa_data;
+
+ if (!is_valid_ether_addr(new_addr)) {
+ netif_err(efx, drv, efx->net_dev,
+ "invalid ethernet MAC address requested: %pM\n",
+ new_addr);
+ return -EADDRNOTAVAIL;
+ }
+
+ ether_addr_copy(net_dev->dev_addr, new_addr);
+ efx->type->sriov_mac_address_changed(efx);
+
+ /* Reconfigure the MAC */
+ mutex_lock(&efx->mac_lock);
+ efx->type->reconfigure_mac(efx);
+ mutex_unlock(&efx->mac_lock);
+
+ return 0;
+}
+
+/* Context: netif_addr_lock held, BHs disabled. */
+static void efx_set_rx_mode(struct net_device *net_dev)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ if (efx->port_enabled)
+ queue_work(efx->workqueue, &efx->mac_work);
+ /* Otherwise efx_start_port() will do this */
+}
+
+static int efx_set_features(struct net_device *net_dev, netdev_features_t data)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ /* If disabling RX n-tuple filtering, clear existing filters */
+ if (net_dev->features & ~data & NETIF_F_NTUPLE)
+ return efx->type->filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);
+
+ return 0;
+}
+
+static const struct net_device_ops efx_farch_netdev_ops = {
+ .ndo_open = efx_net_open,
+ .ndo_stop = efx_net_stop,
+ .ndo_get_stats64 = efx_net_stats,
+ .ndo_tx_timeout = efx_watchdog,
+ .ndo_start_xmit = efx_hard_start_xmit,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = efx_ioctl,
+ .ndo_change_mtu = efx_change_mtu,
+ .ndo_set_mac_address = efx_set_mac_address,
+ .ndo_set_rx_mode = efx_set_rx_mode,
+ .ndo_set_features = efx_set_features,
+#ifdef CONFIG_SFC_SRIOV
+ .ndo_set_vf_mac = efx_siena_sriov_set_vf_mac,
+ .ndo_set_vf_vlan = efx_siena_sriov_set_vf_vlan,
+ .ndo_set_vf_spoofchk = efx_siena_sriov_set_vf_spoofchk,
+ .ndo_get_vf_config = efx_siena_sriov_get_vf_config,
+#endif
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = efx_netpoll,
+#endif
+ .ndo_setup_tc = efx_setup_tc,
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ .ndo_busy_poll = efx_busy_poll,
+#endif
+#ifdef CONFIG_RFS_ACCEL
+ .ndo_rx_flow_steer = efx_filter_rfs,
+#endif
+};
+
+static const struct net_device_ops efx_ef10_netdev_ops = {
+ .ndo_open = efx_net_open,
+ .ndo_stop = efx_net_stop,
+ .ndo_get_stats64 = efx_net_stats,
+ .ndo_tx_timeout = efx_watchdog,
+ .ndo_start_xmit = efx_hard_start_xmit,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = efx_ioctl,
+ .ndo_change_mtu = efx_change_mtu,
+ .ndo_set_mac_address = efx_set_mac_address,
+ .ndo_set_rx_mode = efx_set_rx_mode,
+ .ndo_set_features = efx_set_features,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = efx_netpoll,
+#endif
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ .ndo_busy_poll = efx_busy_poll,
+#endif
+#ifdef CONFIG_RFS_ACCEL
+ .ndo_rx_flow_steer = efx_filter_rfs,
+#endif
+};
+
+static void efx_update_name(struct efx_nic *efx)
+{
+ strcpy(efx->name, efx->net_dev->name);
+ efx_mtd_rename(efx);
+ efx_set_channel_names(efx);
+}
+
+static int efx_netdev_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct net_device *net_dev = netdev_notifier_info_to_dev(ptr);
+
+ if ((net_dev->netdev_ops == &efx_farch_netdev_ops ||
+ net_dev->netdev_ops == &efx_ef10_netdev_ops) &&
+ event == NETDEV_CHANGENAME)
+ efx_update_name(netdev_priv(net_dev));
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block efx_netdev_notifier = {
+ .notifier_call = efx_netdev_event,
+};
+
+static ssize_t
+show_phy_type(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+ return sprintf(buf, "%d\n", efx->phy_type);
+}
+static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL);
+
+static int efx_register_netdev(struct efx_nic *efx)
+{
+ struct net_device *net_dev = efx->net_dev;
+ struct efx_channel *channel;
+ int rc;
+
+ net_dev->watchdog_timeo = 5 * HZ;
+ net_dev->irq = efx->pci_dev->irq;
+ if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) {
+ net_dev->netdev_ops = &efx_ef10_netdev_ops;
+ net_dev->priv_flags |= IFF_UNICAST_FLT;
+ } else {
+ net_dev->netdev_ops = &efx_farch_netdev_ops;
+ }
+ net_dev->ethtool_ops = &efx_ethtool_ops;
+ net_dev->gso_max_segs = EFX_TSO_MAX_SEGS;
+
+ rtnl_lock();
+
+ /* Enable resets to be scheduled and check whether any were
+ * already requested. If so, the NIC is probably hosed so we
+ * abort.
+ */
+ efx->state = STATE_READY;
+ smp_mb(); /* ensure we change state before checking reset_pending */
+ if (efx->reset_pending) {
+ netif_err(efx, probe, efx->net_dev,
+ "aborting probe due to scheduled reset\n");
+ rc = -EIO;
+ goto fail_locked;
+ }
+
+ rc = dev_alloc_name(net_dev, net_dev->name);
+ if (rc < 0)
+ goto fail_locked;
+ efx_update_name(efx);
+
+ /* Always start with carrier off; PHY events will detect the link */
+ netif_carrier_off(net_dev);
+
+ rc = register_netdevice(net_dev);
+ if (rc)
+ goto fail_locked;
+
+ efx_for_each_channel(channel, efx) {
+ struct efx_tx_queue *tx_queue;
+ efx_for_each_channel_tx_queue(tx_queue, channel)
+ efx_init_tx_queue_core_txq(tx_queue);
+ }
+
+ efx_associate(efx);
+
+ rtnl_unlock();
+
+ rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev,
+ "failed to init net dev attributes\n");
+ goto fail_registered;
+ }
+
+ return 0;
+
+fail_registered:
+ rtnl_lock();
+ efx_dissociate(efx);
+ unregister_netdevice(net_dev);
+fail_locked:
+ efx->state = STATE_UNINIT;
+ rtnl_unlock();
+ netif_err(efx, drv, efx->net_dev, "could not register net dev\n");
+ return rc;
+}
+
+static void efx_unregister_netdev(struct efx_nic *efx)
+{
+ if (!efx->net_dev)
+ return;
+
+ BUG_ON(netdev_priv(efx->net_dev) != efx);
+
+ strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
+ device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
+
+ rtnl_lock();
+ unregister_netdevice(efx->net_dev);
+ efx->state = STATE_UNINIT;
+ rtnl_unlock();
+}
+
+/**************************************************************************
+ *
+ * Device reset and suspend
+ *
+ **************************************************************************/
+
+/* Tears down the entire software state and most of the hardware state
+ * before reset. */
+void efx_reset_down(struct efx_nic *efx, enum reset_type method)
+{
+ EFX_ASSERT_RESET_SERIALISED(efx);
+
+ if (method == RESET_TYPE_MCDI_TIMEOUT)
+ efx->type->prepare_flr(efx);
+
+ efx_stop_all(efx);
+ efx_disable_interrupts(efx);
+
+ mutex_lock(&efx->mac_lock);
+ if (efx->port_initialized && method != RESET_TYPE_INVISIBLE)
+ efx->phy_op->fini(efx);
+ efx->type->fini(efx);
+}
+
+/* This function will always ensure that the locks acquired in
+ * efx_reset_down() are released. A failure return code indicates
+ * that we were unable to reinitialise the hardware, and the
+ * driver should be disabled. If ok is false, then the rx and tx
+ * engines are not restarted, pending a RESET_DISABLE. */
+int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
+{
+ int rc;
+
+ EFX_ASSERT_RESET_SERIALISED(efx);
+
+ if (method == RESET_TYPE_MCDI_TIMEOUT)
+ efx->type->finish_flr(efx);
+
+ /* Ensure that SRAM is initialised even if we're disabling the device */
+ rc = efx->type->init(efx);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n");
+ goto fail;
+ }
+
+ if (!ok)
+ goto fail;
+
+ if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) {
+ rc = efx->phy_op->init(efx);
+ if (rc)
+ goto fail;
+ if (efx->phy_op->reconfigure(efx))
+ netif_err(efx, drv, efx->net_dev,
+ "could not restore PHY settings\n");
+ }
+
+ rc = efx_enable_interrupts(efx);
+ if (rc)
+ goto fail;
+ efx_restore_filters(efx);
+ efx->type->sriov_reset(efx);
+
+ mutex_unlock(&efx->mac_lock);
+
+ efx_start_all(efx);
+
+ return 0;
+
+fail:
+ efx->port_initialized = false;
+
+ mutex_unlock(&efx->mac_lock);
+
+ return rc;
+}
+
+/* Reset the NIC using the specified method. Note that the reset may
+ * fail, in which case the card will be left in an unusable state.
+ *
+ * Caller must hold the rtnl_lock.
+ */
+int efx_reset(struct efx_nic *efx, enum reset_type method)
+{
+ int rc, rc2;
+ bool disabled;
+
+ netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
+ RESET_TYPE(method));
+
+ efx_device_detach_sync(efx);
+ efx_reset_down(efx, method);
+
+ rc = efx->type->reset(efx, method);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n");
+ goto out;
+ }
+
+ /* Clear flags for the scopes we covered. We assume the NIC and
+ * driver are now quiescent so that there is no race here.
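+	 * Note that -(1 << (method + 1)) == ~((1 << (method + 1)) - 1), so
+	 * this clears every pending-reset bit up to and including 'method'.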
+ */
+ if (method < RESET_TYPE_MAX_METHOD)
+ efx->reset_pending &= -(1 << (method + 1));
+ else /* it doesn't fit into the well-ordered scope hierarchy */
+ __clear_bit(method, &efx->reset_pending);
+
+ /* Reinitialise bus-mastering, which may have been turned off before
+ * the reset was scheduled. This is still appropriate, even in the
+ * RESET_TYPE_DISABLE since this driver generally assumes the hardware
+ * can respond to requests. */
+ pci_set_master(efx->pci_dev);
+
+out:
+ /* Leave device stopped if necessary */
+ disabled = rc ||
+ method == RESET_TYPE_DISABLE ||
+ method == RESET_TYPE_RECOVER_OR_DISABLE;
+ rc2 = efx_reset_up(efx, method, !disabled);
+ if (rc2) {
+ disabled = true;
+ if (!rc)
+ rc = rc2;
+ }
+
+ if (disabled) {
+ dev_close(efx->net_dev);
+ netif_err(efx, drv, efx->net_dev, "has been disabled\n");
+ efx->state = STATE_DISABLED;
+ } else {
+ netif_dbg(efx, drv, efx->net_dev, "reset complete\n");
+ netif_device_attach(efx->net_dev);
+ }
+ return rc;
+}
+
+/* Try recovery mechanisms.
+ * For now only EEH is supported.
+ * Returns 0 if the recovery mechanisms are unsuccessful.
+ * Returns a non-zero value otherwise.
+ */
+int efx_try_recovery(struct efx_nic *efx)
+{
+#ifdef CONFIG_EEH
+ /* A PCI error can occur and not be seen by EEH because nothing
+ * happens on the PCI bus. In this case the driver may fail and
+ * schedule a 'recover or reset', leading to this recovery handler.
+ * Manually call the eeh failure check function.
+ */
+ struct eeh_dev *eehdev = pci_dev_to_eeh_dev(efx->pci_dev);
+ if (eeh_dev_check_failure(eehdev)) {
+ /* The EEH mechanisms will handle the error and reset the
+ * device if necessary.
+ */
+ return 1;
+ }
+#endif
+ return 0;
+}
+
+static void efx_wait_for_bist_end(struct efx_nic *efx)
+{
+ int i;
+
+ for (i = 0; i < BIST_WAIT_DELAY_COUNT; ++i) {
+ if (efx_mcdi_poll_reboot(efx))
+ goto out;
+ msleep(BIST_WAIT_DELAY_MS);
+ }
+
+ netif_err(efx, drv, efx->net_dev, "Warning: No MC reboot after BIST mode\n");
+out:
+ /* Either way unset the BIST flag. If we found no reboot we probably
+ * won't recover, but we should try.
+ */
+ efx->mc_bist_for_other_fn = false;
+}
+
+/* The worker thread exists so that code that cannot sleep can
+ * schedule a reset for later.
+ */
+static void efx_reset_work(struct work_struct *data)
+{
+ struct efx_nic *efx = container_of(data, struct efx_nic, reset_work);
+ unsigned long pending;
+ enum reset_type method;
+
+ pending = ACCESS_ONCE(efx->reset_pending);
+ method = fls(pending) - 1;
+
+ if (method == RESET_TYPE_MC_BIST)
+ efx_wait_for_bist_end(efx);
+
+ if ((method == RESET_TYPE_RECOVER_OR_DISABLE ||
+ method == RESET_TYPE_RECOVER_OR_ALL) &&
+ efx_try_recovery(efx))
+ return;
+
+ if (!pending)
+ return;
+
+ rtnl_lock();
+
+ /* We checked the state in efx_schedule_reset() but it may
+ * have changed by now. Now that we have the RTNL lock,
+ * it cannot change again.
+ */
+ if (efx->state == STATE_READY)
+ (void)efx_reset(efx, method);
+
+ rtnl_unlock();
+}
+
+void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
+{
+ enum reset_type method;
+
+ if (efx->state == STATE_RECOVERY) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "recovering: skip scheduling %s reset\n",
+ RESET_TYPE(type));
+ return;
+ }
+
+ switch (type) {
+ case RESET_TYPE_INVISIBLE:
+ case RESET_TYPE_ALL:
+ case RESET_TYPE_RECOVER_OR_ALL:
+ case RESET_TYPE_WORLD:
+ case RESET_TYPE_DISABLE:
+ case RESET_TYPE_RECOVER_OR_DISABLE:
+ case RESET_TYPE_MC_BIST:
+ case RESET_TYPE_MCDI_TIMEOUT:
+ method = type;
+ netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
+ RESET_TYPE(method));
+ break;
+ default:
+ method = efx->type->map_reset_reason(type);
+ netif_dbg(efx, drv, efx->net_dev,
+ "scheduling %s reset for %s\n",
+ RESET_TYPE(method), RESET_TYPE(type));
+ break;
+ }
+
+ set_bit(method, &efx->reset_pending);
+ smp_mb(); /* ensure we change reset_pending before checking state */
+
+ /* If we're not READY then just leave the flags set as the cue
+ * to abort probing or reschedule the reset later.
+ */
+ if (ACCESS_ONCE(efx->state) != STATE_READY)
+ return;
+
+ /* efx_process_channel() will no longer read events once a
+	 * reset is scheduled. So switch back to polled MCDI completions. */
+ efx_mcdi_mode_poll(efx);
+
+ queue_work(reset_workqueue, &efx->reset_work);
+}
+
+/**************************************************************************
+ *
+ * List of NICs we support
+ *
+ **************************************************************************/
+
+/* PCI device ID table */
+static const struct pci_device_id efx_pci_table[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE,
+ PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0),
+ .driver_data = (unsigned long) &falcon_a1_nic_type},
+ {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE,
+ PCI_DEVICE_ID_SOLARFLARE_SFC4000B),
+ .driver_data = (unsigned long) &falcon_b0_nic_type},
+ {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0803), /* SFC9020 */
+ .driver_data = (unsigned long) &siena_a0_nic_type},
+ {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0813), /* SFL9021 */
+ .driver_data = (unsigned long) &siena_a0_nic_type},
+ {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0903), /* SFC9120 PF */
+ .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
+ {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0923), /* SFC9140 PF */
+ .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
+ {0} /* end of list */
+};
+
+/**************************************************************************
+ *
+ * Dummy PHY/MAC operations
+ *
+ * Can be used for some unimplemented operations
+ * Needed so all function pointers are valid and do not have to be tested
+ * before use
+ *
+ **************************************************************************/
+int efx_port_dummy_op_int(struct efx_nic *efx)
+{
+ return 0;
+}
+void efx_port_dummy_op_void(struct efx_nic *efx) {}
+
+static bool efx_port_dummy_op_poll(struct efx_nic *efx)
+{
+ return false;
+}
+
+static const struct efx_phy_operations efx_dummy_phy_operations = {
+ .init = efx_port_dummy_op_int,
+ .reconfigure = efx_port_dummy_op_int,
+ .poll = efx_port_dummy_op_poll,
+ .fini = efx_port_dummy_op_void,
+};
+
+/**************************************************************************
+ *
+ * Data housekeeping
+ *
+ **************************************************************************/
+
+/* This zeroes out and then fills in the invariants in a struct
+ * efx_nic (including all sub-structures).
+ */
+static int efx_init_struct(struct efx_nic *efx,
+ struct pci_dev *pci_dev, struct net_device *net_dev)
+{
+ int i;
+
+ /* Initialise common structures */
+ INIT_LIST_HEAD(&efx->node);
+ INIT_LIST_HEAD(&efx->secondary_list);
+ spin_lock_init(&efx->biu_lock);
+#ifdef CONFIG_SFC_MTD
+ INIT_LIST_HEAD(&efx->mtd_list);
+#endif
+ INIT_WORK(&efx->reset_work, efx_reset_work);
+ INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
+ INIT_DELAYED_WORK(&efx->selftest_work, efx_selftest_async_work);
+ efx->pci_dev = pci_dev;
+ efx->msg_enable = debug;
+ efx->state = STATE_UNINIT;
+ strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
+
+ efx->net_dev = net_dev;
+ efx->rx_prefix_size = efx->type->rx_prefix_size;
+ efx->rx_ip_align =
+ NET_IP_ALIGN ? (efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0;
+ efx->rx_packet_hash_offset =
+ efx->type->rx_hash_offset - efx->type->rx_prefix_size;
+ efx->rx_packet_ts_offset =
+ efx->type->rx_ts_offset - efx->type->rx_prefix_size;
+ spin_lock_init(&efx->stats_lock);
+ mutex_init(&efx->mac_lock);
+ efx->phy_op = &efx_dummy_phy_operations;
+ efx->mdio.dev = net_dev;
+ INIT_WORK(&efx->mac_work, efx_mac_work);
+ init_waitqueue_head(&efx->flush_wq);
+
+ for (i = 0; i < EFX_MAX_CHANNELS; i++) {
+ efx->channel[i] = efx_alloc_channel(efx, i, NULL);
+ if (!efx->channel[i])
+ goto fail;
+ efx->msi_context[i].efx = efx;
+ efx->msi_context[i].index = i;
+ }
+
+ /* Higher numbered interrupt modes are less capable! */
+ efx->interrupt_mode = max(efx->type->max_interrupt_mode,
+ interrupt_mode);
+
+ /* Would be good to use the net_dev name, but we're too early */
+ snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s",
+ pci_name(pci_dev));
+ efx->workqueue = create_singlethread_workqueue(efx->workqueue_name);
+ if (!efx->workqueue)
+ goto fail;
+
+ return 0;
+
+fail:
+ efx_fini_struct(efx);
+ return -ENOMEM;
+}
+
+static void efx_fini_struct(struct efx_nic *efx)
+{
+ int i;
+
+ for (i = 0; i < EFX_MAX_CHANNELS; i++)
+ kfree(efx->channel[i]);
+
+ kfree(efx->vpd_sn);
+
+ if (efx->workqueue) {
+ destroy_workqueue(efx->workqueue);
+ efx->workqueue = NULL;
+ }
+}
+
+void efx_update_sw_stats(struct efx_nic *efx, u64 *stats)
+{
+ u64 n_rx_nodesc_trunc = 0;
+ struct efx_channel *channel;
+
+ efx_for_each_channel(channel, efx)
+ n_rx_nodesc_trunc += channel->n_rx_nodesc_trunc;
+ stats[GENERIC_STAT_rx_nodesc_trunc] = n_rx_nodesc_trunc;
+ stats[GENERIC_STAT_rx_noskb_drops] = atomic_read(&efx->n_rx_noskb_drops);
+}
+
+/**************************************************************************
+ *
+ * PCI interface
+ *
+ **************************************************************************/
+
+/* Main body of final NIC shutdown code
+ * This is called only at module unload (or hotplug removal).
+ */
+static void efx_pci_remove_main(struct efx_nic *efx)
+{
+ /* Flush reset_work. It can no longer be scheduled since we
+ * are not READY.
+ */
+ BUG_ON(efx->state == STATE_READY);
+ cancel_work_sync(&efx->reset_work);
+
+ efx_disable_interrupts(efx);
+ efx_nic_fini_interrupt(efx);
+ efx_fini_port(efx);
+ efx->type->fini(efx);
+ efx_fini_napi(efx);
+ efx_remove_all(efx);
+}
+
+/* Final NIC shutdown
+ * This is called only at module unload (or hotplug removal).
+ */
+static void efx_pci_remove(struct pci_dev *pci_dev)
+{
+ struct efx_nic *efx;
+
+ efx = pci_get_drvdata(pci_dev);
+ if (!efx)
+ return;
+
+ /* Mark the NIC as fini, then stop the interface */
+ rtnl_lock();
+ efx_dissociate(efx);
+ dev_close(efx->net_dev);
+ efx_disable_interrupts(efx);
+ rtnl_unlock();
+
+ efx->type->sriov_fini(efx);
+ efx_unregister_netdev(efx);
+
+ efx_mtd_remove(efx);
+
+ efx_pci_remove_main(efx);
+
+ efx_fini_io(efx);
+ netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n");
+
+ efx_fini_struct(efx);
+ free_netdev(efx->net_dev);
+
+ pci_disable_pcie_error_reporting(pci_dev);
+}
+
+/* NIC VPD information
+ * Called during probe to display the part number of the
+ * installed NIC. VPD is potentially very large but this should
+ * always appear within the first 512 bytes.
+ */
+#define SFC_VPD_LEN 512
+static void efx_probe_vpd_strings(struct efx_nic *efx)
+{
+ struct pci_dev *dev = efx->pci_dev;
+ char vpd_data[SFC_VPD_LEN];
+ ssize_t vpd_size;
+ int ro_start, ro_size, i, j;
+
+ /* Get the vpd data from the device */
+ vpd_size = pci_read_vpd(dev, 0, sizeof(vpd_data), vpd_data);
+ if (vpd_size <= 0) {
+ netif_err(efx, drv, efx->net_dev, "Unable to read VPD\n");
+ return;
+ }
+
+ /* Get the Read only section */
+ ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA);
+ if (ro_start < 0) {
+ netif_err(efx, drv, efx->net_dev, "VPD Read-only not found\n");
+ return;
+ }
+
+ ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
+ j = ro_size;
+ i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
+ if (i + j > vpd_size)
+ j = vpd_size - i;
+
+ /* Get the Part number */
+ i = pci_vpd_find_info_keyword(vpd_data, i, j, "PN");
+ if (i < 0) {
+ netif_err(efx, drv, efx->net_dev, "Part number not found\n");
+ return;
+ }
+
+ j = pci_vpd_info_field_size(&vpd_data[i]);
+ i += PCI_VPD_INFO_FLD_HDR_SIZE;
+ if (i + j > vpd_size) {
+ netif_err(efx, drv, efx->net_dev, "Incomplete part number\n");
+ return;
+ }
+
+ netif_info(efx, drv, efx->net_dev,
+ "Part Number : %.*s\n", j, &vpd_data[i]);
+
+ i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
+ j = ro_size;
+ i = pci_vpd_find_info_keyword(vpd_data, i, j, "SN");
+ if (i < 0) {
+ netif_err(efx, drv, efx->net_dev, "Serial number not found\n");
+ return;
+ }
+
+ j = pci_vpd_info_field_size(&vpd_data[i]);
+ i += PCI_VPD_INFO_FLD_HDR_SIZE;
+ if (i + j > vpd_size) {
+ netif_err(efx, drv, efx->net_dev, "Incomplete serial number\n");
+ return;
+ }
+
+ efx->vpd_sn = kmalloc(j + 1, GFP_KERNEL);
+ if (!efx->vpd_sn)
+ return;
+
+ snprintf(efx->vpd_sn, j + 1, "%s", &vpd_data[i]);
+}
+
+
+/* Main body of NIC initialisation
+ * This is called at module load (or hotplug insertion, theoretically).
+ */
+static int efx_pci_probe_main(struct efx_nic *efx)
+{
+ int rc;
+
+ /* Do start-of-day initialisation */
+ rc = efx_probe_all(efx);
+ if (rc)
+ goto fail1;
+
+ efx_init_napi(efx);
+
+ rc = efx->type->init(efx);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "failed to initialise NIC\n");
+ goto fail3;
+ }
+
+ rc = efx_init_port(efx);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "failed to initialise port\n");
+ goto fail4;
+ }
+
+ rc = efx_nic_init_interrupt(efx);
+ if (rc)
+ goto fail5;
+ rc = efx_enable_interrupts(efx);
+ if (rc)
+ goto fail6;
+
+ return 0;
+
+ fail6:
+ efx_nic_fini_interrupt(efx);
+ fail5:
+ efx_fini_port(efx);
+ fail4:
+ efx->type->fini(efx);
+ fail3:
+ efx_fini_napi(efx);
+ efx_remove_all(efx);
+ fail1:
+ return rc;
+}
+
+/* NIC initialisation
+ *
+ * This is called at module load (or hotplug insertion,
+ * theoretically). It sets up PCI mappings, resets the NIC,
+ * sets up and registers the network devices with the kernel and hooks
+ * the interrupt service routine. It does not prepare the device for
+ * transmission; this is left to the first time one of the network
+ * interfaces is brought up (i.e. efx_net_open).
+ */
+static int efx_pci_probe(struct pci_dev *pci_dev,
+ const struct pci_device_id *entry)
+{
+ struct net_device *net_dev;
+ struct efx_nic *efx;
+ int rc;
+
+ /* Allocate and initialise a struct net_device and struct efx_nic */
+ net_dev = alloc_etherdev_mqs(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES,
+ EFX_MAX_RX_QUEUES);
+ if (!net_dev)
+ return -ENOMEM;
+ efx = netdev_priv(net_dev);
+ efx->type = (const struct efx_nic_type *) entry->driver_data;
+ net_dev->features |= (efx->type->offload_features | NETIF_F_SG |
+ NETIF_F_HIGHDMA | NETIF_F_TSO |
+ NETIF_F_RXCSUM);
+ if (efx->type->offload_features & NETIF_F_V6_CSUM)
+ net_dev->features |= NETIF_F_TSO6;
+ /* Mask for features that also apply to VLAN devices */
+ net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG |
+ NETIF_F_HIGHDMA | NETIF_F_ALL_TSO |
+ NETIF_F_RXCSUM);
+	/* All offloads other than HIGHDMA can be toggled */
+ net_dev->hw_features = net_dev->features & ~NETIF_F_HIGHDMA;
+ pci_set_drvdata(pci_dev, efx);
+ SET_NETDEV_DEV(net_dev, &pci_dev->dev);
+ rc = efx_init_struct(efx, pci_dev, net_dev);
+ if (rc)
+ goto fail1;
+
+ netif_info(efx, probe, efx->net_dev,
+ "Solarflare NIC detected\n");
+
+ efx_probe_vpd_strings(efx);
+
+ /* Set up basic I/O (BAR mappings etc) */
+ rc = efx_init_io(efx);
+ if (rc)
+ goto fail2;
+
+ rc = efx_pci_probe_main(efx);
+ if (rc)
+ goto fail3;
+
+ rc = efx_register_netdev(efx);
+ if (rc)
+ goto fail4;
+
+ rc = efx->type->sriov_init(efx);
+ if (rc)
+ netif_err(efx, probe, efx->net_dev,
+ "SR-IOV can't be enabled rc %d\n", rc);
+
+ netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n");
+
+ /* Try to create MTDs, but allow this to fail */
+ rtnl_lock();
+ rc = efx_mtd_probe(efx);
+ rtnl_unlock();
+ if (rc)
+ netif_warn(efx, probe, efx->net_dev,
+ "failed to create MTDs (%d)\n", rc);
+
+ rc = pci_enable_pcie_error_reporting(pci_dev);
+ if (rc && rc != -EINVAL)
+ netif_warn(efx, probe, efx->net_dev,
+ "pci_enable_pcie_error_reporting failed (%d)\n", rc);
+
+ return 0;
+
+ fail4:
+ efx_pci_remove_main(efx);
+ fail3:
+ efx_fini_io(efx);
+ fail2:
+ efx_fini_struct(efx);
+ fail1:
+ WARN_ON(rc > 0);
+ netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc);
+ free_netdev(net_dev);
+ return rc;
+}
+
+static int efx_pm_freeze(struct device *dev)
+{
+ struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+
+ rtnl_lock();
+
+ if (efx->state != STATE_DISABLED) {
+ efx->state = STATE_UNINIT;
+
+ efx_device_detach_sync(efx);
+
+ efx_stop_all(efx);
+ efx_disable_interrupts(efx);
+ }
+
+ rtnl_unlock();
+
+ return 0;
+}
+
+static int efx_pm_thaw(struct device *dev)
+{
+ int rc;
+ struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+
+ rtnl_lock();
+
+ if (efx->state != STATE_DISABLED) {
+ rc = efx_enable_interrupts(efx);
+ if (rc)
+ goto fail;
+
+ mutex_lock(&efx->mac_lock);
+ efx->phy_op->reconfigure(efx);
+ mutex_unlock(&efx->mac_lock);
+
+ efx_start_all(efx);
+
+ netif_device_attach(efx->net_dev);
+
+ efx->state = STATE_READY;
+
+ efx->type->resume_wol(efx);
+ }
+
+ rtnl_unlock();
+
+ /* Reschedule any quenched resets scheduled during efx_pm_freeze() */
+ queue_work(reset_workqueue, &efx->reset_work);
+
+ return 0;
+
+fail:
+ rtnl_unlock();
+
+ return rc;
+}
+
+static int efx_pm_poweroff(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct efx_nic *efx = pci_get_drvdata(pci_dev);
+
+ efx->type->fini(efx);
+
+ efx->reset_pending = 0;
+
+ pci_save_state(pci_dev);
+ return pci_set_power_state(pci_dev, PCI_D3hot);
+}
+
+/* Used for both resume and restore */
+static int efx_pm_resume(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct efx_nic *efx = pci_get_drvdata(pci_dev);
+ int rc;
+
+ rc = pci_set_power_state(pci_dev, PCI_D0);
+ if (rc)
+ return rc;
+ pci_restore_state(pci_dev);
+ rc = pci_enable_device(pci_dev);
+ if (rc)
+ return rc;
+ pci_set_master(efx->pci_dev);
+ rc = efx->type->reset(efx, RESET_TYPE_ALL);
+ if (rc)
+ return rc;
+ rc = efx->type->init(efx);
+ if (rc)
+ return rc;
+ rc = efx_pm_thaw(dev);
+ return rc;
+}
+
+static int efx_pm_suspend(struct device *dev)
+{
+ int rc;
+
+ efx_pm_freeze(dev);
+ rc = efx_pm_poweroff(dev);
+ if (rc)
+ efx_pm_resume(dev);
+ return rc;
+}
+
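+/* These callbacks cover both suspend-to-RAM (.suspend/.resume) and
+ * hibernation (.freeze/.thaw around image creation, .poweroff/.restore
+ * around the power-down itself); efx_pm_resume() is shared between the
+ * resume and restore paths.
+ */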
+static const struct dev_pm_ops efx_pm_ops = {
+ .suspend = efx_pm_suspend,
+ .resume = efx_pm_resume,
+ .freeze = efx_pm_freeze,
+ .thaw = efx_pm_thaw,
+ .poweroff = efx_pm_poweroff,
+ .restore = efx_pm_resume,
+};
+
+/* A PCI error affecting this device was detected.
+ * At this point MMIO and DMA may be disabled.
+ * Stop the software path and request a slot reset.
+ */
+static pci_ers_result_t efx_io_error_detected(struct pci_dev *pdev,
+ enum pci_channel_state state)
+{
+ pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
+ struct efx_nic *efx = pci_get_drvdata(pdev);
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ rtnl_lock();
+
+ if (efx->state != STATE_DISABLED) {
+ efx->state = STATE_RECOVERY;
+ efx->reset_pending = 0;
+
+ efx_device_detach_sync(efx);
+
+ efx_stop_all(efx);
+ efx_disable_interrupts(efx);
+
+ status = PCI_ERS_RESULT_NEED_RESET;
+ } else {
+ /* If the interface is disabled we don't want to do anything
+ * with it.
+ */
+ status = PCI_ERS_RESULT_RECOVERED;
+ }
+
+ rtnl_unlock();
+
+ pci_disable_device(pdev);
+
+ return status;
+}
+
+/* Fake a successful reset, which will be performed later in efx_io_resume. */
+static pci_ers_result_t efx_io_slot_reset(struct pci_dev *pdev)
+{
+ struct efx_nic *efx = pci_get_drvdata(pdev);
+ pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
+ int rc;
+
+ if (pci_enable_device(pdev)) {
+ netif_err(efx, hw, efx->net_dev,
+ "Cannot re-enable PCI device after reset.\n");
+ status = PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ rc = pci_cleanup_aer_uncorrect_error_status(pdev);
+ if (rc) {
+ netif_err(efx, hw, efx->net_dev,
+ "pci_cleanup_aer_uncorrect_error_status failed (%d)\n", rc);
+ /* Non-fatal error. Continue. */
+ }
+
+ return status;
+}
+
+/* Perform the actual reset and resume I/O operations. */
+static void efx_io_resume(struct pci_dev *pdev)
+{
+ struct efx_nic *efx = pci_get_drvdata(pdev);
+ int rc;
+
+ rtnl_lock();
+
+ if (efx->state == STATE_DISABLED)
+ goto out;
+
+ rc = efx_reset(efx, RESET_TYPE_ALL);
+ if (rc) {
+ netif_err(efx, hw, efx->net_dev,
+ "efx_reset failed after PCI error (%d)\n", rc);
+ } else {
+ efx->state = STATE_READY;
+ netif_dbg(efx, hw, efx->net_dev,
+ "Done resetting and resuming IO after PCI error.\n");
+ }
+
+out:
+ rtnl_unlock();
+}
+
+/* For simplicity and reliability, we always require a slot reset and try to
+ * reset the hardware when a pci error affecting the device is detected.
+ * We leave both the link_reset and mmio_enabled callbacks unimplemented:
+ * with our request for slot reset the mmio_enabled callback will never be
+ * called, and the link_reset callback is not used by AER or EEH mechanisms.
+ */
+static struct pci_error_handlers efx_err_handlers = {
+ .error_detected = efx_io_error_detected,
+ .slot_reset = efx_io_slot_reset,
+ .resume = efx_io_resume,
+};
+
+static struct pci_driver efx_pci_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = efx_pci_table,
+ .probe = efx_pci_probe,
+ .remove = efx_pci_remove,
+ .driver.pm = &efx_pm_ops,
+ .err_handler = &efx_err_handlers,
+};
+
+/**************************************************************************
+ *
+ * Kernel module interface
+ *
+ *************************************************************************/
+
+module_param(interrupt_mode, uint, 0444);
+MODULE_PARM_DESC(interrupt_mode,
+ "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");
+
+static int __init efx_init_module(void)
+{
+ int rc;
+
+ printk(KERN_INFO "Solarflare NET driver v" EFX_DRIVER_VERSION "\n");
+
+ rc = register_netdevice_notifier(&efx_netdev_notifier);
+ if (rc)
+ goto err_notifier;
+
+ rc = efx_init_sriov();
+ if (rc)
+ goto err_sriov;
+
+ reset_workqueue = create_singlethread_workqueue("sfc_reset");
+ if (!reset_workqueue) {
+ rc = -ENOMEM;
+ goto err_reset;
+ }
+
+ rc = pci_register_driver(&efx_pci_driver);
+ if (rc < 0)
+ goto err_pci;
+
+ return 0;
+
+ err_pci:
+ destroy_workqueue(reset_workqueue);
+ err_reset:
+ efx_fini_sriov();
+ err_sriov:
+ unregister_netdevice_notifier(&efx_netdev_notifier);
+ err_notifier:
+ return rc;
+}
+
+static void __exit efx_exit_module(void)
+{
+ printk(KERN_INFO "Solarflare NET driver unloading\n");
+
+ pci_unregister_driver(&efx_pci_driver);
+ destroy_workqueue(reset_workqueue);
+ efx_fini_sriov();
+ unregister_netdevice_notifier(&efx_netdev_notifier);
+}
+
+module_init(efx_init_module);
+module_exit(efx_exit_module);
+
+MODULE_AUTHOR("Solarflare Communications and "
+ "Michael Brown <mbrown@fensystems.co.uk>");
+MODULE_DESCRIPTION("Solarflare network driver");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, efx_pci_table);
diff --git a/kernel/drivers/net/ethernet/sfc/efx.h b/kernel/drivers/net/ethernet/sfc/efx.h
new file mode 100644
index 000000000..2587c582a
--- /dev/null
+++ b/kernel/drivers/net/ethernet/sfc/efx.h
@@ -0,0 +1,255 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2013 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_EFX_H
+#define EFX_EFX_H
+
+#include "net_driver.h"
+#include "filter.h"
+
+/* All controllers use BAR 0 for I/O space and BAR 2(&3) for memory */
+#define EFX_MEM_BAR 2
+
+/* TX */
+int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
+void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
+void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
+void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
+void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
+netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
+ struct net_device *net_dev);
+netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
+void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
+int efx_setup_tc(struct net_device *net_dev, u8 num_tc);
+unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
+extern unsigned int efx_piobuf_size;
+
+/* RX */
+void efx_rx_config_page_split(struct efx_nic *efx);
+int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
+void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
+void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
+void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
+void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic);
+void efx_rx_slow_fill(unsigned long context);
+void __efx_rx_packet(struct efx_channel *channel);
+void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
+ unsigned int n_frags, unsigned int len, u16 flags);
+static inline void efx_rx_flush_packet(struct efx_channel *channel)
+{
+ if (channel->rx_pkt_n_frags)
+ __efx_rx_packet(channel);
+}
+void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
+
+#define EFX_MAX_DMAQ_SIZE 4096UL
+#define EFX_DEFAULT_DMAQ_SIZE 1024UL
+#define EFX_MIN_DMAQ_SIZE 512UL
+
+#define EFX_MAX_EVQ_SIZE 16384UL
+#define EFX_MIN_EVQ_SIZE 512UL
+
+/* Maximum number of TCP segments we support for soft-TSO */
+#define EFX_TSO_MAX_SEGS 100
+
+/* The smallest [rt]xq_entries that the driver supports. RX minimum
+ * is a bit arbitrary. For TX, we must have space for at least 2
+ * TSO skbs.
+ */
+#define EFX_RXQ_MIN_ENT 128U
+#define EFX_TXQ_MIN_ENT(efx) (2 * efx_tx_max_skb_descs(efx))
+
+#define EFX_TXQ_MAX_ENT(efx) (EFX_WORKAROUND_35388(efx) ? \
+ EFX_MAX_DMAQ_SIZE / 2 : EFX_MAX_DMAQ_SIZE)
+
+/* Filters */
+
+/**
+ * efx_filter_insert_filter - add or replace a filter
+ * @efx: NIC in which to insert the filter
+ * @spec: Specification for the filter
+ * @replace_equal: Flag for whether the specified filter may replace an
+ * existing filter with equal priority
+ *
+ * On success, return the filter ID.
+ * On failure, return a negative error code.
+ *
+ * If existing filters have equal match values to the new filter spec,
+ * then the new filter might replace them or the function might fail,
+ * as follows.
+ *
+ * 1. If the existing filters have lower priority, or @replace_equal
+ * is set and they have equal priority, replace them.
+ *
+ * 2. If the existing filters have higher priority, return -%EPERM.
+ *
+ * 3. If !efx_filter_is_mc_recipient(@spec), or the NIC does not
+ * support delivery to multiple recipients, return -%EEXIST.
+ *
+ * This implies that filters for multiple multicast recipients must
+ * all be inserted with the same priority and @replace_equal = %false.
+ */
+static inline s32 efx_filter_insert_filter(struct efx_nic *efx,
+ struct efx_filter_spec *spec,
+ bool replace_equal)
+{
+ return efx->type->filter_insert(efx, spec, replace_equal);
+}
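+
+/* Illustrative sketch only (not part of the driver API): a caller normally
+ * fills in a &struct efx_filter_spec and then inserts it, much as
+ * efx_ethtool_set_class_rule() does, e.g. to steer TCP port 80 to a given
+ * RX queue (rxq_id is assumed to be provided by the caller):
+ *
+ *	struct efx_filter_spec spec;
+ *	s32 rc;
+ *
+ *	efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 0, rxq_id);
+ *	spec.match_flags = EFX_FILTER_MATCH_ETHER_TYPE |
+ *			   EFX_FILTER_MATCH_IP_PROTO |
+ *			   EFX_FILTER_MATCH_LOC_PORT;
+ *	spec.ether_type = htons(ETH_P_IP);
+ *	spec.ip_proto = IPPROTO_TCP;
+ *	spec.loc_port = htons(80);
+ *	rc = efx_filter_insert_filter(efx, &spec, false);
+ */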
+
+/**
+ * efx_filter_remove_id_safe - remove a filter by ID, carefully
+ * @efx: NIC from which to remove the filter
+ * @priority: Priority of filter, as passed to @efx_filter_insert_filter
+ * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
+ *
+ * This function will range-check @filter_id, so it is safe to call
+ * with a value passed from userland.
+ */
+static inline int efx_filter_remove_id_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id)
+{
+ return efx->type->filter_remove_safe(efx, priority, filter_id);
+}
+
+/**
+ * efx_filter_get_filter_safe - retrieve a filter by ID, carefully
+ * @efx: NIC from which to retrieve the filter
+ * @priority: Priority of filter, as passed to @efx_filter_insert_filter
+ * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
+ * @spec: Buffer in which to store filter specification
+ *
+ * This function will range-check @filter_id, so it is safe to call
+ * with a value passed from userland.
+ */
+static inline int
+efx_filter_get_filter_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id, struct efx_filter_spec *spec)
+{
+ return efx->type->filter_get_safe(efx, priority, filter_id, spec);
+}
+
+static inline u32 efx_filter_count_rx_used(struct efx_nic *efx,
+ enum efx_filter_priority priority)
+{
+ return efx->type->filter_count_rx_used(efx, priority);
+}
+static inline u32 efx_filter_get_rx_id_limit(struct efx_nic *efx)
+{
+ return efx->type->filter_get_rx_id_limit(efx);
+}
+static inline s32 efx_filter_get_rx_ids(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 *buf, u32 size)
+{
+ return efx->type->filter_get_rx_ids(efx, priority, buf, size);
+}
+#ifdef CONFIG_RFS_ACCEL
+int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
+ u16 rxq_index, u32 flow_id);
+bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota);
+static inline void efx_filter_rfs_expire(struct efx_channel *channel)
+{
+ if (channel->rfs_filters_added >= 60 &&
+ __efx_filter_rfs_expire(channel->efx, 100))
+ channel->rfs_filters_added -= 60;
+}
+#define efx_filter_rfs_enabled() 1
+#else
+static inline void efx_filter_rfs_expire(struct efx_channel *channel) {}
+#define efx_filter_rfs_enabled() 0
+#endif
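+/* Note: elsewhere in the driver efx_filter_rfs() is wired up as the
+ * net_device_ops .ndo_rx_flow_steer handler, and efx_filter_rfs_expire()
+ * is called from NAPI polling to age out filters the stack no longer wants.
+ */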
+bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec);
+
+/* Channels */
+int efx_channel_dummy_op_int(struct efx_channel *channel);
+void efx_channel_dummy_op_void(struct efx_channel *channel);
+int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries);
+
+/* Ports */
+int efx_reconfigure_port(struct efx_nic *efx);
+int __efx_reconfigure_port(struct efx_nic *efx);
+
+/* Ethtool support */
+extern const struct ethtool_ops efx_ethtool_ops;
+
+/* Reset handling */
+int efx_reset(struct efx_nic *efx, enum reset_type method);
+void efx_reset_down(struct efx_nic *efx, enum reset_type method);
+int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok);
+int efx_try_recovery(struct efx_nic *efx);
+
+/* Global */
+void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);
+int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
+ unsigned int rx_usecs, bool rx_adaptive,
+ bool rx_may_override_tx);
+void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
+ unsigned int *rx_usecs, bool *rx_adaptive);
+void efx_stop_eventq(struct efx_channel *channel);
+void efx_start_eventq(struct efx_channel *channel);
+
+/* Dummy PHY ops for PHY drivers */
+int efx_port_dummy_op_int(struct efx_nic *efx);
+void efx_port_dummy_op_void(struct efx_nic *efx);
+
+/* Update the generic software stats in the passed stats array */
+void efx_update_sw_stats(struct efx_nic *efx, u64 *stats);
+
+/* MTD */
+#ifdef CONFIG_SFC_MTD
+int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts,
+ size_t n_parts, size_t sizeof_part);
+static inline int efx_mtd_probe(struct efx_nic *efx)
+{
+ return efx->type->mtd_probe(efx);
+}
+void efx_mtd_rename(struct efx_nic *efx);
+void efx_mtd_remove(struct efx_nic *efx);
+#else
+static inline int efx_mtd_probe(struct efx_nic *efx) { return 0; }
+static inline void efx_mtd_rename(struct efx_nic *efx) {}
+static inline void efx_mtd_remove(struct efx_nic *efx) {}
+#endif
+
+static inline void efx_schedule_channel(struct efx_channel *channel)
+{
+ netif_vdbg(channel->efx, intr, channel->efx->net_dev,
+ "channel %d scheduling NAPI poll on CPU%d\n",
+ channel->channel, raw_smp_processor_id());
+
+ napi_schedule(&channel->napi_str);
+}
+
+static inline void efx_schedule_channel_irq(struct efx_channel *channel)
+{
+ channel->event_test_cpu = raw_smp_processor_id();
+ efx_schedule_channel(channel);
+}
+
+void efx_link_status_changed(struct efx_nic *efx);
+void efx_link_set_advertising(struct efx_nic *efx, u32);
+void efx_link_set_wanted_fc(struct efx_nic *efx, u8);
+
+static inline void efx_device_detach_sync(struct efx_nic *efx)
+{
+ struct net_device *dev = efx->net_dev;
+
+ /* Lock/freeze all TX queues so that we can be sure the
+ * TX scheduler is stopped when we're done and before
+ * netif_device_present() becomes false.
+ */
+ netif_tx_lock_bh(dev);
+ netif_device_detach(dev);
+ netif_tx_unlock_bh(dev);
+}
+
+#endif /* EFX_EFX_H */
diff --git a/kernel/drivers/net/ethernet/sfc/enum.h b/kernel/drivers/net/ethernet/sfc/enum.h
new file mode 100644
index 000000000..d1dbb5fb3
--- /dev/null
+++ b/kernel/drivers/net/ethernet/sfc/enum.h
@@ -0,0 +1,181 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2007-2013 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_ENUM_H
+#define EFX_ENUM_H
+
+/**
+ * enum efx_loopback_mode - loopback modes
+ * @LOOPBACK_NONE: no loopback
+ * @LOOPBACK_DATA: data path loopback
+ * @LOOPBACK_GMAC: loopback within GMAC
+ * @LOOPBACK_XGMII: loopback after XMAC
+ * @LOOPBACK_XGXS: loopback within BPX after XGXS
+ * @LOOPBACK_XAUI: loopback within BPX before XAUI serdes
+ * @LOOPBACK_GMII: loopback within BPX after GMAC
+ * @LOOPBACK_SGMII: loopback within BPX within SGMII
+ * @LOOPBACK_XGBR: loopback within BPX within XGBR
+ * @LOOPBACK_XFI: loopback within BPX before XFI serdes
+ * @LOOPBACK_XAUI_FAR: loopback within BPX after XAUI serdes
+ * @LOOPBACK_GMII_FAR: loopback within BPX before SGMII
+ * @LOOPBACK_SGMII_FAR: loopback within BPX after SGMII
+ * @LOOPBACK_XFI_FAR: loopback after XFI serdes
+ * @LOOPBACK_GPHY: loopback within 1G PHY at unspecified level
+ * @LOOPBACK_PHYXS: loopback within 10G PHY at PHYXS level
+ * @LOOPBACK_PCS: loopback within 10G PHY at PCS level
+ * @LOOPBACK_PMAPMD: loopback within 10G PHY at PMAPMD level
+ * @LOOPBACK_XPORT: cross port loopback
+ * @LOOPBACK_XGMII_WS: wireside loopback excluding XMAC
+ * @LOOPBACK_XAUI_WS: wireside loopback within BPX within XAUI serdes
+ * @LOOPBACK_XAUI_WS_FAR: wireside loopback within BPX including XAUI serdes
+ * @LOOPBACK_XAUI_WS_NEAR: wireside loopback within BPX excluding XAUI serdes
+ * @LOOPBACK_GMII_WS: wireside loopback excluding GMAC
+ * @LOOPBACK_XFI_WS: wireside loopback excluding XFI serdes
+ * @LOOPBACK_XFI_WS_FAR: wireside loopback including XFI serdes
+ * @LOOPBACK_PHYXS_WS: wireside loopback within 10G PHY at PHYXS level
+ */
+/* Please keep up-to-date w.r.t. the following two #defines */
+enum efx_loopback_mode {
+ LOOPBACK_NONE = 0,
+ LOOPBACK_DATA = 1,
+ LOOPBACK_GMAC = 2,
+ LOOPBACK_XGMII = 3,
+ LOOPBACK_XGXS = 4,
+ LOOPBACK_XAUI = 5,
+ LOOPBACK_GMII = 6,
+ LOOPBACK_SGMII = 7,
+ LOOPBACK_XGBR = 8,
+ LOOPBACK_XFI = 9,
+ LOOPBACK_XAUI_FAR = 10,
+ LOOPBACK_GMII_FAR = 11,
+ LOOPBACK_SGMII_FAR = 12,
+ LOOPBACK_XFI_FAR = 13,
+ LOOPBACK_GPHY = 14,
+ LOOPBACK_PHYXS = 15,
+ LOOPBACK_PCS = 16,
+ LOOPBACK_PMAPMD = 17,
+ LOOPBACK_XPORT = 18,
+ LOOPBACK_XGMII_WS = 19,
+ LOOPBACK_XAUI_WS = 20,
+ LOOPBACK_XAUI_WS_FAR = 21,
+ LOOPBACK_XAUI_WS_NEAR = 22,
+ LOOPBACK_GMII_WS = 23,
+ LOOPBACK_XFI_WS = 24,
+ LOOPBACK_XFI_WS_FAR = 25,
+ LOOPBACK_PHYXS_WS = 26,
+ LOOPBACK_MAX
+};
+#define LOOPBACK_TEST_MAX LOOPBACK_PMAPMD
+
+/* These loopbacks occur within the controller */
+#define LOOPBACKS_INTERNAL ((1 << LOOPBACK_DATA) | \
+ (1 << LOOPBACK_GMAC) | \
+ (1 << LOOPBACK_XGMII)| \
+ (1 << LOOPBACK_XGXS) | \
+ (1 << LOOPBACK_XAUI) | \
+ (1 << LOOPBACK_GMII) | \
+ (1 << LOOPBACK_SGMII) | \
+ (1 << LOOPBACK_XGBR) | \
+ (1 << LOOPBACK_XFI) | \
+ (1 << LOOPBACK_XAUI_FAR) | \
+ (1 << LOOPBACK_GMII_FAR) | \
+ (1 << LOOPBACK_SGMII_FAR) | \
+ (1 << LOOPBACK_XFI_FAR) | \
+ (1 << LOOPBACK_XGMII_WS) | \
+ (1 << LOOPBACK_XAUI_WS) | \
+ (1 << LOOPBACK_XAUI_WS_FAR) | \
+ (1 << LOOPBACK_XAUI_WS_NEAR) | \
+ (1 << LOOPBACK_GMII_WS) | \
+ (1 << LOOPBACK_XFI_WS) | \
+ (1 << LOOPBACK_XFI_WS_FAR))
+
+#define LOOPBACKS_WS ((1 << LOOPBACK_XGMII_WS) | \
+ (1 << LOOPBACK_XAUI_WS) | \
+ (1 << LOOPBACK_XAUI_WS_FAR) | \
+ (1 << LOOPBACK_XAUI_WS_NEAR) | \
+ (1 << LOOPBACK_GMII_WS) | \
+ (1 << LOOPBACK_XFI_WS) | \
+ (1 << LOOPBACK_XFI_WS_FAR) | \
+ (1 << LOOPBACK_PHYXS_WS))
+
+#define LOOPBACKS_EXTERNAL(_efx) \
+ ((_efx)->loopback_modes & ~LOOPBACKS_INTERNAL & \
+ ~(1 << LOOPBACK_NONE))
+
+#define LOOPBACK_MASK(_efx) \
+ (1 << (_efx)->loopback_mode)
+
+#define LOOPBACK_INTERNAL(_efx) \
+ (!!(LOOPBACKS_INTERNAL & LOOPBACK_MASK(_efx)))
+
+#define LOOPBACK_EXTERNAL(_efx) \
+ (!!(LOOPBACK_MASK(_efx) & LOOPBACKS_EXTERNAL(_efx)))
+
+#define LOOPBACK_CHANGED(_from, _to, _mask) \
+ (!!((LOOPBACK_MASK(_from) ^ LOOPBACK_MASK(_to)) & (_mask)))
+
+#define LOOPBACK_OUT_OF(_from, _to, _mask) \
+ ((LOOPBACK_MASK(_from) & (_mask)) && !(LOOPBACK_MASK(_to) & (_mask)))
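+
+/* For example, while (_efx)->loopback_mode == LOOPBACK_XAUI,
+ * LOOPBACK_MASK(_efx) is (1 << LOOPBACK_XAUI), so LOOPBACK_INTERNAL(_efx)
+ * evaluates to true and LOOPBACK_EXTERNAL(_efx) to false.
+ */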
+
+/*****************************************************************************/
+
+/**
+ * enum reset_type - reset types
+ *
+ * %RESET_TYPE_INVISIBLE, %RESET_TYPE_ALL, %RESET_TYPE_WORLD and
+ * %RESET_TYPE_DISABLE specify the method/scope of the reset. The
+ * other values specify reasons, which efx_schedule_reset() will choose
+ * a method for.
+ *
+ * Reset methods are numbered in order of increasing scope.
+ *
+ * @RESET_TYPE_INVISIBLE: Reset datapath and MAC (Falcon only)
+ * @RESET_TYPE_RECOVER_OR_ALL: Try to recover. Apply RESET_TYPE_ALL
+ * if unsuccessful.
+ * @RESET_TYPE_ALL: Reset datapath, MAC and PHY
+ * @RESET_TYPE_WORLD: Reset as much as possible
+ * @RESET_TYPE_RECOVER_OR_DISABLE: Try to recover. Apply RESET_TYPE_DISABLE if
+ * unsuccessful.
+ * @RESET_TYPE_MC_BIST: MC entering BIST mode.
+ * @RESET_TYPE_DISABLE: Reset datapath, MAC and PHY; leave NIC disabled
+ * @RESET_TYPE_TX_WATCHDOG: reset due to TX watchdog
+ * @RESET_TYPE_INT_ERROR: reset due to internal error
+ * @RESET_TYPE_RX_RECOVERY: reset to recover from RX datapath errors
+ * @RESET_TYPE_DMA_ERROR: DMA error
+ * @RESET_TYPE_TX_SKIP: hardware completed empty tx descriptors
+ * @RESET_TYPE_MC_FAILURE: MC reboot/assertion
+ * @RESET_TYPE_MCDI_TIMEOUT: MCDI timeout.
+ */
+enum reset_type {
+ RESET_TYPE_INVISIBLE,
+ RESET_TYPE_RECOVER_OR_ALL,
+ RESET_TYPE_ALL,
+ RESET_TYPE_WORLD,
+ RESET_TYPE_RECOVER_OR_DISABLE,
+ RESET_TYPE_MC_BIST,
+ RESET_TYPE_DISABLE,
+ RESET_TYPE_MAX_METHOD,
+ RESET_TYPE_TX_WATCHDOG,
+ RESET_TYPE_INT_ERROR,
+ RESET_TYPE_RX_RECOVERY,
+ RESET_TYPE_DMA_ERROR,
+ RESET_TYPE_TX_SKIP,
+ RESET_TYPE_MC_FAILURE,
+ /* RESET_TYPE_MCDI_TIMEOUT is actually a method, not just a reason, but
+ * it doesn't fit the scope hierarchy (not well-ordered by inclusion).
+ * We encode this by having its enum value be greater than
+ * RESET_TYPE_MAX_METHOD. This also prevents issuing it with
+ * efx_ioctl_reset.
+ */
+ RESET_TYPE_MCDI_TIMEOUT,
+ RESET_TYPE_MAX,
+};
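+
+/* A reason value is normally passed to efx_schedule_reset(), which picks an
+ * appropriate method when the reset work item runs; e.g. the TX watchdog
+ * handler requests
+ *
+ *	efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
+ */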
+
+#endif /* EFX_ENUM_H */
diff --git a/kernel/drivers/net/ethernet/sfc/ethtool.c b/kernel/drivers/net/ethernet/sfc/ethtool.c
new file mode 100644
index 000000000..4835bc0d0
--- /dev/null
+++ b/kernel/drivers/net/ethernet/sfc/ethtool.c
@@ -0,0 +1,1196 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2013 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/rtnetlink.h>
+#include <linux/in.h>
+#include "net_driver.h"
+#include "workarounds.h"
+#include "selftest.h"
+#include "efx.h"
+#include "filter.h"
+#include "nic.h"
+
+struct efx_sw_stat_desc {
+ const char *name;
+ enum {
+ EFX_ETHTOOL_STAT_SOURCE_nic,
+ EFX_ETHTOOL_STAT_SOURCE_channel,
+ EFX_ETHTOOL_STAT_SOURCE_tx_queue
+ } source;
+ unsigned offset;
+ u64(*get_stat) (void *field); /* Reader function */
+};
+
+/* Initialiser for a struct efx_sw_stat_desc with type-checking */
+#define EFX_ETHTOOL_STAT(stat_name, source_name, field, field_type, \
+ get_stat_function) { \
+ .name = #stat_name, \
+ .source = EFX_ETHTOOL_STAT_SOURCE_##source_name, \
+ .offset = ((((field_type *) 0) == \
+ &((struct efx_##source_name *)0)->field) ? \
+ offsetof(struct efx_##source_name, field) : \
+ offsetof(struct efx_##source_name, field)), \
+ .get_stat = get_stat_function, \
+}
+
+static u64 efx_get_uint_stat(void *field)
+{
+ return *(unsigned int *)field;
+}
+
+static u64 efx_get_atomic_stat(void *field)
+{
+ return atomic_read((atomic_t *) field);
+}
+
+#define EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(field) \
+ EFX_ETHTOOL_STAT(field, nic, field, \
+ atomic_t, efx_get_atomic_stat)
+
+#define EFX_ETHTOOL_UINT_CHANNEL_STAT(field) \
+ EFX_ETHTOOL_STAT(field, channel, n_##field, \
+ unsigned int, efx_get_uint_stat)
+
+#define EFX_ETHTOOL_UINT_TXQ_STAT(field) \
+ EFX_ETHTOOL_STAT(tx_##field, tx_queue, field, \
+ unsigned int, efx_get_uint_stat)
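+
+/* The pointer comparison in EFX_ETHTOOL_STAT() is purely a compile-time type
+ * check: both branches of the ternary are the same offsetof().  As an
+ * illustrative expansion, EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc)
+ * describes a stat named "rx_tobe_disc", read by efx_get_uint_stat() from
+ * offsetof(struct efx_channel, n_rx_tobe_disc).
+ */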
+
+static const struct efx_sw_stat_desc efx_sw_stat_desc[] = {
+ EFX_ETHTOOL_UINT_TXQ_STAT(merge_events),
+ EFX_ETHTOOL_UINT_TXQ_STAT(tso_bursts),
+ EFX_ETHTOOL_UINT_TXQ_STAT(tso_long_headers),
+ EFX_ETHTOOL_UINT_TXQ_STAT(tso_packets),
+ EFX_ETHTOOL_UINT_TXQ_STAT(pushes),
+ EFX_ETHTOOL_UINT_TXQ_STAT(pio_packets),
+ EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_events),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_packets),
+};
+
+#define EFX_ETHTOOL_SW_STAT_COUNT ARRAY_SIZE(efx_sw_stat_desc)
+
+#define EFX_ETHTOOL_EEPROM_MAGIC 0xEFAB
+
+/**************************************************************************
+ *
+ * Ethtool operations
+ *
+ **************************************************************************
+ */
+
+/* Identify device by flashing LEDs */
+static int efx_ethtool_phys_id(struct net_device *net_dev,
+ enum ethtool_phys_id_state state)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ enum efx_led_mode mode = EFX_LED_DEFAULT;
+
+ switch (state) {
+ case ETHTOOL_ID_ON:
+ mode = EFX_LED_ON;
+ break;
+ case ETHTOOL_ID_OFF:
+ mode = EFX_LED_OFF;
+ break;
+ case ETHTOOL_ID_INACTIVE:
+ mode = EFX_LED_DEFAULT;
+ break;
+ case ETHTOOL_ID_ACTIVE:
+ return 1; /* cycle on/off once per second */
+ }
+
+ efx->type->set_id_led(efx, mode);
+ return 0;
+}
+
+/* This must be called with rtnl_lock held. */
+static int efx_ethtool_get_settings(struct net_device *net_dev,
+ struct ethtool_cmd *ecmd)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_link_state *link_state = &efx->link_state;
+
+ mutex_lock(&efx->mac_lock);
+ efx->phy_op->get_settings(efx, ecmd);
+ mutex_unlock(&efx->mac_lock);
+
+ /* Both MACs support pause frames (bidirectional and respond-only) */
+ ecmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+
+ if (LOOPBACK_INTERNAL(efx)) {
+ ethtool_cmd_speed_set(ecmd, link_state->speed);
+ ecmd->duplex = link_state->fd ? DUPLEX_FULL : DUPLEX_HALF;
+ }
+
+ return 0;
+}
+
+/* This must be called with rtnl_lock held. */
+static int efx_ethtool_set_settings(struct net_device *net_dev,
+ struct ethtool_cmd *ecmd)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ int rc;
+
+ /* GMAC does not support 1000Mbps HD */
+ if ((ethtool_cmd_speed(ecmd) == SPEED_1000) &&
+ (ecmd->duplex != DUPLEX_FULL)) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "rejecting unsupported 1000Mbps HD setting\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&efx->mac_lock);
+ rc = efx->phy_op->set_settings(efx, ecmd);
+ mutex_unlock(&efx->mac_lock);
+ return rc;
+}
+
+static void efx_ethtool_get_drvinfo(struct net_device *net_dev,
+ struct ethtool_drvinfo *info)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version));
+ if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
+ efx_mcdi_print_fwver(efx, info->fw_version,
+ sizeof(info->fw_version));
+ strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
+}
+
+static int efx_ethtool_get_regs_len(struct net_device *net_dev)
+{
+ return efx_nic_get_regs_len(netdev_priv(net_dev));
+}
+
+static void efx_ethtool_get_regs(struct net_device *net_dev,
+ struct ethtool_regs *regs, void *buf)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ regs->version = efx->type->revision;
+ efx_nic_get_regs(efx, buf);
+}
+
+static u32 efx_ethtool_get_msglevel(struct net_device *net_dev)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ return efx->msg_enable;
+}
+
+static void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ efx->msg_enable = msg_enable;
+}
+
+/**
+ * efx_fill_test - fill in an individual self-test entry
+ * @test_index: Index of the test
+ * @strings: Ethtool strings, or %NULL
+ * @data: Ethtool test results, or %NULL
+ * @test: Pointer to test result (used only if data != %NULL)
+ * @unit_format: Unit name format (e.g. "chan\%d")
+ * @unit_id: Unit id (e.g. 0 for "chan0")
+ * @test_format: Test name format (e.g. "loopback.\%s.tx_sent")
+ * @test_id: Test id (e.g. "PHYXS" for "loopback.PHYXS.tx_sent")
+ *
+ * Fill in an individual self-test entry.
+ */
+static void efx_fill_test(unsigned int test_index, u8 *strings, u64 *data,
+ int *test, const char *unit_format, int unit_id,
+ const char *test_format, const char *test_id)
+{
+ char unit_str[ETH_GSTRING_LEN], test_str[ETH_GSTRING_LEN];
+
+ /* Fill data value, if applicable */
+ if (data)
+ data[test_index] = *test;
+
+ /* Fill string, if applicable */
+ if (strings) {
+ if (strchr(unit_format, '%'))
+ snprintf(unit_str, sizeof(unit_str),
+ unit_format, unit_id);
+ else
+ strcpy(unit_str, unit_format);
+ snprintf(test_str, sizeof(test_str), test_format, test_id);
+ snprintf(strings + test_index * ETH_GSTRING_LEN,
+ ETH_GSTRING_LEN,
+ "%-6s %-24s", unit_str, test_str);
+ }
+}
+
+#define EFX_CHANNEL_NAME(_channel) "chan%d", _channel->channel
+#define EFX_TX_QUEUE_NAME(_tx_queue) "txq%d", _tx_queue->queue
+#define EFX_RX_QUEUE_NAME(_rx_queue) "rxq%d", _rx_queue->queue
+#define EFX_LOOPBACK_NAME(_mode, _counter) \
+ "loopback.%s." _counter, STRING_TABLE_LOOKUP(_mode, efx_loopback_mode)
+
+/**
+ * efx_fill_loopback_test - fill in a block of loopback self-test entries
+ * @efx: Efx NIC
+ * @lb_tests: Efx loopback self-test results structure
+ * @mode: Loopback test mode
+ * @test_index: Starting index of the test
+ * @strings: Ethtool strings, or %NULL
+ * @data: Ethtool test results, or %NULL
+ *
+ * Fill in a block of loopback self-test entries. Return new test
+ * index.
+ */
+static int efx_fill_loopback_test(struct efx_nic *efx,
+ struct efx_loopback_self_tests *lb_tests,
+ enum efx_loopback_mode mode,
+ unsigned int test_index,
+ u8 *strings, u64 *data)
+{
+ struct efx_channel *channel =
+ efx_get_channel(efx, efx->tx_channel_offset);
+ struct efx_tx_queue *tx_queue;
+
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ efx_fill_test(test_index++, strings, data,
+ &lb_tests->tx_sent[tx_queue->queue],
+ EFX_TX_QUEUE_NAME(tx_queue),
+ EFX_LOOPBACK_NAME(mode, "tx_sent"));
+ efx_fill_test(test_index++, strings, data,
+ &lb_tests->tx_done[tx_queue->queue],
+ EFX_TX_QUEUE_NAME(tx_queue),
+ EFX_LOOPBACK_NAME(mode, "tx_done"));
+ }
+ efx_fill_test(test_index++, strings, data,
+ &lb_tests->rx_good,
+ "rx", 0,
+ EFX_LOOPBACK_NAME(mode, "rx_good"));
+ efx_fill_test(test_index++, strings, data,
+ &lb_tests->rx_bad,
+ "rx", 0,
+ EFX_LOOPBACK_NAME(mode, "rx_bad"));
+
+ return test_index;
+}
+
+/**
+ * efx_ethtool_fill_self_tests - get self-test details
+ * @efx: Efx NIC
+ * @tests: Efx self-test results structure, or %NULL
+ * @strings: Ethtool strings, or %NULL
+ * @data: Ethtool test results, or %NULL
+ *
+ * Get self-test number of strings, strings, and/or test results.
+ * Return number of strings (== number of test results).
+ *
+ * The reason for merging these three functions is to make sure that
+ * they can never be inconsistent.
+ */
+static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
+ struct efx_self_tests *tests,
+ u8 *strings, u64 *data)
+{
+ struct efx_channel *channel;
+ unsigned int n = 0, i;
+ enum efx_loopback_mode mode;
+
+ efx_fill_test(n++, strings, data, &tests->phy_alive,
+ "phy", 0, "alive", NULL);
+ efx_fill_test(n++, strings, data, &tests->nvram,
+ "core", 0, "nvram", NULL);
+ efx_fill_test(n++, strings, data, &tests->interrupt,
+ "core", 0, "interrupt", NULL);
+
+ /* Event queues */
+ efx_for_each_channel(channel, efx) {
+ efx_fill_test(n++, strings, data,
+ &tests->eventq_dma[channel->channel],
+ EFX_CHANNEL_NAME(channel),
+ "eventq.dma", NULL);
+ efx_fill_test(n++, strings, data,
+ &tests->eventq_int[channel->channel],
+ EFX_CHANNEL_NAME(channel),
+ "eventq.int", NULL);
+ }
+
+ efx_fill_test(n++, strings, data, &tests->memory,
+ "core", 0, "memory", NULL);
+ efx_fill_test(n++, strings, data, &tests->registers,
+ "core", 0, "registers", NULL);
+
+ if (efx->phy_op->run_tests != NULL) {
+ EFX_BUG_ON_PARANOID(efx->phy_op->test_name == NULL);
+
+ for (i = 0; true; ++i) {
+ const char *name;
+
+ EFX_BUG_ON_PARANOID(i >= EFX_MAX_PHY_TESTS);
+ name = efx->phy_op->test_name(efx, i);
+ if (name == NULL)
+ break;
+
+ efx_fill_test(n++, strings, data, &tests->phy_ext[i],
+ "phy", 0, name, NULL);
+ }
+ }
+
+ /* Loopback tests */
+ for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) {
+ if (!(efx->loopback_modes & (1 << mode)))
+ continue;
+ n = efx_fill_loopback_test(efx,
+ &tests->loopback[mode], mode, n,
+ strings, data);
+ }
+
+ return n;
+}
+
+static size_t efx_describe_per_queue_stats(struct efx_nic *efx, u8 *strings)
+{
+ size_t n_stats = 0;
+ struct efx_channel *channel;
+
+ efx_for_each_channel(channel, efx) {
+ if (efx_channel_has_tx_queues(channel)) {
+ n_stats++;
+ if (strings != NULL) {
+ snprintf(strings, ETH_GSTRING_LEN,
+ "tx-%u.tx_packets",
+ channel->tx_queue[0].queue /
+ EFX_TXQ_TYPES);
+
+ strings += ETH_GSTRING_LEN;
+ }
+ }
+ }
+ efx_for_each_channel(channel, efx) {
+ if (efx_channel_has_rx_queue(channel)) {
+ n_stats++;
+ if (strings != NULL) {
+ snprintf(strings, ETH_GSTRING_LEN,
+ "rx-%d.rx_packets", channel->channel);
+ strings += ETH_GSTRING_LEN;
+ }
+ }
+ }
+ return n_stats;
+}
+
+static int efx_ethtool_get_sset_count(struct net_device *net_dev,
+ int string_set)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ switch (string_set) {
+ case ETH_SS_STATS:
+ return efx->type->describe_stats(efx, NULL) +
+ EFX_ETHTOOL_SW_STAT_COUNT +
+ efx_describe_per_queue_stats(efx, NULL) +
+ efx_ptp_describe_stats(efx, NULL);
+ case ETH_SS_TEST:
+ return efx_ethtool_fill_self_tests(efx, NULL, NULL, NULL);
+ default:
+ return -EINVAL;
+ }
+}
+
+static void efx_ethtool_get_strings(struct net_device *net_dev,
+ u32 string_set, u8 *strings)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ int i;
+
+ switch (string_set) {
+ case ETH_SS_STATS:
+ strings += (efx->type->describe_stats(efx, strings) *
+ ETH_GSTRING_LEN);
+ for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++)
+ strlcpy(strings + i * ETH_GSTRING_LEN,
+ efx_sw_stat_desc[i].name, ETH_GSTRING_LEN);
+ strings += EFX_ETHTOOL_SW_STAT_COUNT * ETH_GSTRING_LEN;
+ strings += (efx_describe_per_queue_stats(efx, strings) *
+ ETH_GSTRING_LEN);
+ efx_ptp_describe_stats(efx, strings);
+ break;
+ case ETH_SS_TEST:
+ efx_ethtool_fill_self_tests(efx, NULL, strings, NULL);
+ break;
+ default:
+ /* No other string sets */
+ break;
+ }
+}
+
+static void efx_ethtool_get_stats(struct net_device *net_dev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ const struct efx_sw_stat_desc *stat;
+ struct efx_channel *channel;
+ struct efx_tx_queue *tx_queue;
+ struct efx_rx_queue *rx_queue;
+ int i;
+
+ spin_lock_bh(&efx->stats_lock);
+
+ /* Get NIC statistics */
+ data += efx->type->update_stats(efx, data, NULL);
+
+ /* Get software statistics */
+ for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++) {
+ stat = &efx_sw_stat_desc[i];
+ switch (stat->source) {
+ case EFX_ETHTOOL_STAT_SOURCE_nic:
+ data[i] = stat->get_stat((void *)efx + stat->offset);
+ break;
+ case EFX_ETHTOOL_STAT_SOURCE_channel:
+ data[i] = 0;
+ efx_for_each_channel(channel, efx)
+ data[i] += stat->get_stat((void *)channel +
+ stat->offset);
+ break;
+ case EFX_ETHTOOL_STAT_SOURCE_tx_queue:
+ data[i] = 0;
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_tx_queue(tx_queue, channel)
+ data[i] +=
+ stat->get_stat((void *)tx_queue
+ + stat->offset);
+ }
+ break;
+ }
+ }
+ data += EFX_ETHTOOL_SW_STAT_COUNT;
+
+ spin_unlock_bh(&efx->stats_lock);
+
+ efx_for_each_channel(channel, efx) {
+ if (efx_channel_has_tx_queues(channel)) {
+ *data = 0;
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ *data += tx_queue->tx_packets;
+ }
+ data++;
+ }
+ }
+ efx_for_each_channel(channel, efx) {
+ if (efx_channel_has_rx_queue(channel)) {
+ *data = 0;
+ efx_for_each_channel_rx_queue(rx_queue, channel) {
+ *data += rx_queue->rx_packets;
+ }
+ data++;
+ }
+ }
+
+ efx_ptp_update_stats(efx, data);
+}
+
+static void efx_ethtool_self_test(struct net_device *net_dev,
+ struct ethtool_test *test, u64 *data)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_self_tests *efx_tests;
+ bool already_up;
+ int rc = -ENOMEM;
+
+ efx_tests = kzalloc(sizeof(*efx_tests), GFP_KERNEL);
+ if (!efx_tests)
+ goto fail;
+
+ if (efx->state != STATE_READY) {
+ rc = -EBUSY;
+ goto out;
+ }
+
+ netif_info(efx, drv, efx->net_dev, "starting %sline testing\n",
+ (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");
+
+ /* We need rx buffers and interrupts. */
+ already_up = (efx->net_dev->flags & IFF_UP);
+ if (!already_up) {
+ rc = dev_open(efx->net_dev);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev,
+ "failed opening device.\n");
+ goto out;
+ }
+ }
+
+ rc = efx_selftest(efx, efx_tests, test->flags);
+
+ if (!already_up)
+ dev_close(efx->net_dev);
+
+ netif_info(efx, drv, efx->net_dev, "%s %sline self-tests\n",
+ rc == 0 ? "passed" : "failed",
+ (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");
+
+out:
+ efx_ethtool_fill_self_tests(efx, efx_tests, NULL, data);
+ kfree(efx_tests);
+fail:
+ if (rc)
+ test->flags |= ETH_TEST_FL_FAILED;
+}
+
+/* Restart autonegotiation */
+static int efx_ethtool_nway_reset(struct net_device *net_dev)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ return mdio45_nway_restart(&efx->mdio);
+}
+
+/*
+ * Each channel has a single IRQ and moderation timer, started by any
+ * completion (or other event). Unless the module parameter
+ * separate_tx_channels is set, IRQs and moderation are therefore
+ * shared between RX and TX completions. In this case, when RX IRQ
+ * moderation is explicitly changed then TX IRQ moderation is
+ * automatically changed too, but otherwise we fail if the two values
+ * are requested to be different.
+ *
+ * The hardware does not support a limit on the number of completions
+ * before an IRQ, so we do not use the max_frames fields. We should
+ * report and require that max_frames == (usecs != 0), but this would
+ * invalidate existing user documentation.
+ *
+ * The hardware does not have distinct settings for interrupt
+ * moderation while the previous IRQ is being handled, so we should
+ * not use the 'irq' fields. However, an earlier developer
+ * misunderstood the meaning of the 'irq' fields and the driver did
+ * not support the standard fields. To avoid invalidating existing
+ * user documentation, we report and accept changes through either the
+ * standard or 'irq' fields. If both are changed at the same time, we
+ * prefer the standard field.
+ *
+ * We implement adaptive IRQ moderation, but use a different algorithm
+ * from that assumed in the definition of struct ethtool_coalesce.
+ * Therefore we do not use any of the adaptive moderation parameters
+ * in it.
+ */
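+
+/* As an illustration (the option names below are standard ethtool usage,
+ * not something this driver defines): "ethtool -C ethX rx-usecs 60
+ * adaptive-rx on" reaches efx_ethtool_set_coalesce() with
+ * rx_coalesce_usecs == 60 and use_adaptive_rx_coalesce set, which are then
+ * passed through to efx_init_irq_moderation().
+ */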
+
+static int efx_ethtool_get_coalesce(struct net_device *net_dev,
+ struct ethtool_coalesce *coalesce)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ unsigned int tx_usecs, rx_usecs;
+ bool rx_adaptive;
+
+ efx_get_irq_moderation(efx, &tx_usecs, &rx_usecs, &rx_adaptive);
+
+ coalesce->tx_coalesce_usecs = tx_usecs;
+ coalesce->tx_coalesce_usecs_irq = tx_usecs;
+ coalesce->rx_coalesce_usecs = rx_usecs;
+ coalesce->rx_coalesce_usecs_irq = rx_usecs;
+ coalesce->use_adaptive_rx_coalesce = rx_adaptive;
+
+ return 0;
+}
+
+static int efx_ethtool_set_coalesce(struct net_device *net_dev,
+ struct ethtool_coalesce *coalesce)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_channel *channel;
+ unsigned int tx_usecs, rx_usecs;
+ bool adaptive, rx_may_override_tx;
+ int rc;
+
+ if (coalesce->use_adaptive_tx_coalesce)
+ return -EINVAL;
+
+ efx_get_irq_moderation(efx, &tx_usecs, &rx_usecs, &adaptive);
+
+ if (coalesce->rx_coalesce_usecs != rx_usecs)
+ rx_usecs = coalesce->rx_coalesce_usecs;
+ else
+ rx_usecs = coalesce->rx_coalesce_usecs_irq;
+
+ adaptive = coalesce->use_adaptive_rx_coalesce;
+
+ /* If channels are shared, TX IRQ moderation can be quietly
+ * overridden unless it is changed from its old value.
+ */
+ rx_may_override_tx = (coalesce->tx_coalesce_usecs == tx_usecs &&
+ coalesce->tx_coalesce_usecs_irq == tx_usecs);
+ if (coalesce->tx_coalesce_usecs != tx_usecs)
+ tx_usecs = coalesce->tx_coalesce_usecs;
+ else
+ tx_usecs = coalesce->tx_coalesce_usecs_irq;
+
+ rc = efx_init_irq_moderation(efx, tx_usecs, rx_usecs, adaptive,
+ rx_may_override_tx);
+ if (rc != 0)
+ return rc;
+
+ efx_for_each_channel(channel, efx)
+ efx->type->push_irq_moderation(channel);
+
+ return 0;
+}
+
+static void efx_ethtool_get_ringparam(struct net_device *net_dev,
+ struct ethtool_ringparam *ring)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ ring->rx_max_pending = EFX_MAX_DMAQ_SIZE;
+ ring->tx_max_pending = EFX_TXQ_MAX_ENT(efx);
+ ring->rx_pending = efx->rxq_entries;
+ ring->tx_pending = efx->txq_entries;
+}
+
+static int efx_ethtool_set_ringparam(struct net_device *net_dev,
+ struct ethtool_ringparam *ring)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ u32 txq_entries;
+
+ if (ring->rx_mini_pending || ring->rx_jumbo_pending ||
+ ring->rx_pending > EFX_MAX_DMAQ_SIZE ||
+ ring->tx_pending > EFX_TXQ_MAX_ENT(efx))
+ return -EINVAL;
+
+ if (ring->rx_pending < EFX_RXQ_MIN_ENT) {
+ netif_err(efx, drv, efx->net_dev,
+ "RX queues cannot be smaller than %u\n",
+ EFX_RXQ_MIN_ENT);
+ return -EINVAL;
+ }
+
+ txq_entries = max(ring->tx_pending, EFX_TXQ_MIN_ENT(efx));
+ if (txq_entries != ring->tx_pending)
+ netif_warn(efx, drv, efx->net_dev,
+ "increasing TX queue size to minimum of %u\n",
+ txq_entries);
+
+ return efx_realloc_channels(efx, ring->rx_pending, txq_entries);
+}
+
+static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ u8 wanted_fc, old_fc;
+ u32 old_adv;
+ int rc = 0;
+
+ mutex_lock(&efx->mac_lock);
+
+ wanted_fc = ((pause->rx_pause ? EFX_FC_RX : 0) |
+ (pause->tx_pause ? EFX_FC_TX : 0) |
+ (pause->autoneg ? EFX_FC_AUTO : 0));
+
+ if ((wanted_fc & EFX_FC_TX) && !(wanted_fc & EFX_FC_RX)) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "Flow control unsupported: tx ON rx OFF\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if ((wanted_fc & EFX_FC_AUTO) && !efx->link_advertising) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "Autonegotiation is disabled\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* Hook for Falcon bug 11482 workaround */
+ if (efx->type->prepare_enable_fc_tx &&
+ (wanted_fc & EFX_FC_TX) && !(efx->wanted_fc & EFX_FC_TX))
+ efx->type->prepare_enable_fc_tx(efx);
+
+ old_adv = efx->link_advertising;
+ old_fc = efx->wanted_fc;
+ efx_link_set_wanted_fc(efx, wanted_fc);
+ if (efx->link_advertising != old_adv ||
+ (efx->wanted_fc ^ old_fc) & EFX_FC_AUTO) {
+ rc = efx->phy_op->reconfigure(efx);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev,
+ "Unable to advertise requested flow "
+ "control setting\n");
+ goto out;
+ }
+ }
+
+ /* Reconfigure the MAC. The PHY *may* generate a link state change event
+ * if the user just changed the advertised capabilities, but there's no
+ * harm doing this twice */
+ efx->type->reconfigure_mac(efx);
+
+out:
+ mutex_unlock(&efx->mac_lock);
+
+ return rc;
+}
+
+static void efx_ethtool_get_pauseparam(struct net_device *net_dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ pause->rx_pause = !!(efx->wanted_fc & EFX_FC_RX);
+ pause->tx_pause = !!(efx->wanted_fc & EFX_FC_TX);
+ pause->autoneg = !!(efx->wanted_fc & EFX_FC_AUTO);
+}
+
+static void efx_ethtool_get_wol(struct net_device *net_dev,
+ struct ethtool_wolinfo *wol)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ return efx->type->get_wol(efx, wol);
+}
+
+
+static int efx_ethtool_set_wol(struct net_device *net_dev,
+ struct ethtool_wolinfo *wol)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ return efx->type->set_wol(efx, wol->wolopts);
+}
+
+static int efx_ethtool_reset(struct net_device *net_dev, u32 *flags)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ int rc;
+
+ rc = efx->type->map_reset_flags(flags);
+ if (rc < 0)
+ return rc;
+
+ return efx_reset(efx, rc);
+}
+
+/* MAC address mask including only I/G bit */
+static const u8 mac_addr_ig_mask[ETH_ALEN] __aligned(2) = {0x01, 0, 0, 0, 0, 0};
+
+#define IP4_ADDR_FULL_MASK ((__force __be32)~0)
+#define PORT_FULL_MASK ((__force __be16)~0)
+#define ETHER_TYPE_FULL_MASK ((__force __be16)~0)
+
+static int efx_ethtool_get_class_rule(struct efx_nic *efx,
+ struct ethtool_rx_flow_spec *rule)
+{
+ struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec;
+ struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec;
+ struct ethhdr *mac_entry = &rule->h_u.ether_spec;
+ struct ethhdr *mac_mask = &rule->m_u.ether_spec;
+ struct efx_filter_spec spec;
+ int rc;
+
+ rc = efx_filter_get_filter_safe(efx, EFX_FILTER_PRI_MANUAL,
+ rule->location, &spec);
+ if (rc)
+ return rc;
+
+ if (spec.dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
+ rule->ring_cookie = RX_CLS_FLOW_DISC;
+ else
+ rule->ring_cookie = spec.dmaq_id;
+
+ if ((spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) &&
+ spec.ether_type == htons(ETH_P_IP) &&
+ (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) &&
+ (spec.ip_proto == IPPROTO_TCP || spec.ip_proto == IPPROTO_UDP) &&
+ !(spec.match_flags &
+ ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST |
+ EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_PORT | EFX_FILTER_MATCH_REM_PORT))) {
+ rule->flow_type = ((spec.ip_proto == IPPROTO_TCP) ?
+ TCP_V4_FLOW : UDP_V4_FLOW);
+ if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) {
+ ip_entry->ip4dst = spec.loc_host[0];
+ ip_mask->ip4dst = IP4_ADDR_FULL_MASK;
+ }
+ if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) {
+ ip_entry->ip4src = spec.rem_host[0];
+ ip_mask->ip4src = IP4_ADDR_FULL_MASK;
+ }
+ if (spec.match_flags & EFX_FILTER_MATCH_LOC_PORT) {
+ ip_entry->pdst = spec.loc_port;
+ ip_mask->pdst = PORT_FULL_MASK;
+ }
+ if (spec.match_flags & EFX_FILTER_MATCH_REM_PORT) {
+ ip_entry->psrc = spec.rem_port;
+ ip_mask->psrc = PORT_FULL_MASK;
+ }
+ } else if (!(spec.match_flags &
+ ~(EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG |
+ EFX_FILTER_MATCH_REM_MAC | EFX_FILTER_MATCH_ETHER_TYPE |
+ EFX_FILTER_MATCH_OUTER_VID))) {
+ rule->flow_type = ETHER_FLOW;
+ if (spec.match_flags &
+ (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG)) {
+ ether_addr_copy(mac_entry->h_dest, spec.loc_mac);
+ if (spec.match_flags & EFX_FILTER_MATCH_LOC_MAC)
+ eth_broadcast_addr(mac_mask->h_dest);
+ else
+ ether_addr_copy(mac_mask->h_dest,
+ mac_addr_ig_mask);
+ }
+ if (spec.match_flags & EFX_FILTER_MATCH_REM_MAC) {
+ ether_addr_copy(mac_entry->h_source, spec.rem_mac);
+ eth_broadcast_addr(mac_mask->h_source);
+ }
+ if (spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) {
+ mac_entry->h_proto = spec.ether_type;
+ mac_mask->h_proto = ETHER_TYPE_FULL_MASK;
+ }
+ } else {
+ /* The above should handle all filters that we insert */
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ if (spec.match_flags & EFX_FILTER_MATCH_OUTER_VID) {
+ rule->flow_type |= FLOW_EXT;
+ rule->h_ext.vlan_tci = spec.outer_vid;
+ rule->m_ext.vlan_tci = htons(0xfff);
+ }
+
+ return rc;
+}
+
+static int
+efx_ethtool_get_rxnfc(struct net_device *net_dev,
+ struct ethtool_rxnfc *info, u32 *rule_locs)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ switch (info->cmd) {
+ case ETHTOOL_GRXRINGS:
+ info->data = efx->n_rx_channels;
+ return 0;
+
+ case ETHTOOL_GRXFH: {
+ unsigned min_revision = 0;
+
+ info->data = 0;
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* fall through */
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case IPV4_FLOW:
+ info->data |= RXH_IP_SRC | RXH_IP_DST;
+ min_revision = EFX_REV_FALCON_B0;
+ break;
+ case TCP_V6_FLOW:
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* fall through */
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ case AH_ESP_V6_FLOW:
+ case IPV6_FLOW:
+ info->data |= RXH_IP_SRC | RXH_IP_DST;
+ min_revision = EFX_REV_SIENA_A0;
+ break;
+ default:
+ break;
+ }
+ if (efx_nic_rev(efx) < min_revision)
+ info->data = 0;
+ return 0;
+ }
+
+ case ETHTOOL_GRXCLSRLCNT:
+ info->data = efx_filter_get_rx_id_limit(efx);
+ if (info->data == 0)
+ return -EOPNOTSUPP;
+ info->data |= RX_CLS_LOC_SPECIAL;
+ info->rule_cnt =
+ efx_filter_count_rx_used(efx, EFX_FILTER_PRI_MANUAL);
+ return 0;
+
+ case ETHTOOL_GRXCLSRULE:
+ if (efx_filter_get_rx_id_limit(efx) == 0)
+ return -EOPNOTSUPP;
+ return efx_ethtool_get_class_rule(efx, &info->fs);
+
+ case ETHTOOL_GRXCLSRLALL: {
+ s32 rc;
+ info->data = efx_filter_get_rx_id_limit(efx);
+ if (info->data == 0)
+ return -EOPNOTSUPP;
+ rc = efx_filter_get_rx_ids(efx, EFX_FILTER_PRI_MANUAL,
+ rule_locs, info->rule_cnt);
+ if (rc < 0)
+ return rc;
+ info->rule_cnt = rc;
+ return 0;
+ }
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int efx_ethtool_set_class_rule(struct efx_nic *efx,
+ struct ethtool_rx_flow_spec *rule)
+{
+ struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec;
+ struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec;
+ struct ethhdr *mac_entry = &rule->h_u.ether_spec;
+ struct ethhdr *mac_mask = &rule->m_u.ether_spec;
+ struct efx_filter_spec spec;
+ int rc;
+
+ /* Check that user wants us to choose the location */
+ if (rule->location != RX_CLS_LOC_ANY)
+ return -EINVAL;
+
+ /* Range-check ring_cookie */
+ if (rule->ring_cookie >= efx->n_rx_channels &&
+ rule->ring_cookie != RX_CLS_FLOW_DISC)
+ return -EINVAL;
+
+ /* Check for unsupported extensions */
+ if ((rule->flow_type & FLOW_EXT) &&
+ (rule->m_ext.vlan_etype || rule->m_ext.data[0] ||
+ rule->m_ext.data[1]))
+ return -EINVAL;
+
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL,
+ efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
+ (rule->ring_cookie == RX_CLS_FLOW_DISC) ?
+ EFX_FILTER_RX_DMAQ_ID_DROP : rule->ring_cookie);
+
+ switch (rule->flow_type & ~FLOW_EXT) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ spec.match_flags = (EFX_FILTER_MATCH_ETHER_TYPE |
+ EFX_FILTER_MATCH_IP_PROTO);
+ spec.ether_type = htons(ETH_P_IP);
+ spec.ip_proto = ((rule->flow_type & ~FLOW_EXT) == TCP_V4_FLOW ?
+ IPPROTO_TCP : IPPROTO_UDP);
+ if (ip_mask->ip4dst) {
+ if (ip_mask->ip4dst != IP4_ADDR_FULL_MASK)
+ return -EINVAL;
+ spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST;
+ spec.loc_host[0] = ip_entry->ip4dst;
+ }
+ if (ip_mask->ip4src) {
+ if (ip_mask->ip4src != IP4_ADDR_FULL_MASK)
+ return -EINVAL;
+ spec.match_flags |= EFX_FILTER_MATCH_REM_HOST;
+ spec.rem_host[0] = ip_entry->ip4src;
+ }
+ if (ip_mask->pdst) {
+ if (ip_mask->pdst != PORT_FULL_MASK)
+ return -EINVAL;
+ spec.match_flags |= EFX_FILTER_MATCH_LOC_PORT;
+ spec.loc_port = ip_entry->pdst;
+ }
+ if (ip_mask->psrc) {
+ if (ip_mask->psrc != PORT_FULL_MASK)
+ return -EINVAL;
+ spec.match_flags |= EFX_FILTER_MATCH_REM_PORT;
+ spec.rem_port = ip_entry->psrc;
+ }
+ if (ip_mask->tos)
+ return -EINVAL;
+ break;
+
+ case ETHER_FLOW:
+ if (!is_zero_ether_addr(mac_mask->h_dest)) {
+ if (ether_addr_equal(mac_mask->h_dest,
+ mac_addr_ig_mask))
+ spec.match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG;
+ else if (is_broadcast_ether_addr(mac_mask->h_dest))
+ spec.match_flags |= EFX_FILTER_MATCH_LOC_MAC;
+ else
+ return -EINVAL;
+ ether_addr_copy(spec.loc_mac, mac_entry->h_dest);
+ }
+ if (!is_zero_ether_addr(mac_mask->h_source)) {
+ if (!is_broadcast_ether_addr(mac_mask->h_source))
+ return -EINVAL;
+ spec.match_flags |= EFX_FILTER_MATCH_REM_MAC;
+ ether_addr_copy(spec.rem_mac, mac_entry->h_source);
+ }
+ if (mac_mask->h_proto) {
+ if (mac_mask->h_proto != ETHER_TYPE_FULL_MASK)
+ return -EINVAL;
+ spec.match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
+ spec.ether_type = mac_entry->h_proto;
+ }
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if ((rule->flow_type & FLOW_EXT) && rule->m_ext.vlan_tci) {
+ if (rule->m_ext.vlan_tci != htons(0xfff))
+ return -EINVAL;
+ spec.match_flags |= EFX_FILTER_MATCH_OUTER_VID;
+ spec.outer_vid = rule->h_ext.vlan_tci;
+ }
+
+ rc = efx_filter_insert_filter(efx, &spec, true);
+ if (rc < 0)
+ return rc;
+
+ rule->location = rc;
+ return 0;
+}
+
+static int efx_ethtool_set_rxnfc(struct net_device *net_dev,
+ struct ethtool_rxnfc *info)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ if (efx_filter_get_rx_id_limit(efx) == 0)
+ return -EOPNOTSUPP;
+
+ switch (info->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ return efx_ethtool_set_class_rule(efx, &info->fs);
+
+ case ETHTOOL_SRXCLSRLDEL:
+ return efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_MANUAL,
+ info->fs.location);
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static u32 efx_ethtool_get_rxfh_indir_size(struct net_device *net_dev)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ return ((efx_nic_rev(efx) < EFX_REV_FALCON_B0 ||
+ efx->n_rx_channels == 1) ?
+ 0 : ARRAY_SIZE(efx->rx_indir_table));
+}
+
+static int efx_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key,
+ u8 *hfunc)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+ if (indir)
+ memcpy(indir, efx->rx_indir_table, sizeof(efx->rx_indir_table));
+ return 0;
+}
+
+static int efx_ethtool_set_rxfh(struct net_device *net_dev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ /* We do not allow changes to unsupported parameters */
+ if (key ||
+ (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
+ return -EOPNOTSUPP;
+ if (!indir)
+ return 0;
+ memcpy(efx->rx_indir_table, indir, sizeof(efx->rx_indir_table));
+ efx->type->rx_push_rss_config(efx);
+ return 0;
+}
+
+static int efx_ethtool_get_ts_info(struct net_device *net_dev,
+ struct ethtool_ts_info *ts_info)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ /* Software capabilities */
+ ts_info->so_timestamping = (SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE);
+ ts_info->phc_index = -1;
+
+ efx_ptp_get_ts_info(efx, ts_info);
+ return 0;
+}
+
+static int efx_ethtool_get_module_eeprom(struct net_device *net_dev,
+ struct ethtool_eeprom *ee,
+ u8 *data)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ int ret;
+
+ if (!efx->phy_op || !efx->phy_op->get_module_eeprom)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&efx->mac_lock);
+ ret = efx->phy_op->get_module_eeprom(efx, ee, data);
+ mutex_unlock(&efx->mac_lock);
+
+ return ret;
+}
+
+static int efx_ethtool_get_module_info(struct net_device *net_dev,
+ struct ethtool_modinfo *modinfo)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ int ret;
+
+ if (!efx->phy_op || !efx->phy_op->get_module_info)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&efx->mac_lock);
+ ret = efx->phy_op->get_module_info(efx, modinfo);
+ mutex_unlock(&efx->mac_lock);
+
+ return ret;
+}
+
+const struct ethtool_ops efx_ethtool_ops = {
+ .get_settings = efx_ethtool_get_settings,
+ .set_settings = efx_ethtool_set_settings,
+ .get_drvinfo = efx_ethtool_get_drvinfo,
+ .get_regs_len = efx_ethtool_get_regs_len,
+ .get_regs = efx_ethtool_get_regs,
+ .get_msglevel = efx_ethtool_get_msglevel,
+ .set_msglevel = efx_ethtool_set_msglevel,
+ .nway_reset = efx_ethtool_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_coalesce = efx_ethtool_get_coalesce,
+ .set_coalesce = efx_ethtool_set_coalesce,
+ .get_ringparam = efx_ethtool_get_ringparam,
+ .set_ringparam = efx_ethtool_set_ringparam,
+ .get_pauseparam = efx_ethtool_get_pauseparam,
+ .set_pauseparam = efx_ethtool_set_pauseparam,
+ .get_sset_count = efx_ethtool_get_sset_count,
+ .self_test = efx_ethtool_self_test,
+ .get_strings = efx_ethtool_get_strings,
+ .set_phys_id = efx_ethtool_phys_id,
+ .get_ethtool_stats = efx_ethtool_get_stats,
+ .get_wol = efx_ethtool_get_wol,
+ .set_wol = efx_ethtool_set_wol,
+ .reset = efx_ethtool_reset,
+ .get_rxnfc = efx_ethtool_get_rxnfc,
+ .set_rxnfc = efx_ethtool_set_rxnfc,
+ .get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size,
+ .get_rxfh = efx_ethtool_get_rxfh,
+ .set_rxfh = efx_ethtool_set_rxfh,
+ .get_ts_info = efx_ethtool_get_ts_info,
+ .get_module_info = efx_ethtool_get_module_info,
+ .get_module_eeprom = efx_ethtool_get_module_eeprom,
+};
diff --git a/kernel/drivers/net/ethernet/sfc/falcon.c b/kernel/drivers/net/ethernet/sfc/falcon.c
new file mode 100644
index 000000000..f166c8ef3
--- /dev/null
+++ b/kernel/drivers/net/ethernet/sfc/falcon.c
@@ -0,0 +1,2892 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2013 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/i2c.h>
+#include <linux/mii.h>
+#include <linux/slab.h>
+#include "net_driver.h"
+#include "bitfield.h"
+#include "efx.h"
+#include "nic.h"
+#include "farch_regs.h"
+#include "io.h"
+#include "phy.h"
+#include "workarounds.h"
+#include "selftest.h"
+#include "mdio_10g.h"
+
+/* Hardware control for SFC4000 (aka Falcon). */
+
+/**************************************************************************
+ *
+ * NIC stats
+ *
+ **************************************************************************
+ */
+
+#define FALCON_MAC_STATS_SIZE 0x100
+
+#define XgRxOctets_offset 0x0
+#define XgRxOctets_WIDTH 48
+#define XgRxOctetsOK_offset 0x8
+#define XgRxOctetsOK_WIDTH 48
+#define XgRxPkts_offset 0x10
+#define XgRxPkts_WIDTH 32
+#define XgRxPktsOK_offset 0x14
+#define XgRxPktsOK_WIDTH 32
+#define XgRxBroadcastPkts_offset 0x18
+#define XgRxBroadcastPkts_WIDTH 32
+#define XgRxMulticastPkts_offset 0x1C
+#define XgRxMulticastPkts_WIDTH 32
+#define XgRxUnicastPkts_offset 0x20
+#define XgRxUnicastPkts_WIDTH 32
+#define XgRxUndersizePkts_offset 0x24
+#define XgRxUndersizePkts_WIDTH 32
+#define XgRxOversizePkts_offset 0x28
+#define XgRxOversizePkts_WIDTH 32
+#define XgRxJabberPkts_offset 0x2C
+#define XgRxJabberPkts_WIDTH 32
+#define XgRxUndersizeFCSerrorPkts_offset 0x30
+#define XgRxUndersizeFCSerrorPkts_WIDTH 32
+#define XgRxDropEvents_offset 0x34
+#define XgRxDropEvents_WIDTH 32
+#define XgRxFCSerrorPkts_offset 0x38
+#define XgRxFCSerrorPkts_WIDTH 32
+#define XgRxAlignError_offset 0x3C
+#define XgRxAlignError_WIDTH 32
+#define XgRxSymbolError_offset 0x40
+#define XgRxSymbolError_WIDTH 32
+#define XgRxInternalMACError_offset 0x44
+#define XgRxInternalMACError_WIDTH 32
+#define XgRxControlPkts_offset 0x48
+#define XgRxControlPkts_WIDTH 32
+#define XgRxPausePkts_offset 0x4C
+#define XgRxPausePkts_WIDTH 32
+#define XgRxPkts64Octets_offset 0x50
+#define XgRxPkts64Octets_WIDTH 32
+#define XgRxPkts65to127Octets_offset 0x54
+#define XgRxPkts65to127Octets_WIDTH 32
+#define XgRxPkts128to255Octets_offset 0x58
+#define XgRxPkts128to255Octets_WIDTH 32
+#define XgRxPkts256to511Octets_offset 0x5C
+#define XgRxPkts256to511Octets_WIDTH 32
+#define XgRxPkts512to1023Octets_offset 0x60
+#define XgRxPkts512to1023Octets_WIDTH 32
+#define XgRxPkts1024to15xxOctets_offset 0x64
+#define XgRxPkts1024to15xxOctets_WIDTH 32
+#define XgRxPkts15xxtoMaxOctets_offset 0x68
+#define XgRxPkts15xxtoMaxOctets_WIDTH 32
+#define XgRxLengthError_offset 0x6C
+#define XgRxLengthError_WIDTH 32
+#define XgTxPkts_offset 0x80
+#define XgTxPkts_WIDTH 32
+#define XgTxOctets_offset 0x88
+#define XgTxOctets_WIDTH 48
+#define XgTxMulticastPkts_offset 0x90
+#define XgTxMulticastPkts_WIDTH 32
+#define XgTxBroadcastPkts_offset 0x94
+#define XgTxBroadcastPkts_WIDTH 32
+#define XgTxUnicastPkts_offset 0x98
+#define XgTxUnicastPkts_WIDTH 32
+#define XgTxControlPkts_offset 0x9C
+#define XgTxControlPkts_WIDTH 32
+#define XgTxPausePkts_offset 0xA0
+#define XgTxPausePkts_WIDTH 32
+#define XgTxPkts64Octets_offset 0xA4
+#define XgTxPkts64Octets_WIDTH 32
+#define XgTxPkts65to127Octets_offset 0xA8
+#define XgTxPkts65to127Octets_WIDTH 32
+#define XgTxPkts128to255Octets_offset 0xAC
+#define XgTxPkts128to255Octets_WIDTH 32
+#define XgTxPkts256to511Octets_offset 0xB0
+#define XgTxPkts256to511Octets_WIDTH 32
+#define XgTxPkts512to1023Octets_offset 0xB4
+#define XgTxPkts512to1023Octets_WIDTH 32
+#define XgTxPkts1024to15xxOctets_offset 0xB8
+#define XgTxPkts1024to15xxOctets_WIDTH 32
+#define XgTxPkts1519toMaxOctets_offset 0xBC
+#define XgTxPkts1519toMaxOctets_WIDTH 32
+#define XgTxUndersizePkts_offset 0xC0
+#define XgTxUndersizePkts_WIDTH 32
+#define XgTxOversizePkts_offset 0xC4
+#define XgTxOversizePkts_WIDTH 32
+#define XgTxNonTcpUdpPkt_offset 0xC8
+#define XgTxNonTcpUdpPkt_WIDTH 16
+#define XgTxMacSrcErrPkt_offset 0xCC
+#define XgTxMacSrcErrPkt_WIDTH 16
+#define XgTxIpSrcErrPkt_offset 0xD0
+#define XgTxIpSrcErrPkt_WIDTH 16
+#define XgDmaDone_offset 0xD4
+#define XgDmaDone_WIDTH 32
+
+#define FALCON_XMAC_STATS_DMA_FLAG(efx) \
+ (*(u32 *)((efx)->stats_buffer.addr + XgDmaDone_offset))
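+
+/* Summary of the statistics handshake implemented by falcon_stats_request()
+ * and falcon_stats_complete() below: the driver zeroes the XgDmaDone word in
+ * the DMA buffer, starts the transfer by writing FR_AB_MAC_STAT_DMA, and
+ * later treats a non-zero XgDmaDone word as the signal that the hardware has
+ * finished writing the counter block.
+ */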
+
+#define FALCON_DMA_STAT(ext_name, hw_name) \
+ [FALCON_STAT_ ## ext_name] = \
+ { #ext_name, \
+ /* 48-bit stats are zero-padded to 64 on DMA */ \
+ hw_name ## _ ## WIDTH == 48 ? 64 : hw_name ## _ ## WIDTH, \
+ hw_name ## _ ## offset }
+#define FALCON_OTHER_STAT(ext_name) \
+ [FALCON_STAT_ ## ext_name] = { #ext_name, 0, 0 }
+#define GENERIC_SW_STAT(ext_name) \
+ [GENERIC_STAT_ ## ext_name] = { #ext_name, 0, 0 }
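+
+/* For example, FALCON_DMA_STAT(tx_bytes, XgTxOctets) expands to
+ * [FALCON_STAT_tx_bytes] = { "tx_bytes", 64, 0x88 }: the 48-bit XgTxOctets
+ * counter is zero-padded to a 64-bit field at offset 0x88 in the DMA buffer.
+ */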
+
+static const struct efx_hw_stat_desc falcon_stat_desc[FALCON_STAT_COUNT] = {
+ FALCON_DMA_STAT(tx_bytes, XgTxOctets),
+ FALCON_DMA_STAT(tx_packets, XgTxPkts),
+ FALCON_DMA_STAT(tx_pause, XgTxPausePkts),
+ FALCON_DMA_STAT(tx_control, XgTxControlPkts),
+ FALCON_DMA_STAT(tx_unicast, XgTxUnicastPkts),
+ FALCON_DMA_STAT(tx_multicast, XgTxMulticastPkts),
+ FALCON_DMA_STAT(tx_broadcast, XgTxBroadcastPkts),
+ FALCON_DMA_STAT(tx_lt64, XgTxUndersizePkts),
+ FALCON_DMA_STAT(tx_64, XgTxPkts64Octets),
+ FALCON_DMA_STAT(tx_65_to_127, XgTxPkts65to127Octets),
+ FALCON_DMA_STAT(tx_128_to_255, XgTxPkts128to255Octets),
+ FALCON_DMA_STAT(tx_256_to_511, XgTxPkts256to511Octets),
+ FALCON_DMA_STAT(tx_512_to_1023, XgTxPkts512to1023Octets),
+ FALCON_DMA_STAT(tx_1024_to_15xx, XgTxPkts1024to15xxOctets),
+ FALCON_DMA_STAT(tx_15xx_to_jumbo, XgTxPkts1519toMaxOctets),
+ FALCON_DMA_STAT(tx_gtjumbo, XgTxOversizePkts),
+ FALCON_DMA_STAT(tx_non_tcpudp, XgTxNonTcpUdpPkt),
+ FALCON_DMA_STAT(tx_mac_src_error, XgTxMacSrcErrPkt),
+ FALCON_DMA_STAT(tx_ip_src_error, XgTxIpSrcErrPkt),
+ FALCON_DMA_STAT(rx_bytes, XgRxOctets),
+ FALCON_DMA_STAT(rx_good_bytes, XgRxOctetsOK),
+ FALCON_OTHER_STAT(rx_bad_bytes),
+ FALCON_DMA_STAT(rx_packets, XgRxPkts),
+ FALCON_DMA_STAT(rx_good, XgRxPktsOK),
+ FALCON_DMA_STAT(rx_bad, XgRxFCSerrorPkts),
+ FALCON_DMA_STAT(rx_pause, XgRxPausePkts),
+ FALCON_DMA_STAT(rx_control, XgRxControlPkts),
+ FALCON_DMA_STAT(rx_unicast, XgRxUnicastPkts),
+ FALCON_DMA_STAT(rx_multicast, XgRxMulticastPkts),
+ FALCON_DMA_STAT(rx_broadcast, XgRxBroadcastPkts),
+ FALCON_DMA_STAT(rx_lt64, XgRxUndersizePkts),
+ FALCON_DMA_STAT(rx_64, XgRxPkts64Octets),
+ FALCON_DMA_STAT(rx_65_to_127, XgRxPkts65to127Octets),
+ FALCON_DMA_STAT(rx_128_to_255, XgRxPkts128to255Octets),
+ FALCON_DMA_STAT(rx_256_to_511, XgRxPkts256to511Octets),
+ FALCON_DMA_STAT(rx_512_to_1023, XgRxPkts512to1023Octets),
+ FALCON_DMA_STAT(rx_1024_to_15xx, XgRxPkts1024to15xxOctets),
+ FALCON_DMA_STAT(rx_15xx_to_jumbo, XgRxPkts15xxtoMaxOctets),
+ FALCON_DMA_STAT(rx_gtjumbo, XgRxOversizePkts),
+ FALCON_DMA_STAT(rx_bad_lt64, XgRxUndersizeFCSerrorPkts),
+ FALCON_DMA_STAT(rx_bad_gtjumbo, XgRxJabberPkts),
+ FALCON_DMA_STAT(rx_overflow, XgRxDropEvents),
+ FALCON_DMA_STAT(rx_symbol_error, XgRxSymbolError),
+ FALCON_DMA_STAT(rx_align_error, XgRxAlignError),
+ FALCON_DMA_STAT(rx_length_error, XgRxLengthError),
+ FALCON_DMA_STAT(rx_internal_error, XgRxInternalMACError),
+ FALCON_OTHER_STAT(rx_nodesc_drop_cnt),
+ GENERIC_SW_STAT(rx_nodesc_trunc),
+ GENERIC_SW_STAT(rx_noskb_drops),
+};
+static const unsigned long falcon_stat_mask[] = {
+ [0 ... BITS_TO_LONGS(FALCON_STAT_COUNT) - 1] = ~0UL,
+};
+
+/**************************************************************************
+ *
+ * Basic SPI command set and bit definitions
+ *
+ *************************************************************************/
+
+#define SPI_WRSR 0x01 /* Write status register */
+#define SPI_WRITE 0x02 /* Write data to memory array */
+#define SPI_READ 0x03 /* Read data from memory array */
+#define SPI_WRDI 0x04 /* Reset write enable latch */
+#define SPI_RDSR 0x05 /* Read status register */
+#define SPI_WREN 0x06 /* Set write enable latch */
+#define SPI_SST_EWSR 0x50 /* SST: Enable write to status register */
+
+#define SPI_STATUS_WPEN 0x80 /* Write-protect pin enabled */
+#define SPI_STATUS_BP2 0x10 /* Block protection bit 2 */
+#define SPI_STATUS_BP1 0x08 /* Block protection bit 1 */
+#define SPI_STATUS_BP0 0x04 /* Block protection bit 0 */
+#define SPI_STATUS_WEN 0x02 /* State of the write enable latch */
+#define SPI_STATUS_NRDY 0x01 /* Device busy flag */
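+
+/* A buffered write, as issued by falcon_spi_write() below, strings these
+ * together as: SPI_WREN to set the write enable latch, SPI_WRITE with the
+ * target address and up to one write block of data, then SPI_RDSR polled
+ * until SPI_STATUS_NRDY clears before the block is read back and verified.
+ */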
+
+/**************************************************************************
+ *
+ * Non-volatile memory layout
+ *
+ **************************************************************************
+ */
+
+/* SFC4000 flash is partitioned into:
+ * 0-0x400 chip and board config (see struct falcon_nvconfig)
+ * 0x400-0x8000 unused (or may contain VPD if EEPROM not present)
+ * 0x8000-end boot code (mapped to PCI expansion ROM)
+ * SFC4000 small EEPROM (size < 0x400) is used for VPD only.
+ * SFC4000 large EEPROM (size >= 0x400) is partitioned into:
+ * 0-0x400 chip and board config
+ * configurable VPD
+ * 0x800-0x1800 boot config
+ * Aside from the chip and board config, all of these are optional and may
+ * be absent or truncated depending on the devices used.
+ */
+#define FALCON_NVCONFIG_END 0x400U
+#define FALCON_FLASH_BOOTCODE_START 0x8000U
+#define FALCON_EEPROM_BOOTCONFIG_START 0x800U
+#define FALCON_EEPROM_BOOTCONFIG_END 0x1800U
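+
+/* Under CONFIG_SFC_MTD (see falcon_mtd_probe() below) these bounds become
+ * MTD partitions: the flash boot ROM partition covers
+ * [FALCON_FLASH_BOOTCODE_START, flash size) and the EEPROM boot config
+ * partition covers [FALCON_EEPROM_BOOTCONFIG_START,
+ * min(EEPROM size, FALCON_EEPROM_BOOTCONFIG_END)).
+ */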
+
+/* Board configuration v2 (v1 is obsolete; later versions are compatible) */
+struct falcon_nvconfig_board_v2 {
+ __le16 nports;
+ u8 port0_phy_addr;
+ u8 port0_phy_type;
+ u8 port1_phy_addr;
+ u8 port1_phy_type;
+ __le16 asic_sub_revision;
+ __le16 board_revision;
+} __packed;
+
+/* Board configuration v3 extra information */
+struct falcon_nvconfig_board_v3 {
+ __le32 spi_device_type[2];
+} __packed;
+
+/* Bit numbers for spi_device_type */
+#define SPI_DEV_TYPE_SIZE_LBN 0
+#define SPI_DEV_TYPE_SIZE_WIDTH 5
+#define SPI_DEV_TYPE_ADDR_LEN_LBN 6
+#define SPI_DEV_TYPE_ADDR_LEN_WIDTH 2
+#define SPI_DEV_TYPE_ERASE_CMD_LBN 8
+#define SPI_DEV_TYPE_ERASE_CMD_WIDTH 8
+#define SPI_DEV_TYPE_ERASE_SIZE_LBN 16
+#define SPI_DEV_TYPE_ERASE_SIZE_WIDTH 5
+#define SPI_DEV_TYPE_BLOCK_SIZE_LBN 24
+#define SPI_DEV_TYPE_BLOCK_SIZE_WIDTH 5
+#define SPI_DEV_TYPE_FIELD(type, field) \
+ (((type) >> EFX_LOW_BIT(field)) & EFX_MASK32(EFX_WIDTH(field)))
+
+#define FALCON_NVCONFIG_OFFSET 0x300
+
+#define FALCON_NVCONFIG_BOARD_MAGIC_NUM 0xFA1C
+struct falcon_nvconfig {
+ efx_oword_t ee_vpd_cfg_reg; /* 0x300 */
+ u8 mac_address[2][8]; /* 0x310 */
+ efx_oword_t pcie_sd_ctl0123_reg; /* 0x320 */
+ efx_oword_t pcie_sd_ctl45_reg; /* 0x330 */
+ efx_oword_t pcie_pcs_ctl_stat_reg; /* 0x340 */
+ efx_oword_t hw_init_reg; /* 0x350 */
+ efx_oword_t nic_stat_reg; /* 0x360 */
+ efx_oword_t glb_ctl_reg; /* 0x370 */
+ efx_oword_t srm_cfg_reg; /* 0x380 */
+ efx_oword_t spare_reg; /* 0x390 */
+ __le16 board_magic_num; /* 0x3A0 */
+ __le16 board_struct_ver;
+ __le16 board_checksum;
+ struct falcon_nvconfig_board_v2 board_v2;
+ efx_oword_t ee_base_page_reg; /* 0x3B0 */
+ struct falcon_nvconfig_board_v3 board_v3; /* 0x3C0 */
+} __packed;
+
+/*************************************************************************/
+
+static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method);
+static void falcon_reconfigure_mac_wrapper(struct efx_nic *efx);
+
+static const unsigned int
+/* "Large" EEPROM device: Atmel AT25640 or similar
+ * 8 KB, 16-bit address, 32 B write block */
+large_eeprom_type = ((13 << SPI_DEV_TYPE_SIZE_LBN)
+ | (2 << SPI_DEV_TYPE_ADDR_LEN_LBN)
+ | (5 << SPI_DEV_TYPE_BLOCK_SIZE_LBN)),
+/* Default flash device: Atmel AT25F1024
+ * 128 KB, 24-bit address, 32 KB erase block, 256 B write block */
+default_flash_type = ((17 << SPI_DEV_TYPE_SIZE_LBN)
+ | (3 << SPI_DEV_TYPE_ADDR_LEN_LBN)
+ | (0x52 << SPI_DEV_TYPE_ERASE_CMD_LBN)
+ | (15 << SPI_DEV_TYPE_ERASE_SIZE_LBN)
+ | (8 << SPI_DEV_TYPE_BLOCK_SIZE_LBN));
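+
+/* Decoding with SPI_DEV_TYPE_FIELD(): for default_flash_type above,
+ * SIZE = 17 (1 << 17 = 128 KB), ADDR_LEN = 3 bytes, ERASE_CMD = 0x52,
+ * ERASE_SIZE = 15 (1 << 15 = 32 KB) and BLOCK_SIZE = 8 (1 << 8 = 256 B),
+ * matching the Atmel AT25F1024 parameters noted in the comment above.
+ */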
+
+/**************************************************************************
+ *
+ * I2C bus - this is a bit-bashing interface using GPIO pins
+ * Note that it uses the output enables to tristate the outputs
+ * SDA is the data pin and SCL is the clock
+ *
+ **************************************************************************
+ */
+static void falcon_setsda(void *data, int state)
+{
+ struct efx_nic *efx = (struct efx_nic *)data;
+ efx_oword_t reg;
+
+ efx_reado(efx, &reg, FR_AB_GPIO_CTL);
+ EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO3_OEN, !state);
+ efx_writeo(efx, &reg, FR_AB_GPIO_CTL);
+}
+
+static void falcon_setscl(void *data, int state)
+{
+ struct efx_nic *efx = (struct efx_nic *)data;
+ efx_oword_t reg;
+
+ efx_reado(efx, &reg, FR_AB_GPIO_CTL);
+ EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO0_OEN, !state);
+ efx_writeo(efx, &reg, FR_AB_GPIO_CTL);
+}
+
+static int falcon_getsda(void *data)
+{
+ struct efx_nic *efx = (struct efx_nic *)data;
+ efx_oword_t reg;
+
+ efx_reado(efx, &reg, FR_AB_GPIO_CTL);
+ return EFX_OWORD_FIELD(reg, FRF_AB_GPIO3_IN);
+}
+
+static int falcon_getscl(void *data)
+{
+ struct efx_nic *efx = (struct efx_nic *)data;
+ efx_oword_t reg;
+
+ efx_reado(efx, &reg, FR_AB_GPIO_CTL);
+ return EFX_OWORD_FIELD(reg, FRF_AB_GPIO0_IN);
+}
+
+static const struct i2c_algo_bit_data falcon_i2c_bit_operations = {
+ .setsda = falcon_setsda,
+ .setscl = falcon_setscl,
+ .getsda = falcon_getsda,
+ .getscl = falcon_getscl,
+ .udelay = 5,
+ /* Wait up to 50 ms for slave to let us pull SCL high */
+ .timeout = DIV_ROUND_UP(HZ, 20),
+};
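+
+/* A minimal sketch of how these callbacks are typically consumed (the
+ * actual registration happens in the probe path, not shown here): a copy of
+ * this struct i2c_algo_bit_data is made per board with .data pointing at
+ * the efx_nic, installed as i2c_adapter.algo_data, and the adapter is then
+ * registered with i2c_bit_add_bus(), after which the i2c-algo-bit core
+ * drives SDA/SCL through the four GPIO accessors above.
+ */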
+
+static void falcon_push_irq_moderation(struct efx_channel *channel)
+{
+ efx_dword_t timer_cmd;
+ struct efx_nic *efx = channel->efx;
+
+ /* Set timer register */
+ if (channel->irq_moderation) {
+ EFX_POPULATE_DWORD_2(timer_cmd,
+ FRF_AB_TC_TIMER_MODE,
+ FFE_BB_TIMER_MODE_INT_HLDOFF,
+ FRF_AB_TC_TIMER_VAL,
+ channel->irq_moderation - 1);
+ } else {
+ EFX_POPULATE_DWORD_2(timer_cmd,
+ FRF_AB_TC_TIMER_MODE,
+ FFE_BB_TIMER_MODE_DIS,
+ FRF_AB_TC_TIMER_VAL, 0);
+ }
+ BUILD_BUG_ON(FR_AA_TIMER_COMMAND_KER != FR_BZ_TIMER_COMMAND_P0);
+ efx_writed_page_locked(efx, &timer_cmd, FR_BZ_TIMER_COMMAND_P0,
+ channel->channel);
+}
+
+static void falcon_deconfigure_mac_wrapper(struct efx_nic *efx);
+
+static void falcon_prepare_flush(struct efx_nic *efx)
+{
+ falcon_deconfigure_mac_wrapper(efx);
+
+ /* Wait for the TX and RX FIFOs to get to the next packet boundary
+ * (~1ms without back-pressure), then to drain the remainder of the
+ * FIFOs at data path speeds (negligible), with a healthy margin. */
+ msleep(10);
+}
+
+/* Acknowledge a legacy interrupt from Falcon
+ *
+ * This acknowledges a legacy (not MSI) interrupt via INT_ACK_KER_REG.
+ *
+ * Due to SFC bug 3706 (silicon revision <= A1) reads can be duplicated in the
+ * BIU. Interrupt acknowledge is read-sensitive, so we must write instead
+ * (then read to ensure the BIU collector is flushed).
+ *
+ * NB most hardware supports MSI interrupts
+ */
+static inline void falcon_irq_ack_a1(struct efx_nic *efx)
+{
+ efx_dword_t reg;
+
+ EFX_POPULATE_DWORD_1(reg, FRF_AA_INT_ACK_KER_FIELD, 0xb7eb7e);
+ efx_writed(efx, &reg, FR_AA_INT_ACK_KER);
+ efx_readd(efx, &reg, FR_AA_WORK_AROUND_BROKEN_PCI_READS);
+}
+
+static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
+{
+ struct efx_nic *efx = dev_id;
+ efx_oword_t *int_ker = efx->irq_status.addr;
+ int syserr;
+ int queues;
+
+ /* Check to see if this is our interrupt. If it isn't, we
+ * exit without having touched the hardware.
+ */
+ if (unlikely(EFX_OWORD_IS_ZERO(*int_ker))) {
+ netif_vdbg(efx, intr, efx->net_dev,
+ "IRQ %d on CPU %d not for me\n", irq,
+ raw_smp_processor_id());
+ return IRQ_NONE;
+ }
+ efx->last_irq_cpu = raw_smp_processor_id();
+ netif_vdbg(efx, intr, efx->net_dev,
+ "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
+ irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
+
+ if (!likely(ACCESS_ONCE(efx->irq_soft_enabled)))
+ return IRQ_HANDLED;
+
+ /* Check to see if we have a serious error condition */
+ syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
+ if (unlikely(syserr))
+ return efx_farch_fatal_interrupt(efx);
+
+ /* Determine interrupting queues, clear interrupt status
+ * register and acknowledge the device interrupt.
+ */
+ BUILD_BUG_ON(FSF_AZ_NET_IVEC_INT_Q_WIDTH > EFX_MAX_CHANNELS);
+ queues = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_INT_Q);
+ EFX_ZERO_OWORD(*int_ker);
+ wmb(); /* Ensure the vector is cleared before interrupt ack */
+ falcon_irq_ack_a1(efx);
+
+ if (queues & 1)
+ efx_schedule_channel_irq(efx_get_channel(efx, 0));
+ if (queues & 2)
+ efx_schedule_channel_irq(efx_get_channel(efx, 1));
+ return IRQ_HANDLED;
+}
+
+/**************************************************************************
+ *
+ * RSS
+ *
+ **************************************************************************
+ */
+
+static void falcon_b0_rx_push_rss_config(struct efx_nic *efx)
+{
+ efx_oword_t temp;
+
+ /* Set hash key for IPv4 */
+ memcpy(&temp, efx->rx_hash_key, sizeof(temp));
+ efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);
+
+ efx_farch_rx_push_indir_table(efx);
+}
+
+/**************************************************************************
+ *
+ * EEPROM/flash
+ *
+ **************************************************************************
+ */
+
+#define FALCON_SPI_MAX_LEN sizeof(efx_oword_t)
+
+static int falcon_spi_poll(struct efx_nic *efx)
+{
+ efx_oword_t reg;
+ efx_reado(efx, &reg, FR_AB_EE_SPI_HCMD);
+ return EFX_OWORD_FIELD(reg, FRF_AB_EE_SPI_HCMD_CMD_EN) ? -EBUSY : 0;
+}
+
+/* Wait for SPI command completion */
+static int falcon_spi_wait(struct efx_nic *efx)
+{
+ /* Most commands will finish quickly, so we start polling at
+ * very short intervals. Sometimes the command may have to
+ * wait for VPD or expansion ROM access outside of our
+ * control, so we allow up to 100 ms. */
+ unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 10);
+ int i;
+
+ for (i = 0; i < 10; i++) {
+ if (!falcon_spi_poll(efx))
+ return 0;
+ udelay(10);
+ }
+
+ for (;;) {
+ if (!falcon_spi_poll(efx))
+ return 0;
+ if (time_after_eq(jiffies, timeout)) {
+ netif_err(efx, hw, efx->net_dev,
+ "timed out waiting for SPI\n");
+ return -ETIMEDOUT;
+ }
+ schedule_timeout_uninterruptible(1);
+ }
+}
+
+static int
+falcon_spi_cmd(struct efx_nic *efx, const struct falcon_spi_device *spi,
+ unsigned int command, int address,
+ const void *in, void *out, size_t len)
+{
+ bool addressed = (address >= 0);
+ bool reading = (out != NULL);
+ efx_oword_t reg;
+ int rc;
+
+ /* Input validation */
+ if (len > FALCON_SPI_MAX_LEN)
+ return -EINVAL;
+
+ /* Check that previous command is not still running */
+ rc = falcon_spi_poll(efx);
+ if (rc)
+ return rc;
+
+ /* Program address register, if we have an address */
+ if (addressed) {
+ EFX_POPULATE_OWORD_1(reg, FRF_AB_EE_SPI_HADR_ADR, address);
+ efx_writeo(efx, &reg, FR_AB_EE_SPI_HADR);
+ }
+
+ /* Program data register, if we have data */
+ if (in != NULL) {
+ memcpy(&reg, in, len);
+ efx_writeo(efx, &reg, FR_AB_EE_SPI_HDATA);
+ }
+
+ /* Issue read/write command */
+ EFX_POPULATE_OWORD_7(reg,
+ FRF_AB_EE_SPI_HCMD_CMD_EN, 1,
+ FRF_AB_EE_SPI_HCMD_SF_SEL, spi->device_id,
+ FRF_AB_EE_SPI_HCMD_DABCNT, len,
+ FRF_AB_EE_SPI_HCMD_READ, reading,
+ FRF_AB_EE_SPI_HCMD_DUBCNT, 0,
+ FRF_AB_EE_SPI_HCMD_ADBCNT,
+ (addressed ? spi->addr_len : 0),
+ FRF_AB_EE_SPI_HCMD_ENC, command);
+ efx_writeo(efx, &reg, FR_AB_EE_SPI_HCMD);
+
+ /* Wait for read/write to complete */
+ rc = falcon_spi_wait(efx);
+ if (rc)
+ return rc;
+
+ /* Read data */
+ if (out != NULL) {
+ efx_reado(efx, &reg, FR_AB_EE_SPI_HDATA);
+ memcpy(out, &reg, len);
+ }
+
+ return 0;
+}
+
+static inline u8
+falcon_spi_munge_command(const struct falcon_spi_device *spi,
+ const u8 command, const unsigned int address)
+{
+ return command | (((address >> 8) & spi->munge_address) << 3);
+}
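+
+/* Worked example, assuming a small serial EEPROM with munge_address set to
+ * 1: reading address 0x150 turns SPI_READ (0x03) into 0x0b, i.e. address
+ * bit 8 is carried in bit 3 of the command byte, as some small EEPROMs with
+ * single-byte addressing require.
+ */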
+
+static int
+falcon_spi_read(struct efx_nic *efx, const struct falcon_spi_device *spi,
+ loff_t start, size_t len, size_t *retlen, u8 *buffer)
+{
+ size_t block_len, pos = 0;
+ unsigned int command;
+ int rc = 0;
+
+ while (pos < len) {
+ block_len = min(len - pos, FALCON_SPI_MAX_LEN);
+
+ command = falcon_spi_munge_command(spi, SPI_READ, start + pos);
+ rc = falcon_spi_cmd(efx, spi, command, start + pos, NULL,
+ buffer + pos, block_len);
+ if (rc)
+ break;
+ pos += block_len;
+
+ /* Avoid locking up the system */
+ cond_resched();
+ if (signal_pending(current)) {
+ rc = -EINTR;
+ break;
+ }
+ }
+
+ if (retlen)
+ *retlen = pos;
+ return rc;
+}
+
+#ifdef CONFIG_SFC_MTD
+
+struct falcon_mtd_partition {
+ struct efx_mtd_partition common;
+ const struct falcon_spi_device *spi;
+ size_t offset;
+};
+
+#define to_falcon_mtd_partition(mtd) \
+ container_of(mtd, struct falcon_mtd_partition, common.mtd)
+
+static size_t
+falcon_spi_write_limit(const struct falcon_spi_device *spi, size_t start)
+{
+ return min(FALCON_SPI_MAX_LEN,
+ (spi->block_size - (start & (spi->block_size - 1))));
+}
+
+/* Wait up to 10 ms for buffered write completion */
+static int
+falcon_spi_wait_write(struct efx_nic *efx, const struct falcon_spi_device *spi)
+{
+ unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 100);
+ u8 status;
+ int rc;
+
+ for (;;) {
+ rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
+ &status, sizeof(status));
+ if (rc)
+ return rc;
+ if (!(status & SPI_STATUS_NRDY))
+ return 0;
+ if (time_after_eq(jiffies, timeout)) {
+ netif_err(efx, hw, efx->net_dev,
+ "SPI write timeout on device %d"
+ " last status=0x%02x\n",
+ spi->device_id, status);
+ return -ETIMEDOUT;
+ }
+ schedule_timeout_uninterruptible(1);
+ }
+}
+
+static int
+falcon_spi_write(struct efx_nic *efx, const struct falcon_spi_device *spi,
+ loff_t start, size_t len, size_t *retlen, const u8 *buffer)
+{
+ u8 verify_buffer[FALCON_SPI_MAX_LEN];
+ size_t block_len, pos = 0;
+ unsigned int command;
+ int rc = 0;
+
+ while (pos < len) {
+ rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
+ if (rc)
+ break;
+
+ block_len = min(len - pos,
+ falcon_spi_write_limit(spi, start + pos));
+ command = falcon_spi_munge_command(spi, SPI_WRITE, start + pos);
+ rc = falcon_spi_cmd(efx, spi, command, start + pos,
+ buffer + pos, NULL, block_len);
+ if (rc)
+ break;
+
+ rc = falcon_spi_wait_write(efx, spi);
+ if (rc)
+ break;
+
+ command = falcon_spi_munge_command(spi, SPI_READ, start + pos);
+ rc = falcon_spi_cmd(efx, spi, command, start + pos,
+ NULL, verify_buffer, block_len);
+ if (rc)
+ break;
+ if (memcmp(verify_buffer, buffer + pos, block_len)) {
+ rc = -EIO;
+ break;
+ }
+
+ pos += block_len;
+
+ /* Avoid locking up the system */
+ cond_resched();
+ if (signal_pending(current)) {
+ rc = -EINTR;
+ break;
+ }
+ }
+
+ if (retlen)
+ *retlen = pos;
+ return rc;
+}
+
+static int
+falcon_spi_slow_wait(struct falcon_mtd_partition *part, bool uninterruptible)
+{
+ const struct falcon_spi_device *spi = part->spi;
+ struct efx_nic *efx = part->common.mtd.priv;
+ u8 status;
+ int rc, i;
+
+ /* Wait up to 4s for flash/EEPROM to finish a slow operation. */
+ for (i = 0; i < 40; i++) {
+ __set_current_state(uninterruptible ?
+ TASK_UNINTERRUPTIBLE : TASK_INTERRUPTIBLE);
+ schedule_timeout(HZ / 10);
+ rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
+ &status, sizeof(status));
+ if (rc)
+ return rc;
+ if (!(status & SPI_STATUS_NRDY))
+ return 0;
+ if (signal_pending(current))
+ return -EINTR;
+ }
+ pr_err("%s: timed out waiting for %s\n",
+ part->common.name, part->common.dev_type_name);
+ return -ETIMEDOUT;
+}
+
+static int
+falcon_spi_unlock(struct efx_nic *efx, const struct falcon_spi_device *spi)
+{
+ const u8 unlock_mask = (SPI_STATUS_BP2 | SPI_STATUS_BP1 |
+ SPI_STATUS_BP0);
+ u8 status;
+ int rc;
+
+ rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
+ &status, sizeof(status));
+ if (rc)
+ return rc;
+
+ if (!(status & unlock_mask))
+ return 0; /* already unlocked */
+
+ rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
+ if (rc)
+ return rc;
+ rc = falcon_spi_cmd(efx, spi, SPI_SST_EWSR, -1, NULL, NULL, 0);
+ if (rc)
+ return rc;
+
+ status &= ~unlock_mask;
+ rc = falcon_spi_cmd(efx, spi, SPI_WRSR, -1, &status,
+ NULL, sizeof(status));
+ if (rc)
+ return rc;
+ rc = falcon_spi_wait_write(efx, spi);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+#define FALCON_SPI_VERIFY_BUF_LEN 16
+
+static int
+falcon_spi_erase(struct falcon_mtd_partition *part, loff_t start, size_t len)
+{
+ const struct falcon_spi_device *spi = part->spi;
+ struct efx_nic *efx = part->common.mtd.priv;
+ unsigned pos, block_len;
+ u8 empty[FALCON_SPI_VERIFY_BUF_LEN];
+ u8 buffer[FALCON_SPI_VERIFY_BUF_LEN];
+ int rc;
+
+ if (len != spi->erase_size)
+ return -EINVAL;
+
+ if (spi->erase_command == 0)
+ return -EOPNOTSUPP;
+
+ rc = falcon_spi_unlock(efx, spi);
+ if (rc)
+ return rc;
+ rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
+ if (rc)
+ return rc;
+ rc = falcon_spi_cmd(efx, spi, spi->erase_command, start, NULL,
+ NULL, 0);
+ if (rc)
+ return rc;
+ rc = falcon_spi_slow_wait(part, false);
+
+ /* Verify the entire region has been wiped */
+ memset(empty, 0xff, sizeof(empty));
+ for (pos = 0; pos < len; pos += block_len) {
+ block_len = min(len - pos, sizeof(buffer));
+ rc = falcon_spi_read(efx, spi, start + pos, block_len,
+ NULL, buffer);
+ if (rc)
+ return rc;
+ if (memcmp(empty, buffer, block_len))
+ return -EIO;
+
+ /* Avoid locking up the system */
+ cond_resched();
+ if (signal_pending(current))
+ return -EINTR;
+ }
+
+ return rc;
+}
+
+static void falcon_mtd_rename(struct efx_mtd_partition *part)
+{
+ struct efx_nic *efx = part->mtd.priv;
+
+ snprintf(part->name, sizeof(part->name), "%s %s",
+ efx->name, part->type_name);
+}
+
+static int falcon_mtd_read(struct mtd_info *mtd, loff_t start,
+ size_t len, size_t *retlen, u8 *buffer)
+{
+ struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd);
+ struct efx_nic *efx = mtd->priv;
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ rc = mutex_lock_interruptible(&nic_data->spi_lock);
+ if (rc)
+ return rc;
+ rc = falcon_spi_read(efx, part->spi, part->offset + start,
+ len, retlen, buffer);
+ mutex_unlock(&nic_data->spi_lock);
+ return rc;
+}
+
+static int falcon_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
+{
+ struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd);
+ struct efx_nic *efx = mtd->priv;
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ rc = mutex_lock_interruptible(&nic_data->spi_lock);
+ if (rc)
+ return rc;
+ rc = falcon_spi_erase(part, part->offset + start, len);
+ mutex_unlock(&nic_data->spi_lock);
+ return rc;
+}
+
+static int falcon_mtd_write(struct mtd_info *mtd, loff_t start,
+ size_t len, size_t *retlen, const u8 *buffer)
+{
+ struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd);
+ struct efx_nic *efx = mtd->priv;
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ rc = mutex_lock_interruptible(&nic_data->spi_lock);
+ if (rc)
+ return rc;
+ rc = falcon_spi_write(efx, part->spi, part->offset + start,
+ len, retlen, buffer);
+ mutex_unlock(&nic_data->spi_lock);
+ return rc;
+}
+
+static int falcon_mtd_sync(struct mtd_info *mtd)
+{
+ struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd);
+ struct efx_nic *efx = mtd->priv;
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ mutex_lock(&nic_data->spi_lock);
+ rc = falcon_spi_slow_wait(part, true);
+ mutex_unlock(&nic_data->spi_lock);
+ return rc;
+}
+
+static int falcon_mtd_probe(struct efx_nic *efx)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ struct falcon_mtd_partition *parts;
+ struct falcon_spi_device *spi;
+ size_t n_parts;
+ int rc = -ENODEV;
+
+ ASSERT_RTNL();
+
+ /* Allocate space for maximum number of partitions */
+ parts = kcalloc(2, sizeof(*parts), GFP_KERNEL);
+ if (!parts)
+ return -ENOMEM;
+ n_parts = 0;
+
+ spi = &nic_data->spi_flash;
+ if (falcon_spi_present(spi) && spi->size > FALCON_FLASH_BOOTCODE_START) {
+ parts[n_parts].spi = spi;
+ parts[n_parts].offset = FALCON_FLASH_BOOTCODE_START;
+ parts[n_parts].common.dev_type_name = "flash";
+ parts[n_parts].common.type_name = "sfc_flash_bootrom";
+ parts[n_parts].common.mtd.type = MTD_NORFLASH;
+ parts[n_parts].common.mtd.flags = MTD_CAP_NORFLASH;
+ parts[n_parts].common.mtd.size = spi->size - FALCON_FLASH_BOOTCODE_START;
+ parts[n_parts].common.mtd.erasesize = spi->erase_size;
+ n_parts++;
+ }
+
+ spi = &nic_data->spi_eeprom;
+ if (falcon_spi_present(spi) && spi->size > FALCON_EEPROM_BOOTCONFIG_START) {
+ parts[n_parts].spi = spi;
+ parts[n_parts].offset = FALCON_EEPROM_BOOTCONFIG_START;
+ parts[n_parts].common.dev_type_name = "EEPROM";
+ parts[n_parts].common.type_name = "sfc_bootconfig";
+ parts[n_parts].common.mtd.type = MTD_RAM;
+ parts[n_parts].common.mtd.flags = MTD_CAP_RAM;
+ parts[n_parts].common.mtd.size =
+ min(spi->size, FALCON_EEPROM_BOOTCONFIG_END) -
+ FALCON_EEPROM_BOOTCONFIG_START;
+ parts[n_parts].common.mtd.erasesize = spi->erase_size;
+ n_parts++;
+ }
+
+ rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
+ if (rc)
+ kfree(parts);
+ return rc;
+}
+
+#endif /* CONFIG_SFC_MTD */
+
+/**************************************************************************
+ *
+ * XMAC operations
+ *
+ **************************************************************************
+ */
+
+/* Configure the XAUI driver that is an output from Falcon */
+static void falcon_setup_xaui(struct efx_nic *efx)
+{
+ efx_oword_t sdctl, txdrv;
+
+ /* Move the XAUI into low power, unless there is no PHY, in
+ * which case the XAUI will have to drive a cable. */
+ if (efx->phy_type == PHY_TYPE_NONE)
+ return;
+
+ efx_reado(efx, &sdctl, FR_AB_XX_SD_CTL);
+ EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVD, FFE_AB_XX_SD_CTL_DRV_DEF);
+ EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVD, FFE_AB_XX_SD_CTL_DRV_DEF);
+ EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVC, FFE_AB_XX_SD_CTL_DRV_DEF);
+ EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVC, FFE_AB_XX_SD_CTL_DRV_DEF);
+ EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVB, FFE_AB_XX_SD_CTL_DRV_DEF);
+ EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVB, FFE_AB_XX_SD_CTL_DRV_DEF);
+ EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVA, FFE_AB_XX_SD_CTL_DRV_DEF);
+ EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVA, FFE_AB_XX_SD_CTL_DRV_DEF);
+ efx_writeo(efx, &sdctl, FR_AB_XX_SD_CTL);
+
+ EFX_POPULATE_OWORD_8(txdrv,
+ FRF_AB_XX_DEQD, FFE_AB_XX_TXDRV_DEQ_DEF,
+ FRF_AB_XX_DEQC, FFE_AB_XX_TXDRV_DEQ_DEF,
+ FRF_AB_XX_DEQB, FFE_AB_XX_TXDRV_DEQ_DEF,
+ FRF_AB_XX_DEQA, FFE_AB_XX_TXDRV_DEQ_DEF,
+ FRF_AB_XX_DTXD, FFE_AB_XX_TXDRV_DTX_DEF,
+ FRF_AB_XX_DTXC, FFE_AB_XX_TXDRV_DTX_DEF,
+ FRF_AB_XX_DTXB, FFE_AB_XX_TXDRV_DTX_DEF,
+ FRF_AB_XX_DTXA, FFE_AB_XX_TXDRV_DTX_DEF);
+ efx_writeo(efx, &txdrv, FR_AB_XX_TXDRV_CTL);
+}
+
+int falcon_reset_xaui(struct efx_nic *efx)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ efx_oword_t reg;
+ int count;
+
+ /* Don't fetch MAC statistics over an XMAC reset */
+ WARN_ON(nic_data->stats_disable_count == 0);
+
+ /* Start reset sequence */
+ EFX_POPULATE_OWORD_1(reg, FRF_AB_XX_RST_XX_EN, 1);
+ efx_writeo(efx, &reg, FR_AB_XX_PWR_RST);
+
+ /* Wait up to 10 ms for completion, then reinitialise */
+ for (count = 0; count < 1000; count++) {
+ efx_reado(efx, &reg, FR_AB_XX_PWR_RST);
+ if (EFX_OWORD_FIELD(reg, FRF_AB_XX_RST_XX_EN) == 0 &&
+ EFX_OWORD_FIELD(reg, FRF_AB_XX_SD_RST_ACT) == 0) {
+ falcon_setup_xaui(efx);
+ return 0;
+ }
+ udelay(10);
+ }
+ netif_err(efx, hw, efx->net_dev,
+ "timed out waiting for XAUI/XGXS reset\n");
+ return -ETIMEDOUT;
+}
+
+static void falcon_ack_status_intr(struct efx_nic *efx)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ efx_oword_t reg;
+
+ if ((efx_nic_rev(efx) != EFX_REV_FALCON_B0) || LOOPBACK_INTERNAL(efx))
+ return;
+
+ /* We expect xgmii faults if the wireside link is down */
+ if (!efx->link_state.up)
+ return;
+
+ /* We can only use this interrupt to signal the negative edge of
+ * xaui_align [we have to poll the positive edge]. */
+ if (nic_data->xmac_poll_required)
+ return;
+
+ efx_reado(efx, &reg, FR_AB_XM_MGT_INT_MSK);
+}
+
+static bool falcon_xgxs_link_ok(struct efx_nic *efx)
+{
+ efx_oword_t reg;
+ bool align_done, link_ok = false;
+ int sync_status;
+
+ /* Read link status */
+ efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
+
+ align_done = EFX_OWORD_FIELD(reg, FRF_AB_XX_ALIGN_DONE);
+ sync_status = EFX_OWORD_FIELD(reg, FRF_AB_XX_SYNC_STAT);
+ if (align_done && (sync_status == FFE_AB_XX_STAT_ALL_LANES))
+ link_ok = true;
+
+ /* Clear link status ready for next read */
+ EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_COMMA_DET, FFE_AB_XX_STAT_ALL_LANES);
+ EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_CHAR_ERR, FFE_AB_XX_STAT_ALL_LANES);
+ EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_DISPERR, FFE_AB_XX_STAT_ALL_LANES);
+ efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT);
+
+ return link_ok;
+}
+
+static bool falcon_xmac_link_ok(struct efx_nic *efx)
+{
+ /*
+ * Check MAC's XGXS link status except when using XGMII loopback
+ * which bypasses the XGXS block.
+ * If possible, check PHY's XGXS link status except when using
+ * MAC loopback.
+ */
+ return (efx->loopback_mode == LOOPBACK_XGMII ||
+ falcon_xgxs_link_ok(efx)) &&
+ (!(efx->mdio.mmds & (1 << MDIO_MMD_PHYXS)) ||
+ LOOPBACK_INTERNAL(efx) ||
+ efx_mdio_phyxgxs_lane_sync(efx));
+}
+
+static void falcon_reconfigure_xmac_core(struct efx_nic *efx)
+{
+ unsigned int max_frame_len;
+ efx_oword_t reg;
+ bool rx_fc = !!(efx->link_state.fc & EFX_FC_RX);
+ bool tx_fc = !!(efx->link_state.fc & EFX_FC_TX);
+
+ /* Configure MAC - cut-through mode is hard-wired on */
+ EFX_POPULATE_OWORD_3(reg,
+ FRF_AB_XM_RX_JUMBO_MODE, 1,
+ FRF_AB_XM_TX_STAT_EN, 1,
+ FRF_AB_XM_RX_STAT_EN, 1);
+ efx_writeo(efx, &reg, FR_AB_XM_GLB_CFG);
+
+ /* Configure TX */
+ EFX_POPULATE_OWORD_6(reg,
+ FRF_AB_XM_TXEN, 1,
+ FRF_AB_XM_TX_PRMBL, 1,
+ FRF_AB_XM_AUTO_PAD, 1,
+ FRF_AB_XM_TXCRC, 1,
+ FRF_AB_XM_FCNTL, tx_fc,
+ FRF_AB_XM_IPG, 0x3);
+ efx_writeo(efx, &reg, FR_AB_XM_TX_CFG);
+
+ /* Configure RX */
+ EFX_POPULATE_OWORD_5(reg,
+ FRF_AB_XM_RXEN, 1,
+ FRF_AB_XM_AUTO_DEPAD, 0,
+ FRF_AB_XM_ACPT_ALL_MCAST, 1,
+ FRF_AB_XM_ACPT_ALL_UCAST, !efx->unicast_filter,
+ FRF_AB_XM_PASS_CRC_ERR, 1);
+ efx_writeo(efx, &reg, FR_AB_XM_RX_CFG);
+
+ /* Set frame length */
+ max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu);
+ EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_MAX_RX_FRM_SIZE, max_frame_len);
+ efx_writeo(efx, &reg, FR_AB_XM_RX_PARAM);
+ EFX_POPULATE_OWORD_2(reg,
+ FRF_AB_XM_MAX_TX_FRM_SIZE, max_frame_len,
+ FRF_AB_XM_TX_JUMBO_MODE, 1);
+ efx_writeo(efx, &reg, FR_AB_XM_TX_PARAM);
+
+ EFX_POPULATE_OWORD_2(reg,
+ FRF_AB_XM_PAUSE_TIME, 0xfffe, /* MAX PAUSE TIME */
+ FRF_AB_XM_DIS_FCNTL, !rx_fc);
+ efx_writeo(efx, &reg, FR_AB_XM_FC);
+
+ /* Set MAC address */
+ memcpy(&reg, &efx->net_dev->dev_addr[0], 4);
+ efx_writeo(efx, &reg, FR_AB_XM_ADR_LO);
+ memcpy(&reg, &efx->net_dev->dev_addr[4], 2);
+ efx_writeo(efx, &reg, FR_AB_XM_ADR_HI);
+}
+
+static void falcon_reconfigure_xgxs_core(struct efx_nic *efx)
+{
+ efx_oword_t reg;
+ bool xgxs_loopback = (efx->loopback_mode == LOOPBACK_XGXS);
+ bool xaui_loopback = (efx->loopback_mode == LOOPBACK_XAUI);
+ bool xgmii_loopback = (efx->loopback_mode == LOOPBACK_XGMII);
+ bool old_xgmii_loopback, old_xgxs_loopback, old_xaui_loopback;
+
+ /* XGXS block is flaky and will need to be reset if moving
+ * into or out of XGMII, XGXS or XAUI loopbacks. */
+ efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
+ old_xgxs_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN);
+ old_xgmii_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN);
+
+ efx_reado(efx, &reg, FR_AB_XX_SD_CTL);
+ old_xaui_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_LPBKA);
+
+ /* The PHY driver may have turned XAUI off */
+ if ((xgxs_loopback != old_xgxs_loopback) ||
+ (xaui_loopback != old_xaui_loopback) ||
+ (xgmii_loopback != old_xgmii_loopback))
+ falcon_reset_xaui(efx);
+
+ efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
+ EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_FORCE_SIG,
+ (xgxs_loopback || xaui_loopback) ?
+ FFE_AB_XX_FORCE_SIG_ALL_LANES : 0);
+ EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN, xgxs_loopback);
+ EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN, xgmii_loopback);
+ efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT);
+
+ efx_reado(efx, &reg, FR_AB_XX_SD_CTL);
+ EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKD, xaui_loopback);
+ EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKC, xaui_loopback);
+ EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKB, xaui_loopback);
+ EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKA, xaui_loopback);
+ efx_writeo(efx, &reg, FR_AB_XX_SD_CTL);
+}
+
+
+/* Try to bring up the Falcon side of the Falcon-Phy XAUI link */
+static bool falcon_xmac_link_ok_retry(struct efx_nic *efx, int tries)
+{
+ bool mac_up = falcon_xmac_link_ok(efx);
+
+ if (LOOPBACK_MASK(efx) & LOOPBACKS_EXTERNAL(efx) & LOOPBACKS_WS ||
+ efx_phy_mode_disabled(efx->phy_mode))
+ /* XAUI link is expected to be down */
+ return mac_up;
+
+ falcon_stop_nic_stats(efx);
+
+ while (!mac_up && tries) {
+ netif_dbg(efx, hw, efx->net_dev, "bashing xaui\n");
+ falcon_reset_xaui(efx);
+ udelay(200);
+
+ mac_up = falcon_xmac_link_ok(efx);
+ --tries;
+ }
+
+ falcon_start_nic_stats(efx);
+
+ return mac_up;
+}
+
+static bool falcon_xmac_check_fault(struct efx_nic *efx)
+{
+ return !falcon_xmac_link_ok_retry(efx, 5);
+}
+
+static int falcon_reconfigure_xmac(struct efx_nic *efx)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+
+ efx_farch_filter_sync_rx_mode(efx);
+
+ falcon_reconfigure_xgxs_core(efx);
+ falcon_reconfigure_xmac_core(efx);
+
+ falcon_reconfigure_mac_wrapper(efx);
+
+ nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 5);
+ falcon_ack_status_intr(efx);
+
+ return 0;
+}
+
+static void falcon_poll_xmac(struct efx_nic *efx)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+
+ /* We expect xgmii faults if the wireside link is down */
+ if (!efx->link_state.up || !nic_data->xmac_poll_required)
+ return;
+
+ nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1);
+ falcon_ack_status_intr(efx);
+}
+
+/**************************************************************************
+ *
+ * MAC wrapper
+ *
+ **************************************************************************
+ */
+
+static void falcon_push_multicast_hash(struct efx_nic *efx)
+{
+ union efx_multicast_hash *mc_hash = &efx->multicast_hash;
+
+ WARN_ON(!mutex_is_locked(&efx->mac_lock));
+
+ efx_writeo(efx, &mc_hash->oword[0], FR_AB_MAC_MC_HASH_REG0);
+ efx_writeo(efx, &mc_hash->oword[1], FR_AB_MAC_MC_HASH_REG1);
+}
+
+static void falcon_reset_macs(struct efx_nic *efx)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ efx_oword_t reg, mac_ctrl;
+ int count;
+
+ if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
+ /* It's not safe to use GLB_CTL_REG to reset the
+ * macs, so instead use the internal MAC resets
+ */
+ EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_CORE_RST, 1);
+ efx_writeo(efx, &reg, FR_AB_XM_GLB_CFG);
+
+ for (count = 0; count < 10000; count++) {
+ efx_reado(efx, &reg, FR_AB_XM_GLB_CFG);
+ if (EFX_OWORD_FIELD(reg, FRF_AB_XM_CORE_RST) ==
+ 0)
+ return;
+ udelay(10);
+ }
+
+ netif_err(efx, hw, efx->net_dev,
+ "timed out waiting for XMAC core reset\n");
+ }
+
+ /* MAC stats will fail whilst the TX FIFO is draining */
+ WARN_ON(nic_data->stats_disable_count == 0);
+
+ efx_reado(efx, &mac_ctrl, FR_AB_MAC_CTRL);
+ EFX_SET_OWORD_FIELD(mac_ctrl, FRF_BB_TXFIFO_DRAIN_EN, 1);
+ efx_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL);
+
+ efx_reado(efx, &reg, FR_AB_GLB_CTL);
+ EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_XGTX, 1);
+ EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_XGRX, 1);
+ EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_EM, 1);
+ efx_writeo(efx, &reg, FR_AB_GLB_CTL);
+
+ count = 0;
+ while (1) {
+ efx_reado(efx, &reg, FR_AB_GLB_CTL);
+ if (!EFX_OWORD_FIELD(reg, FRF_AB_RST_XGTX) &&
+ !EFX_OWORD_FIELD(reg, FRF_AB_RST_XGRX) &&
+ !EFX_OWORD_FIELD(reg, FRF_AB_RST_EM)) {
+ netif_dbg(efx, hw, efx->net_dev,
+ "Completed MAC reset after %d loops\n",
+ count);
+ break;
+ }
+ if (count > 20) {
+ netif_err(efx, hw, efx->net_dev, "MAC reset failed\n");
+ break;
+ }
+ count++;
+ udelay(10);
+ }
+
+ /* Ensure the correct MAC is selected before statistics
+ * are re-enabled by the caller */
+ efx_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL);
+
+ falcon_setup_xaui(efx);
+}
+
+static void falcon_drain_tx_fifo(struct efx_nic *efx)
+{
+ efx_oword_t reg;
+
+ if ((efx_nic_rev(efx) < EFX_REV_FALCON_B0) ||
+ (efx->loopback_mode != LOOPBACK_NONE))
+ return;
+
+ efx_reado(efx, &reg, FR_AB_MAC_CTRL);
+ /* There is no point in draining more than once */
+ if (EFX_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN))
+ return;
+
+ falcon_reset_macs(efx);
+}
+
+static void falcon_deconfigure_mac_wrapper(struct efx_nic *efx)
+{
+ efx_oword_t reg;
+
+ if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
+ return;
+
+ /* Isolate the MAC -> RX */
+ efx_reado(efx, &reg, FR_AZ_RX_CFG);
+ EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 0);
+ efx_writeo(efx, &reg, FR_AZ_RX_CFG);
+
+ /* Isolate TX -> MAC */
+ falcon_drain_tx_fifo(efx);
+}
+
+static void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
+{
+ struct efx_link_state *link_state = &efx->link_state;
+ efx_oword_t reg;
+ int link_speed, isolate;
+
+ isolate = !!ACCESS_ONCE(efx->reset_pending);
+
+ switch (link_state->speed) {
+ case 10000: link_speed = 3; break;
+ case 1000: link_speed = 2; break;
+ case 100: link_speed = 1; break;
+ default: link_speed = 0; break;
+ }
+
+ /* MAC_LINK_STATUS controls MAC backpressure but doesn't work
+ * as advertised. Disable to ensure packets are not
+ * indefinitely held and TX queue can be flushed at any point
+ * while the link is down. */
+ EFX_POPULATE_OWORD_5(reg,
+ FRF_AB_MAC_XOFF_VAL, 0xffff /* max pause time */,
+ FRF_AB_MAC_BCAD_ACPT, 1,
+ FRF_AB_MAC_UC_PROM, !efx->unicast_filter,
+ FRF_AB_MAC_LINK_STATUS, 1, /* always set */
+ FRF_AB_MAC_SPEED, link_speed);
+ /* On B0, MAC backpressure can be disabled and packets get
+ * discarded. */
+ if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+ EFX_SET_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN,
+ !link_state->up || isolate);
+ }
+
+ efx_writeo(efx, &reg, FR_AB_MAC_CTRL);
+
+ /* Restore the multicast hash registers. */
+ falcon_push_multicast_hash(efx);
+
+ efx_reado(efx, &reg, FR_AZ_RX_CFG);
+ /* Enable XOFF signal from RX FIFO (we enabled it during NIC
+ * initialisation but it may read back as 0) */
+ EFX_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, 1);
+ /* Unisolate the MAC -> RX */
+ if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
+ EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, !isolate);
+ efx_writeo(efx, &reg, FR_AZ_RX_CFG);
+}
+
+static void falcon_stats_request(struct efx_nic *efx)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ efx_oword_t reg;
+
+ WARN_ON(nic_data->stats_pending);
+ WARN_ON(nic_data->stats_disable_count);
+
+ FALCON_XMAC_STATS_DMA_FLAG(efx) = 0;
+ nic_data->stats_pending = true;
+ wmb(); /* ensure done flag is clear */
+
+ /* Initiate DMA transfer of stats */
+ EFX_POPULATE_OWORD_2(reg,
+ FRF_AB_MAC_STAT_DMA_CMD, 1,
+ FRF_AB_MAC_STAT_DMA_ADR,
+ efx->stats_buffer.dma_addr);
+ efx_writeo(efx, &reg, FR_AB_MAC_STAT_DMA);
+
+ mod_timer(&nic_data->stats_timer, round_jiffies_up(jiffies + HZ / 2));
+}
+
+static void falcon_stats_complete(struct efx_nic *efx)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+
+ if (!nic_data->stats_pending)
+ return;
+
+ nic_data->stats_pending = false;
+ if (FALCON_XMAC_STATS_DMA_FLAG(efx)) {
+ rmb(); /* read the done flag before the stats */
+ efx_nic_update_stats(falcon_stat_desc, FALCON_STAT_COUNT,
+ falcon_stat_mask, nic_data->stats,
+ efx->stats_buffer.addr, true);
+ } else {
+ netif_err(efx, hw, efx->net_dev,
+ "timed out waiting for statistics\n");
+ }
+}
+
+static void falcon_stats_timer_func(unsigned long context)
+{
+ struct efx_nic *efx = (struct efx_nic *)context;
+ struct falcon_nic_data *nic_data = efx->nic_data;
+
+ spin_lock(&efx->stats_lock);
+
+ falcon_stats_complete(efx);
+ if (nic_data->stats_disable_count == 0)
+ falcon_stats_request(efx);
+
+ spin_unlock(&efx->stats_lock);
+}
+
+static bool falcon_loopback_link_poll(struct efx_nic *efx)
+{
+ struct efx_link_state old_state = efx->link_state;
+
+ WARN_ON(!mutex_is_locked(&efx->mac_lock));
+ WARN_ON(!LOOPBACK_INTERNAL(efx));
+
+ efx->link_state.fd = true;
+ efx->link_state.fc = efx->wanted_fc;
+ efx->link_state.up = true;
+ efx->link_state.speed = 10000;
+
+ return !efx_link_state_equal(&efx->link_state, &old_state);
+}
+
+static int falcon_reconfigure_port(struct efx_nic *efx)
+{
+ int rc;
+
+ WARN_ON(efx_nic_rev(efx) > EFX_REV_FALCON_B0);
+
+ /* Poll the PHY link state *before* reconfiguring it. This means we
+ * will pick up the correct speed (in loopback) to select the correct
+ * MAC.
+ */
+ if (LOOPBACK_INTERNAL(efx))
+ falcon_loopback_link_poll(efx);
+ else
+ efx->phy_op->poll(efx);
+
+ falcon_stop_nic_stats(efx);
+ falcon_deconfigure_mac_wrapper(efx);
+
+ falcon_reset_macs(efx);
+
+ efx->phy_op->reconfigure(efx);
+ rc = falcon_reconfigure_xmac(efx);
+ BUG_ON(rc);
+
+ falcon_start_nic_stats(efx);
+
+ /* Synchronise efx->link_state with the kernel */
+ efx_link_status_changed(efx);
+
+ return 0;
+}
+
+/* TX flow control may automatically turn itself off if the link
+ * partner (intermittently) stops responding to pause frames. There
+ * isn't any indication that this has happened, so the best we do is
+ * leave it up to the user to spot this and fix it by cycling transmit
+ * flow control on this end.
+ */
+
+static void falcon_a1_prepare_enable_fc_tx(struct efx_nic *efx)
+{
+ /* Schedule a reset to recover */
+ efx_schedule_reset(efx, RESET_TYPE_INVISIBLE);
+}
+
+static void falcon_b0_prepare_enable_fc_tx(struct efx_nic *efx)
+{
+ /* Recover by resetting the EM block */
+ falcon_stop_nic_stats(efx);
+ falcon_drain_tx_fifo(efx);
+ falcon_reconfigure_xmac(efx);
+ falcon_start_nic_stats(efx);
+}
+
+/**************************************************************************
+ *
+ * PHY access via GMII
+ *
+ **************************************************************************
+ */
+
+/* Wait for GMII access to complete */
+static int falcon_gmii_wait(struct efx_nic *efx)
+{
+ efx_oword_t md_stat;
+ int count;
+
+ /* wait up to 50ms - taken max from datasheet */
+ for (count = 0; count < 5000; count++) {
+ efx_reado(efx, &md_stat, FR_AB_MD_STAT);
+ if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSY) == 0) {
+ if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_LNFL) != 0 ||
+ EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSERR) != 0) {
+ netif_err(efx, hw, efx->net_dev,
+ "error from GMII access "
+ EFX_OWORD_FMT"\n",
+ EFX_OWORD_VAL(md_stat));
+ return -EIO;
+ }
+ return 0;
+ }
+ udelay(10);
+ }
+ netif_err(efx, hw, efx->net_dev, "timed out waiting for GMII\n");
+ return -ETIMEDOUT;
+}
+
+/* Write an MDIO register of a PHY connected to Falcon. */
+static int falcon_mdio_write(struct net_device *net_dev,
+ int prtad, int devad, u16 addr, u16 value)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ efx_oword_t reg;
+ int rc;
+
+ netif_vdbg(efx, hw, efx->net_dev,
+ "writing MDIO %d register %d.%d with 0x%04x\n",
+ prtad, devad, addr, value);
+
+ mutex_lock(&nic_data->mdio_lock);
+
+ /* Check MDIO not currently being accessed */
+ rc = falcon_gmii_wait(efx);
+ if (rc)
+ goto out;
+
+ /* Write the address/ID register */
+ EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr);
+ efx_writeo(efx, &reg, FR_AB_MD_PHY_ADR);
+
+ EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
+ FRF_AB_MD_DEV_ADR, devad);
+ efx_writeo(efx, &reg, FR_AB_MD_ID);
+
+ /* Write data */
+ EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_TXD, value);
+ efx_writeo(efx, &reg, FR_AB_MD_TXD);
+
+ EFX_POPULATE_OWORD_2(reg,
+ FRF_AB_MD_WRC, 1,
+ FRF_AB_MD_GC, 0);
+ efx_writeo(efx, &reg, FR_AB_MD_CS);
+
+ /* Wait for data to be written */
+ rc = falcon_gmii_wait(efx);
+ if (rc) {
+ /* Abort the write operation */
+ EFX_POPULATE_OWORD_2(reg,
+ FRF_AB_MD_WRC, 0,
+ FRF_AB_MD_GC, 1);
+ efx_writeo(efx, &reg, FR_AB_MD_CS);
+ udelay(10);
+ }
+
+out:
+ mutex_unlock(&nic_data->mdio_lock);
+ return rc;
+}
+
+/* Read an MDIO register of a PHY connected to Falcon. */
+static int falcon_mdio_read(struct net_device *net_dev,
+ int prtad, int devad, u16 addr)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ efx_oword_t reg;
+ int rc;
+
+ mutex_lock(&nic_data->mdio_lock);
+
+ /* Check MDIO not currently being accessed */
+ rc = falcon_gmii_wait(efx);
+ if (rc)
+ goto out;
+
+ EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr);
+ efx_writeo(efx, &reg, FR_AB_MD_PHY_ADR);
+
+ EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
+ FRF_AB_MD_DEV_ADR, devad);
+ efx_writeo(efx, &reg, FR_AB_MD_ID);
+
+ /* Request data to be read */
+ EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_RDC, 1, FRF_AB_MD_GC, 0);
+ efx_writeo(efx, &reg, FR_AB_MD_CS);
+
+ /* Wait for data to become available */
+ rc = falcon_gmii_wait(efx);
+ if (rc == 0) {
+ efx_reado(efx, &reg, FR_AB_MD_RXD);
+ rc = EFX_OWORD_FIELD(reg, FRF_AB_MD_RXD);
+ netif_vdbg(efx, hw, efx->net_dev,
+ "read from MDIO %d register %d.%d, got %04x\n",
+ prtad, devad, addr, rc);
+ } else {
+ /* Abort the read operation */
+ EFX_POPULATE_OWORD_2(reg,
+ FRF_AB_MD_RIC, 0,
+ FRF_AB_MD_GC, 1);
+ efx_writeo(efx, &reg, FR_AB_MD_CS);
+
+ netif_dbg(efx, hw, efx->net_dev,
+ "read from MDIO %d register %d.%d, got error %d\n",
+ prtad, devad, addr, rc);
+ }
+
+out:
+ mutex_unlock(&nic_data->mdio_lock);
+ return rc;
+}
+
+/* This call is responsible for hooking in the MAC and PHY operations */
+static int falcon_probe_port(struct efx_nic *efx)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ switch (efx->phy_type) {
+ case PHY_TYPE_SFX7101:
+ efx->phy_op = &falcon_sfx7101_phy_ops;
+ break;
+ case PHY_TYPE_QT2022C2:
+ case PHY_TYPE_QT2025C:
+ efx->phy_op = &falcon_qt202x_phy_ops;
+ break;
+ case PHY_TYPE_TXC43128:
+ efx->phy_op = &falcon_txc_phy_ops;
+ break;
+ default:
+ netif_err(efx, probe, efx->net_dev, "Unknown PHY type %d\n",
+ efx->phy_type);
+ return -ENODEV;
+ }
+
+ /* Fill out MDIO structure and loopback modes */
+ mutex_init(&nic_data->mdio_lock);
+ efx->mdio.mdio_read = falcon_mdio_read;
+ efx->mdio.mdio_write = falcon_mdio_write;
+ rc = efx->phy_op->probe(efx);
+ if (rc != 0)
+ return rc;
+
+ /* Initial assumption */
+ efx->link_state.speed = 10000;
+ efx->link_state.fd = true;
+
+ /* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */
+ if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
+ efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
+ else
+ efx->wanted_fc = EFX_FC_RX;
+ if (efx->mdio.mmds & MDIO_DEVS_AN)
+ efx->wanted_fc |= EFX_FC_AUTO;
+
+ /* Allocate buffer for stats */
+ rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
+ FALCON_MAC_STATS_SIZE, GFP_KERNEL);
+ if (rc)
+ return rc;
+ netif_dbg(efx, probe, efx->net_dev,
+ "stats buffer at %llx (virt %p phys %llx)\n",
+ (u64)efx->stats_buffer.dma_addr,
+ efx->stats_buffer.addr,
+ (u64)virt_to_phys(efx->stats_buffer.addr));
+
+ return 0;
+}
+
+static void falcon_remove_port(struct efx_nic *efx)
+{
+ efx->phy_op->remove(efx);
+ efx_nic_free_buffer(efx, &efx->stats_buffer);
+}
+
+/* Global events are basically PHY events */
+static bool
+falcon_handle_global_event(struct efx_channel *channel, efx_qword_t *event)
+{
+ struct efx_nic *efx = channel->efx;
+ struct falcon_nic_data *nic_data = efx->nic_data;
+
+ if (EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_G_PHY0_INTR) ||
+ EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XG_PHY0_INTR) ||
+ EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XFP_PHY0_INTR))
+ /* Ignored */
+ return true;
+
+ if ((efx_nic_rev(efx) == EFX_REV_FALCON_B0) &&
+ EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_XG_MGT_INTR)) {
+ nic_data->xmac_poll_required = true;
+ return true;
+ }
+
+ if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ?
+ EFX_QWORD_FIELD(*event, FSF_AA_GLB_EV_RX_RECOVERY) :
+ EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_RX_RECOVERY)) {
+ netif_err(efx, rx_err, efx->net_dev,
+ "channel %d seen global RX_RESET event. Resetting.\n",
+ channel->channel);
+
+ atomic_inc(&efx->rx_reset);
+ efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ?
+ RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
+ return true;
+ }
+
+ return false;
+}
+
+/**************************************************************************
+ *
+ * Falcon test code
+ *
+ **************************************************************************/
+
+static int
+falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ struct falcon_nvconfig *nvconfig;
+ struct falcon_spi_device *spi;
+ void *region;
+ int rc, magic_num, struct_ver;
+ __le16 *word, *limit;
+ u32 csum;
+
+ if (falcon_spi_present(&nic_data->spi_flash))
+ spi = &nic_data->spi_flash;
+ else if (falcon_spi_present(&nic_data->spi_eeprom))
+ spi = &nic_data->spi_eeprom;
+ else
+ return -EINVAL;
+
+ region = kmalloc(FALCON_NVCONFIG_END, GFP_KERNEL);
+ if (!region)
+ return -ENOMEM;
+ nvconfig = region + FALCON_NVCONFIG_OFFSET;
+
+ mutex_lock(&nic_data->spi_lock);
+ rc = falcon_spi_read(efx, spi, 0, FALCON_NVCONFIG_END, NULL, region);
+ mutex_unlock(&nic_data->spi_lock);
+ if (rc) {
+ netif_err(efx, hw, efx->net_dev, "Failed to read %s\n",
+ falcon_spi_present(&nic_data->spi_flash) ?
+ "flash" : "EEPROM");
+ rc = -EIO;
+ goto out;
+ }
+
+ magic_num = le16_to_cpu(nvconfig->board_magic_num);
+ struct_ver = le16_to_cpu(nvconfig->board_struct_ver);
+
+ rc = -EINVAL;
+ if (magic_num != FALCON_NVCONFIG_BOARD_MAGIC_NUM) {
+ netif_err(efx, hw, efx->net_dev,
+ "NVRAM bad magic 0x%x\n", magic_num);
+ goto out;
+ }
+ if (struct_ver < 2) {
+ netif_err(efx, hw, efx->net_dev,
+ "NVRAM has ancient version 0x%x\n", struct_ver);
+ goto out;
+ } else if (struct_ver < 4) {
+ word = &nvconfig->board_magic_num;
+ limit = (__le16 *) (nvconfig + 1);
+ } else {
+ word = region;
+ limit = region + FALCON_NVCONFIG_END;
+ }
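+ /* The config is valid only if the low 16 bits of the word sum below
+ * are all ones, i.e. ~csum & 0xffff must be zero. */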
+ for (csum = 0; word < limit; ++word)
+ csum += le16_to_cpu(*word);
+
+ if (~csum & 0xffff) {
+ netif_err(efx, hw, efx->net_dev,
+ "NVRAM has incorrect checksum\n");
+ goto out;
+ }
+
+ rc = 0;
+ if (nvconfig_out)
+ memcpy(nvconfig_out, nvconfig, sizeof(*nvconfig));
+
+ out:
+ kfree(region);
+ return rc;
+}
+
+static int falcon_test_nvram(struct efx_nic *efx)
+{
+ return falcon_read_nvram(efx, NULL);
+}
+
+static const struct efx_farch_register_test falcon_b0_register_tests[] = {
+ { FR_AZ_ADR_REGION,
+ EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
+ { FR_AZ_RX_CFG,
+ EFX_OWORD32(0xFFFFFFFE, 0x00017FFF, 0x00000000, 0x00000000) },
+ { FR_AZ_TX_CFG,
+ EFX_OWORD32(0x7FFF0037, 0x00000000, 0x00000000, 0x00000000) },
+ { FR_AZ_TX_RESERVED,
+ EFX_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) },
+ { FR_AB_MAC_CTRL,
+ EFX_OWORD32(0xFFFF0000, 0x00000000, 0x00000000, 0x00000000) },
+ { FR_AZ_SRM_TX_DC_CFG,
+ EFX_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) },
+ { FR_AZ_RX_DC_CFG,
+ EFX_OWORD32(0x0000000F, 0x00000000, 0x00000000, 0x00000000) },
+ { FR_AZ_RX_DC_PF_WM,
+ EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) },
+ { FR_BZ_DP_CTRL,
+ EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) },
+ { FR_AB_GM_CFG2,
+ EFX_OWORD32(0x00007337, 0x00000000, 0x00000000, 0x00000000) },
+ { FR_AB_GMF_CFG0,
+ EFX_OWORD32(0x00001F1F, 0x00000000, 0x00000000, 0x00000000) },
+ { FR_AB_XM_GLB_CFG,
+ EFX_OWORD32(0x00000C68, 0x00000000, 0x00000000, 0x00000000) },
+ { FR_AB_XM_TX_CFG,
+ EFX_OWORD32(0x00080164, 0x00000000, 0x00000000, 0x00000000) },
+ { FR_AB_XM_RX_CFG,
+ EFX_OWORD32(0x07100A0C, 0x00000000, 0x00000000, 0x00000000) },
+ { FR_AB_XM_RX_PARAM,
+ EFX_OWORD32(0x00001FF8, 0x00000000, 0x00000000, 0x00000000) },
+ { FR_AB_XM_FC,
+ EFX_OWORD32(0xFFFF0001, 0x00000000, 0x00000000, 0x00000000) },
+ { FR_AB_XM_ADR_LO,
+ EFX_OWORD32(0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000) },
+ { FR_AB_XX_SD_CTL,
+ EFX_OWORD32(0x0003FF0F, 0x00000000, 0x00000000, 0x00000000) },
+};
+
+static int
+falcon_b0_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
+{
+ enum reset_type reset_method = RESET_TYPE_INVISIBLE;
+ int rc, rc2;
+
+ mutex_lock(&efx->mac_lock);
+ if (efx->loopback_modes) {
+ /* We need the 312.5 MHz clock from the PHY to test the XMAC
+ * registers, so move into XGMII loopback if available */
+ if (efx->loopback_modes & (1 << LOOPBACK_XGMII))
+ efx->loopback_mode = LOOPBACK_XGMII;
+ else
+ efx->loopback_mode = __ffs(efx->loopback_modes);
+ }
+ __efx_reconfigure_port(efx);
+ mutex_unlock(&efx->mac_lock);
+
+ efx_reset_down(efx, reset_method);
+
+ tests->registers =
+ efx_farch_test_registers(efx, falcon_b0_register_tests,
+ ARRAY_SIZE(falcon_b0_register_tests))
+ ? -1 : 1;
+
+ rc = falcon_reset_hw(efx, reset_method);
+ rc2 = efx_reset_up(efx, reset_method, rc == 0);
+ return rc ? rc : rc2;
+}
+
+/**************************************************************************
+ *
+ * Device reset
+ *
+ **************************************************************************
+ */
+
+static enum reset_type falcon_map_reset_reason(enum reset_type reason)
+{
+ switch (reason) {
+ case RESET_TYPE_RX_RECOVERY:
+ case RESET_TYPE_DMA_ERROR:
+ case RESET_TYPE_TX_SKIP:
+ /* These can occasionally occur due to hardware bugs.
+ * We try to reset without disrupting the link.
+ */
+ return RESET_TYPE_INVISIBLE;
+ default:
+ return RESET_TYPE_ALL;
+ }
+}
+
+static int falcon_map_reset_flags(u32 *flags)
+{
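+ /* Combinations of ethtool reset flags, each a superset of the
+ * previous one */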
+ enum {
+ FALCON_RESET_INVISIBLE = (ETH_RESET_DMA | ETH_RESET_FILTER |
+ ETH_RESET_OFFLOAD | ETH_RESET_MAC),
+ FALCON_RESET_ALL = FALCON_RESET_INVISIBLE | ETH_RESET_PHY,
+ FALCON_RESET_WORLD = FALCON_RESET_ALL | ETH_RESET_IRQ,
+ };
+
+ if ((*flags & FALCON_RESET_WORLD) == FALCON_RESET_WORLD) {
+ *flags &= ~FALCON_RESET_WORLD;
+ return RESET_TYPE_WORLD;
+ }
+
+ if ((*flags & FALCON_RESET_ALL) == FALCON_RESET_ALL) {
+ *flags &= ~FALCON_RESET_ALL;
+ return RESET_TYPE_ALL;
+ }
+
+ if ((*flags & FALCON_RESET_INVISIBLE) == FALCON_RESET_INVISIBLE) {
+ *flags &= ~FALCON_RESET_INVISIBLE;
+ return RESET_TYPE_INVISIBLE;
+ }
+
+ return -EINVAL;
+}
+
+/* Resets NIC to known state. This routine must be called in process
+ * context and is allowed to sleep. */
+static int __falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ efx_oword_t glb_ctl_reg_ker;
+ int rc;
+
+ netif_dbg(efx, hw, efx->net_dev, "performing %s hardware reset\n",
+ RESET_TYPE(method));
+
+ /* Initiate device reset */
+ if (method == RESET_TYPE_WORLD) {
+ rc = pci_save_state(efx->pci_dev);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev,
+ "failed to backup PCI state of primary "
+ "function prior to hardware reset\n");
+ goto fail1;
+ }
+ if (efx_nic_is_dual_func(efx)) {
+ rc = pci_save_state(nic_data->pci_dev2);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev,
+ "failed to backup PCI state of "
+ "secondary function prior to "
+ "hardware reset\n");
+ goto fail2;
+ }
+ }
+
+ EFX_POPULATE_OWORD_2(glb_ctl_reg_ker,
+ FRF_AB_EXT_PHY_RST_DUR,
+ FFE_AB_EXT_PHY_RST_DUR_10240US,
+ FRF_AB_SWRST, 1);
+ } else {
+ EFX_POPULATE_OWORD_7(glb_ctl_reg_ker,
+ /* exclude PHY from "invisible" reset */
+ FRF_AB_EXT_PHY_RST_CTL,
+ method == RESET_TYPE_INVISIBLE,
+ /* exclude EEPROM/flash and PCIe */
+ FRF_AB_PCIE_CORE_RST_CTL, 1,
+ FRF_AB_PCIE_NSTKY_RST_CTL, 1,
+ FRF_AB_PCIE_SD_RST_CTL, 1,
+ FRF_AB_EE_RST_CTL, 1,
+ FRF_AB_EXT_PHY_RST_DUR,
+ FFE_AB_EXT_PHY_RST_DUR_10240US,
+ FRF_AB_SWRST, 1);
+ }
+ efx_writeo(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);
+
+ netif_dbg(efx, hw, efx->net_dev, "waiting for hardware reset\n");
+ schedule_timeout_uninterruptible(HZ / 20);
+
+ /* Restore PCI configuration if needed */
+ if (method == RESET_TYPE_WORLD) {
+ if (efx_nic_is_dual_func(efx))
+ pci_restore_state(nic_data->pci_dev2);
+ pci_restore_state(efx->pci_dev);
+ netif_dbg(efx, drv, efx->net_dev,
+ "successfully restored PCI config\n");
+ }
+
+ /* Assert that the reset is complete */
+ efx_reado(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);
+ if (EFX_OWORD_FIELD(glb_ctl_reg_ker, FRF_AB_SWRST) != 0) {
+ rc = -ETIMEDOUT;
+ netif_err(efx, hw, efx->net_dev,
+ "timed out waiting for hardware reset\n");
+ goto fail3;
+ }
+ netif_dbg(efx, hw, efx->net_dev, "hardware reset complete\n");
+
+ return 0;
+
+ /* pci_save_state() and pci_restore_state() MUST be called in pairs */
+fail2:
+ pci_restore_state(efx->pci_dev);
+fail1:
+fail3:
+ return rc;
+}
+
+static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ mutex_lock(&nic_data->spi_lock);
+ rc = __falcon_reset_hw(efx, method);
+ mutex_unlock(&nic_data->spi_lock);
+
+ return rc;
+}
+
+static void falcon_monitor(struct efx_nic *efx)
+{
+ bool link_changed;
+ int rc;
+
+ BUG_ON(!mutex_is_locked(&efx->mac_lock));
+
+ rc = falcon_board(efx)->type->monitor(efx);
+ if (rc) {
+ netif_err(efx, hw, efx->net_dev,
+ "Board sensor %s; shutting down PHY\n",
+ (rc == -ERANGE) ? "reported fault" : "failed");
+ efx->phy_mode |= PHY_MODE_LOW_POWER;
+ rc = __efx_reconfigure_port(efx);
+ WARN_ON(rc);
+ }
+
+ if (LOOPBACK_INTERNAL(efx))
+ link_changed = falcon_loopback_link_poll(efx);
+ else
+ link_changed = efx->phy_op->poll(efx);
+
+ if (link_changed) {
+ falcon_stop_nic_stats(efx);
+ falcon_deconfigure_mac_wrapper(efx);
+
+ falcon_reset_macs(efx);
+ rc = falcon_reconfigure_xmac(efx);
+ BUG_ON(rc);
+
+ falcon_start_nic_stats(efx);
+
+ efx_link_status_changed(efx);
+ }
+
+ falcon_poll_xmac(efx);
+}
+
+/* Zeroes out the SRAM contents. This routine must be called in
+ * process context and is allowed to sleep.
+ */
+static int falcon_reset_sram(struct efx_nic *efx)
+{
+ efx_oword_t srm_cfg_reg_ker, gpio_cfg_reg_ker;
+ int count;
+
+ /* Set the SRAM wake/sleep GPIO appropriately. */
+ efx_reado(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);
+ EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OEN, 1);
+ EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OUT, 1);
+ efx_writeo(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);
+
+ /* Initiate SRAM reset */
+ EFX_POPULATE_OWORD_2(srm_cfg_reg_ker,
+ FRF_AZ_SRM_INIT_EN, 1,
+ FRF_AZ_SRM_NB_SZ, 0);
+ efx_writeo(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);
+
+ /* Wait for SRAM reset to complete */
+ count = 0;
+ do {
+ netif_dbg(efx, hw, efx->net_dev,
+ "waiting for SRAM reset (attempt %d)...\n", count);
+
+ /* SRAM reset is slow; expect around 16ms */
+ schedule_timeout_uninterruptible(HZ / 50);
+
+ /* Check for reset complete */
+ efx_reado(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);
+ if (!EFX_OWORD_FIELD(srm_cfg_reg_ker, FRF_AZ_SRM_INIT_EN)) {
+ netif_dbg(efx, hw, efx->net_dev,
+ "SRAM reset complete\n");
+
+ return 0;
+ }
+ } while (++count < 20); /* wait up to 0.4 sec */
+
+ netif_err(efx, hw, efx->net_dev, "timed out waiting for SRAM reset\n");
+ return -ETIMEDOUT;
+}
+
+static void falcon_spi_device_init(struct efx_nic *efx,
+ struct falcon_spi_device *spi_device,
+ unsigned int device_id, u32 device_type)
+{
+ if (device_type != 0) {
+ spi_device->device_id = device_id;
+ spi_device->size =
+ 1 << SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_SIZE);
+ spi_device->addr_len =
+ SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ADDR_LEN);
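+ /* 512-byte devices with a 1-byte address field carry address
+ * bit 8 in the command byte, so their addresses need munging */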
+ spi_device->munge_address = (spi_device->size == 1 << 9 &&
+ spi_device->addr_len == 1);
+ spi_device->erase_command =
+ SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ERASE_CMD);
+ spi_device->erase_size =
+ 1 << SPI_DEV_TYPE_FIELD(device_type,
+ SPI_DEV_TYPE_ERASE_SIZE);
+ spi_device->block_size =
+ 1 << SPI_DEV_TYPE_FIELD(device_type,
+ SPI_DEV_TYPE_BLOCK_SIZE);
+ } else {
+ spi_device->size = 0;
+ }
+}
+
+/* Extract non-volatile configuration */
+static int falcon_probe_nvconfig(struct efx_nic *efx)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ struct falcon_nvconfig *nvconfig;
+ int rc;
+
+ nvconfig = kmalloc(sizeof(*nvconfig), GFP_KERNEL);
+ if (!nvconfig)
+ return -ENOMEM;
+
+ rc = falcon_read_nvram(efx, nvconfig);
+ if (rc)
+ goto out;
+
+ efx->phy_type = nvconfig->board_v2.port0_phy_type;
+ efx->mdio.prtad = nvconfig->board_v2.port0_phy_addr;
+
+ if (le16_to_cpu(nvconfig->board_struct_ver) >= 3) {
+ falcon_spi_device_init(
+ efx, &nic_data->spi_flash, FFE_AB_SPI_DEVICE_FLASH,
+ le32_to_cpu(nvconfig->board_v3
+ .spi_device_type[FFE_AB_SPI_DEVICE_FLASH]));
+ falcon_spi_device_init(
+ efx, &nic_data->spi_eeprom, FFE_AB_SPI_DEVICE_EEPROM,
+ le32_to_cpu(nvconfig->board_v3
+ .spi_device_type[FFE_AB_SPI_DEVICE_EEPROM]));
+ }
+
+ /* Read the MAC addresses */
+ ether_addr_copy(efx->net_dev->perm_addr, nvconfig->mac_address[0]);
+
+ netif_dbg(efx, probe, efx->net_dev, "PHY is %d phy_id %d\n",
+ efx->phy_type, efx->mdio.prtad);
+
+ rc = falcon_probe_board(efx,
+ le16_to_cpu(nvconfig->board_v2.board_revision));
+out:
+ kfree(nvconfig);
+ return rc;
+}
+
+static int falcon_dimension_resources(struct efx_nic *efx)
+{
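+ /* RX and TX descriptor caches live at fixed offsets in the
+ * on-chip SRAM */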
+ efx->rx_dc_base = 0x20000;
+ efx->tx_dc_base = 0x26000;
+ return 0;
+}
+
+/* Probe all SPI devices on the NIC */
+static void falcon_probe_spi_devices(struct efx_nic *efx)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ efx_oword_t nic_stat, gpio_ctl, ee_vpd_cfg;
+ int boot_dev;
+
+ efx_reado(efx, &gpio_ctl, FR_AB_GPIO_CTL);
+ efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
+ efx_reado(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);
+
+ if (EFX_OWORD_FIELD(gpio_ctl, FRF_AB_GPIO3_PWRUP_VALUE)) {
+ boot_dev = (EFX_OWORD_FIELD(nic_stat, FRF_AB_SF_PRST) ?
+ FFE_AB_SPI_DEVICE_FLASH : FFE_AB_SPI_DEVICE_EEPROM);
+ netif_dbg(efx, probe, efx->net_dev, "Booted from %s\n",
+ boot_dev == FFE_AB_SPI_DEVICE_FLASH ?
+ "flash" : "EEPROM");
+ } else {
+ /* Disable VPD and set clock dividers to safe
+ * values for initial programming. */
+ boot_dev = -1;
+ netif_dbg(efx, probe, efx->net_dev,
+ "Booted from internal ASIC settings;"
+ " setting SPI config\n");
+ EFX_POPULATE_OWORD_3(ee_vpd_cfg, FRF_AB_EE_VPD_EN, 0,
+ /* 125 MHz / 7 ~= 20 MHz */
+ FRF_AB_EE_SF_CLOCK_DIV, 7,
+ /* 125 MHz / 63 ~= 2 MHz */
+ FRF_AB_EE_EE_CLOCK_DIV, 63);
+ efx_writeo(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);
+ }
+
+ mutex_init(&nic_data->spi_lock);
+
+ if (boot_dev == FFE_AB_SPI_DEVICE_FLASH)
+ falcon_spi_device_init(efx, &nic_data->spi_flash,
+ FFE_AB_SPI_DEVICE_FLASH,
+ default_flash_type);
+ if (boot_dev == FFE_AB_SPI_DEVICE_EEPROM)
+ falcon_spi_device_init(efx, &nic_data->spi_eeprom,
+ FFE_AB_SPI_DEVICE_EEPROM,
+ large_eeprom_type);
+}
+
+static unsigned int falcon_a1_mem_map_size(struct efx_nic *efx)
+{
+ return 0x20000;
+}
+
+static unsigned int falcon_b0_mem_map_size(struct efx_nic *efx)
+{
+ /* Map everything up to and including the RSS indirection table.
+ * The PCI core takes care of mapping the MSI-X tables.
+ */
+ return FR_BZ_RX_INDIRECTION_TBL +
+ FR_BZ_RX_INDIRECTION_TBL_STEP * FR_BZ_RX_INDIRECTION_TBL_ROWS;
+}
+
+static int falcon_probe_nic(struct efx_nic *efx)
+{
+ struct falcon_nic_data *nic_data;
+ struct falcon_board *board;
+ int rc;
+
+ efx->primary = efx; /* only one usable function per controller */
+
+ /* Allocate storage for hardware specific data */
+ nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
+ if (!nic_data)
+ return -ENOMEM;
+ efx->nic_data = nic_data;
+
+ rc = -ENODEV;
+
+ if (efx_farch_fpga_ver(efx) != 0) {
+ netif_err(efx, probe, efx->net_dev,
+ "Falcon FPGA not supported\n");
+ goto fail1;
+ }
+
+ if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) {
+ efx_oword_t nic_stat;
+ struct pci_dev *dev;
+ u8 pci_rev = efx->pci_dev->revision;
+
+ if ((pci_rev == 0xff) || (pci_rev == 0)) {
+ netif_err(efx, probe, efx->net_dev,
+ "Falcon rev A0 not supported\n");
+ goto fail1;
+ }
+ efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
+ if (EFX_OWORD_FIELD(nic_stat, FRF_AB_STRAP_10G) == 0) {
+ netif_err(efx, probe, efx->net_dev,
+ "Falcon rev A1 1G not supported\n");
+ goto fail1;
+ }
+ if (EFX_OWORD_FIELD(nic_stat, FRF_AA_STRAP_PCIE) == 0) {
+ netif_err(efx, probe, efx->net_dev,
+ "Falcon rev A1 PCI-X not supported\n");
+ goto fail1;
+ }
+
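+ /* Falcon rev A exposes a second PCI function; look for it at
+ * devfn + 1 on the same bus */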
+ dev = pci_dev_get(efx->pci_dev);
+ while ((dev = pci_get_device(PCI_VENDOR_ID_SOLARFLARE,
+ PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1,
+ dev))) {
+ if (dev->bus == efx->pci_dev->bus &&
+ dev->devfn == efx->pci_dev->devfn + 1) {
+ nic_data->pci_dev2 = dev;
+ break;
+ }
+ }
+ if (!nic_data->pci_dev2) {
+ netif_err(efx, probe, efx->net_dev,
+ "failed to find secondary function\n");
+ rc = -ENODEV;
+ goto fail2;
+ }
+ }
+
+ /* Now we can reset the NIC */
+ rc = __falcon_reset_hw(efx, RESET_TYPE_ALL);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n");
+ goto fail3;
+ }
+
+ /* Allocate memory for INT_KER */
+ rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t),
+ GFP_KERNEL);
+ if (rc)
+ goto fail4;
+ BUG_ON(efx->irq_status.dma_addr & 0x0f);
+
+ netif_dbg(efx, probe, efx->net_dev,
+ "INT_KER at %llx (virt %p phys %llx)\n",
+ (u64)efx->irq_status.dma_addr,
+ efx->irq_status.addr,
+ (u64)virt_to_phys(efx->irq_status.addr));
+
+ falcon_probe_spi_devices(efx);
+
+ /* Read in the non-volatile configuration */
+ rc = falcon_probe_nvconfig(efx);
+ if (rc) {
+ if (rc == -EINVAL)
+ netif_err(efx, probe, efx->net_dev, "NVRAM is invalid\n");
+ goto fail5;
+ }
+
+ efx->max_channels = (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ? 4 :
+ EFX_MAX_CHANNELS);
+ efx->timer_quantum_ns = 4968; /* 621 cycles */
+
+ /* Initialise I2C adapter */
+ board = falcon_board(efx);
+ board->i2c_adap.owner = THIS_MODULE;
+ board->i2c_data = falcon_i2c_bit_operations;
+ board->i2c_data.data = efx;
+ board->i2c_adap.algo_data = &board->i2c_data;
+ board->i2c_adap.dev.parent = &efx->pci_dev->dev;
+ strlcpy(board->i2c_adap.name, "SFC4000 GPIO",
+ sizeof(board->i2c_adap.name));
+ rc = i2c_bit_add_bus(&board->i2c_adap);
+ if (rc)
+ goto fail5;
+
+ rc = falcon_board(efx)->type->init(efx);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "failed to initialise board\n");
+ goto fail6;
+ }
+
+ nic_data->stats_disable_count = 1;
+ setup_timer(&nic_data->stats_timer, &falcon_stats_timer_func,
+ (unsigned long)efx);
+
+ return 0;
+
+ fail6:
+ i2c_del_adapter(&board->i2c_adap);
+ memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));
+ fail5:
+ efx_nic_free_buffer(efx, &efx->irq_status);
+ fail4:
+ fail3:
+ if (nic_data->pci_dev2) {
+ pci_dev_put(nic_data->pci_dev2);
+ nic_data->pci_dev2 = NULL;
+ }
+ fail2:
+ fail1:
+ kfree(efx->nic_data);
+ return rc;
+}
+
+static void falcon_init_rx_cfg(struct efx_nic *efx)
+{
+ /* RX control FIFO thresholds (32 entries) */
+ const unsigned ctrl_xon_thr = 20;
+ const unsigned ctrl_xoff_thr = 25;
+ efx_oword_t reg;
+
+ efx_reado(efx, &reg, FR_AZ_RX_CFG);
+ if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) {
+ /* Data FIFO size is 5.5K. The RX DMA engine only
+ * supports scattering for user-mode queues, but will
+ * split DMA writes at intervals of RX_USR_BUF_SIZE
+ * (32-byte units) even for kernel-mode queues. We
+ * set it to be so large that that never happens.
+ */
+ EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_DESC_PUSH_EN, 0);
+ EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_USR_BUF_SIZE,
+ (3 * 4096) >> 5);
+ EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_MAC_TH, 512 >> 8);
+ EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_MAC_TH, 2048 >> 8);
+ EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_TX_TH, ctrl_xon_thr);
+ EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_TX_TH, ctrl_xoff_thr);
+ } else {
+ /* Data FIFO size is 80K; register fields moved */
+ EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_DESC_PUSH_EN, 0);
+ EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_USR_BUF_SIZE,
+ EFX_RX_USR_BUF_SIZE >> 5);
+ /* Send XON and XOFF at ~3 * max MTU away from empty/full */
+ EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_MAC_TH, 27648 >> 8);
+ EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_MAC_TH, 54272 >> 8);
+ EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_TX_TH, ctrl_xon_thr);
+ EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_TX_TH, ctrl_xoff_thr);
+ EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1);
+
+ /* Enable hash insertion. This is broken for the
+ * 'Falcon' hash so also select Toeplitz TCP/IPv4 and
+ * IPv4 hashes. */
+ EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_HASH_INSRT_HDR, 1);
+ EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_HASH_ALG, 1);
+ EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_IP_HASH, 1);
+ }
+ /* Always enable XOFF signal from RX FIFO. We enable
+ * or disable transmission of pause frames at the MAC. */
+ EFX_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, 1);
+ efx_writeo(efx, &reg, FR_AZ_RX_CFG);
+}
+
+/* This call performs hardware-specific global initialisation, such as
+ * defining the descriptor cache sizes and number of RSS channels.
+ * It does not set up any buffers, descriptor rings or event queues.
+ */
+static int falcon_init_nic(struct efx_nic *efx)
+{
+ efx_oword_t temp;
+ int rc;
+
+ /* Use on-chip SRAM */
+ efx_reado(efx, &temp, FR_AB_NIC_STAT);
+ EFX_SET_OWORD_FIELD(temp, FRF_AB_ONCHIP_SRAM, 1);
+ efx_writeo(efx, &temp, FR_AB_NIC_STAT);
+
+ rc = falcon_reset_sram(efx);
+ if (rc)
+ return rc;
+
+ /* Clear the parity enables on the TX data fifos as
+ * they produce false parity errors because of timing issues
+ */
+ if (EFX_WORKAROUND_5129(efx)) {
+ efx_reado(efx, &temp, FR_AZ_CSR_SPARE);
+ EFX_SET_OWORD_FIELD(temp, FRF_AB_MEM_PERR_EN_TX_DATA, 0);
+ efx_writeo(efx, &temp, FR_AZ_CSR_SPARE);
+ }
+
+ if (EFX_WORKAROUND_7244(efx)) {
+ efx_reado(efx, &temp, FR_BZ_RX_FILTER_CTL);
+ EFX_SET_OWORD_FIELD(temp, FRF_BZ_UDP_FULL_SRCH_LIMIT, 8);
+ EFX_SET_OWORD_FIELD(temp, FRF_BZ_UDP_WILD_SRCH_LIMIT, 8);
+ EFX_SET_OWORD_FIELD(temp, FRF_BZ_TCP_FULL_SRCH_LIMIT, 8);
+ EFX_SET_OWORD_FIELD(temp, FRF_BZ_TCP_WILD_SRCH_LIMIT, 8);
+ efx_writeo(efx, &temp, FR_BZ_RX_FILTER_CTL);
+ }
+
+ /* XXX This is documented only for Falcon A0/A1 */
+ /* Set up RX. The 'wait for descriptor' mode is broken and must
+ * be disabled. RXDP recovery shouldn't be needed, but is.
+ */
+ efx_reado(efx, &temp, FR_AA_RX_SELF_RST);
+ EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_NODESC_WAIT_DIS, 1);
+ EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_SELF_RST_EN, 1);
+ if (EFX_WORKAROUND_5583(efx))
+ EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_ISCSI_DIS, 1);
+ efx_writeo(efx, &temp, FR_AA_RX_SELF_RST);
+
+ /* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16
+ * descriptors (which is bad).
+ */
+ efx_reado(efx, &temp, FR_AZ_TX_CFG);
+ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_NO_EOP_DISC_EN, 0);
+ efx_writeo(efx, &temp, FR_AZ_TX_CFG);
+
+ falcon_init_rx_cfg(efx);
+
+ if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+ falcon_b0_rx_push_rss_config(efx);
+
+ /* Set destination of both TX and RX Flush events */
+ EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0);
+ efx_writeo(efx, &temp, FR_BZ_DP_CTRL);
+ }
+
+ efx_farch_init_common(efx);
+
+ return 0;
+}
+
+static void falcon_remove_nic(struct efx_nic *efx)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ struct falcon_board *board = falcon_board(efx);
+
+ board->type->fini(efx);
+
+ /* Remove I2C adapter and clear it in preparation for a retry */
+ i2c_del_adapter(&board->i2c_adap);
+ memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));
+
+ efx_nic_free_buffer(efx, &efx->irq_status);
+
+ __falcon_reset_hw(efx, RESET_TYPE_ALL);
+
+ /* Release the second function after the reset */
+ if (nic_data->pci_dev2) {
+ pci_dev_put(nic_data->pci_dev2);
+ nic_data->pci_dev2 = NULL;
+ }
+
+ /* Tear down the private nic state */
+ kfree(efx->nic_data);
+ efx->nic_data = NULL;
+}
+
+static size_t falcon_describe_nic_stats(struct efx_nic *efx, u8 *names)
+{
+ return efx_nic_describe_stats(falcon_stat_desc, FALCON_STAT_COUNT,
+ falcon_stat_mask, names);
+}
+
+static size_t falcon_update_nic_stats(struct efx_nic *efx, u64 *full_stats,
+ struct rtnl_link_stats64 *core_stats)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ u64 *stats = nic_data->stats;
+ efx_oword_t cnt;
+
+ if (!nic_data->stats_disable_count) {
+ efx_reado(efx, &cnt, FR_AZ_RX_NODESC_DROP);
+ stats[FALCON_STAT_rx_nodesc_drop_cnt] +=
+ EFX_OWORD_FIELD(cnt, FRF_AB_RX_NODESC_DROP_CNT);
+
+ if (nic_data->stats_pending &&
+ FALCON_XMAC_STATS_DMA_FLAG(efx)) {
+ nic_data->stats_pending = false;
+ rmb(); /* read the done flag before the stats */
+ efx_nic_update_stats(
+ falcon_stat_desc, FALCON_STAT_COUNT,
+ falcon_stat_mask,
+ stats, efx->stats_buffer.addr, true);
+ }
+
+ /* Update derived statistic */
+ efx_update_diff_stat(&stats[FALCON_STAT_rx_bad_bytes],
+ stats[FALCON_STAT_rx_bytes] -
+ stats[FALCON_STAT_rx_good_bytes] -
+ stats[FALCON_STAT_rx_control] * 64);
+ efx_update_sw_stats(efx, stats);
+ }
+
+ if (full_stats)
+ memcpy(full_stats, stats, sizeof(u64) * FALCON_STAT_COUNT);
+
+ if (core_stats) {
+ core_stats->rx_packets = stats[FALCON_STAT_rx_packets];
+ core_stats->tx_packets = stats[FALCON_STAT_tx_packets];
+ core_stats->rx_bytes = stats[FALCON_STAT_rx_bytes];
+ core_stats->tx_bytes = stats[FALCON_STAT_tx_bytes];
+ core_stats->rx_dropped = stats[FALCON_STAT_rx_nodesc_drop_cnt] +
+ stats[GENERIC_STAT_rx_nodesc_trunc] +
+ stats[GENERIC_STAT_rx_noskb_drops];
+ core_stats->multicast = stats[FALCON_STAT_rx_multicast];
+ core_stats->rx_length_errors =
+ stats[FALCON_STAT_rx_gtjumbo] +
+ stats[FALCON_STAT_rx_length_error];
+ core_stats->rx_crc_errors = stats[FALCON_STAT_rx_bad];
+ core_stats->rx_frame_errors = stats[FALCON_STAT_rx_align_error];
+ core_stats->rx_fifo_errors = stats[FALCON_STAT_rx_overflow];
+
+ core_stats->rx_errors = (core_stats->rx_length_errors +
+ core_stats->rx_crc_errors +
+ core_stats->rx_frame_errors +
+ stats[FALCON_STAT_rx_symbol_error]);
+ }
+
+ return FALCON_STAT_COUNT;
+}
+
+void falcon_start_nic_stats(struct efx_nic *efx)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+
+ spin_lock_bh(&efx->stats_lock);
+ if (--nic_data->stats_disable_count == 0)
+ falcon_stats_request(efx);
+ spin_unlock_bh(&efx->stats_lock);
+}
+
+/* We don't actually pull stats on Falcon. Wait 10ms so that
+ * they have arrived by the time this is called, just after start_stats
+ */
+static void falcon_pull_nic_stats(struct efx_nic *efx)
+{
+ msleep(10);
+}
+
+void falcon_stop_nic_stats(struct efx_nic *efx)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ int i;
+
+ might_sleep();
+
+ spin_lock_bh(&efx->stats_lock);
+ ++nic_data->stats_disable_count;
+ spin_unlock_bh(&efx->stats_lock);
+
+ del_timer_sync(&nic_data->stats_timer);
+
+ /* Wait enough time for the most recent transfer to
+ * complete. */
+ for (i = 0; i < 4 && nic_data->stats_pending; i++) {
+ if (FALCON_XMAC_STATS_DMA_FLAG(efx))
+ break;
+ msleep(1);
+ }
+
+ spin_lock_bh(&efx->stats_lock);
+ falcon_stats_complete(efx);
+ spin_unlock_bh(&efx->stats_lock);
+}
+
+static void falcon_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
+{
+ falcon_board(efx)->type->set_id_led(efx, mode);
+}
+
+/**************************************************************************
+ *
+ * Wake on LAN
+ *
+ **************************************************************************
+ */
+
+static void falcon_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol)
+{
+ wol->supported = 0;
+ wol->wolopts = 0;
+ memset(&wol->sopass, 0, sizeof(wol->sopass));
+}
+
+static int falcon_set_wol(struct efx_nic *efx, u32 type)
+{
+ if (type != 0)
+ return -EINVAL;
+ return 0;
+}
+
+/**************************************************************************
+ *
+ * Revision-dependent attributes used by efx.c and nic.c
+ *
+ **************************************************************************
+ */
+
+const struct efx_nic_type falcon_a1_nic_type = {
+ .mem_map_size = falcon_a1_mem_map_size,
+ .probe = falcon_probe_nic,
+ .remove = falcon_remove_nic,
+ .init = falcon_init_nic,
+ .dimension_resources = falcon_dimension_resources,
+ .fini = falcon_irq_ack_a1,
+ .monitor = falcon_monitor,
+ .map_reset_reason = falcon_map_reset_reason,
+ .map_reset_flags = falcon_map_reset_flags,
+ .reset = falcon_reset_hw,
+ .probe_port = falcon_probe_port,
+ .remove_port = falcon_remove_port,
+ .handle_global_event = falcon_handle_global_event,
+ .fini_dmaq = efx_farch_fini_dmaq,
+ .prepare_flush = falcon_prepare_flush,
+ .finish_flush = efx_port_dummy_op_void,
+ .prepare_flr = efx_port_dummy_op_void,
+ .finish_flr = efx_farch_finish_flr,
+ .describe_stats = falcon_describe_nic_stats,
+ .update_stats = falcon_update_nic_stats,
+ .start_stats = falcon_start_nic_stats,
+ .pull_stats = falcon_pull_nic_stats,
+ .stop_stats = falcon_stop_nic_stats,
+ .set_id_led = falcon_set_id_led,
+ .push_irq_moderation = falcon_push_irq_moderation,
+ .reconfigure_port = falcon_reconfigure_port,
+ .prepare_enable_fc_tx = falcon_a1_prepare_enable_fc_tx,
+ .reconfigure_mac = falcon_reconfigure_xmac,
+ .check_mac_fault = falcon_xmac_check_fault,
+ .get_wol = falcon_get_wol,
+ .set_wol = falcon_set_wol,
+ .resume_wol = efx_port_dummy_op_void,
+ .test_nvram = falcon_test_nvram,
+ .irq_enable_master = efx_farch_irq_enable_master,
+ .irq_test_generate = efx_farch_irq_test_generate,
+ .irq_disable_non_ev = efx_farch_irq_disable_master,
+ .irq_handle_msi = efx_farch_msi_interrupt,
+ .irq_handle_legacy = falcon_legacy_interrupt_a1,
+ .tx_probe = efx_farch_tx_probe,
+ .tx_init = efx_farch_tx_init,
+ .tx_remove = efx_farch_tx_remove,
+ .tx_write = efx_farch_tx_write,
+ .rx_push_rss_config = efx_port_dummy_op_void,
+ .rx_probe = efx_farch_rx_probe,
+ .rx_init = efx_farch_rx_init,
+ .rx_remove = efx_farch_rx_remove,
+ .rx_write = efx_farch_rx_write,
+ .rx_defer_refill = efx_farch_rx_defer_refill,
+ .ev_probe = efx_farch_ev_probe,
+ .ev_init = efx_farch_ev_init,
+ .ev_fini = efx_farch_ev_fini,
+ .ev_remove = efx_farch_ev_remove,
+ .ev_process = efx_farch_ev_process,
+ .ev_read_ack = efx_farch_ev_read_ack,
+ .ev_test_generate = efx_farch_ev_test_generate,
+
+ /* We don't expose the filter table on Falcon A1 as it is not
+ * mapped into function 0, but these implementations still
+ * work with a degenerate case of all tables set to size 0.
+ */
+ .filter_table_probe = efx_farch_filter_table_probe,
+ .filter_table_restore = efx_farch_filter_table_restore,
+ .filter_table_remove = efx_farch_filter_table_remove,
+ .filter_insert = efx_farch_filter_insert,
+ .filter_remove_safe = efx_farch_filter_remove_safe,
+ .filter_get_safe = efx_farch_filter_get_safe,
+ .filter_clear_rx = efx_farch_filter_clear_rx,
+ .filter_count_rx_used = efx_farch_filter_count_rx_used,
+ .filter_get_rx_id_limit = efx_farch_filter_get_rx_id_limit,
+ .filter_get_rx_ids = efx_farch_filter_get_rx_ids,
+
+#ifdef CONFIG_SFC_MTD
+ .mtd_probe = falcon_mtd_probe,
+ .mtd_rename = falcon_mtd_rename,
+ .mtd_read = falcon_mtd_read,
+ .mtd_erase = falcon_mtd_erase,
+ .mtd_write = falcon_mtd_write,
+ .mtd_sync = falcon_mtd_sync,
+#endif
+ .sriov_init = efx_falcon_sriov_init,
+ .sriov_fini = efx_falcon_sriov_fini,
+ .sriov_mac_address_changed = efx_falcon_sriov_mac_address_changed,
+ .sriov_wanted = efx_falcon_sriov_wanted,
+ .sriov_reset = efx_falcon_sriov_reset,
+
+ .revision = EFX_REV_FALCON_A1,
+ .txd_ptr_tbl_base = FR_AA_TX_DESC_PTR_TBL_KER,
+ .rxd_ptr_tbl_base = FR_AA_RX_DESC_PTR_TBL_KER,
+ .buf_tbl_base = FR_AA_BUF_FULL_TBL_KER,
+ .evq_ptr_tbl_base = FR_AA_EVQ_PTR_TBL_KER,
+ .evq_rptr_tbl_base = FR_AA_EVQ_RPTR_KER,
+ .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
+ .rx_buffer_padding = 0x24,
+ .can_rx_scatter = false,
+ .max_interrupt_mode = EFX_INT_MODE_MSI,
+ .timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH,
+ .offload_features = NETIF_F_IP_CSUM,
+ .mcdi_max_ver = -1,
+};
+
+const struct efx_nic_type falcon_b0_nic_type = {
+ .mem_map_size = falcon_b0_mem_map_size,
+ .probe = falcon_probe_nic,
+ .remove = falcon_remove_nic,
+ .init = falcon_init_nic,
+ .dimension_resources = falcon_dimension_resources,
+ .fini = efx_port_dummy_op_void,
+ .monitor = falcon_monitor,
+ .map_reset_reason = falcon_map_reset_reason,
+ .map_reset_flags = falcon_map_reset_flags,
+ .reset = falcon_reset_hw,
+ .probe_port = falcon_probe_port,
+ .remove_port = falcon_remove_port,
+ .handle_global_event = falcon_handle_global_event,
+ .fini_dmaq = efx_farch_fini_dmaq,
+ .prepare_flush = falcon_prepare_flush,
+ .finish_flush = efx_port_dummy_op_void,
+ .prepare_flr = efx_port_dummy_op_void,
+ .finish_flr = efx_farch_finish_flr,
+ .describe_stats = falcon_describe_nic_stats,
+ .update_stats = falcon_update_nic_stats,
+ .start_stats = falcon_start_nic_stats,
+ .pull_stats = falcon_pull_nic_stats,
+ .stop_stats = falcon_stop_nic_stats,
+ .set_id_led = falcon_set_id_led,
+ .push_irq_moderation = falcon_push_irq_moderation,
+ .reconfigure_port = falcon_reconfigure_port,
+ .prepare_enable_fc_tx = falcon_b0_prepare_enable_fc_tx,
+ .reconfigure_mac = falcon_reconfigure_xmac,
+ .check_mac_fault = falcon_xmac_check_fault,
+ .get_wol = falcon_get_wol,
+ .set_wol = falcon_set_wol,
+ .resume_wol = efx_port_dummy_op_void,
+ .test_chip = falcon_b0_test_chip,
+ .test_nvram = falcon_test_nvram,
+ .irq_enable_master = efx_farch_irq_enable_master,
+ .irq_test_generate = efx_farch_irq_test_generate,
+ .irq_disable_non_ev = efx_farch_irq_disable_master,
+ .irq_handle_msi = efx_farch_msi_interrupt,
+ .irq_handle_legacy = efx_farch_legacy_interrupt,
+ .tx_probe = efx_farch_tx_probe,
+ .tx_init = efx_farch_tx_init,
+ .tx_remove = efx_farch_tx_remove,
+ .tx_write = efx_farch_tx_write,
+ .rx_push_rss_config = falcon_b0_rx_push_rss_config,
+ .rx_probe = efx_farch_rx_probe,
+ .rx_init = efx_farch_rx_init,
+ .rx_remove = efx_farch_rx_remove,
+ .rx_write = efx_farch_rx_write,
+ .rx_defer_refill = efx_farch_rx_defer_refill,
+ .ev_probe = efx_farch_ev_probe,
+ .ev_init = efx_farch_ev_init,
+ .ev_fini = efx_farch_ev_fini,
+ .ev_remove = efx_farch_ev_remove,
+ .ev_process = efx_farch_ev_process,
+ .ev_read_ack = efx_farch_ev_read_ack,
+ .ev_test_generate = efx_farch_ev_test_generate,
+ .filter_table_probe = efx_farch_filter_table_probe,
+ .filter_table_restore = efx_farch_filter_table_restore,
+ .filter_table_remove = efx_farch_filter_table_remove,
+ .filter_update_rx_scatter = efx_farch_filter_update_rx_scatter,
+ .filter_insert = efx_farch_filter_insert,
+ .filter_remove_safe = efx_farch_filter_remove_safe,
+ .filter_get_safe = efx_farch_filter_get_safe,
+ .filter_clear_rx = efx_farch_filter_clear_rx,
+ .filter_count_rx_used = efx_farch_filter_count_rx_used,
+ .filter_get_rx_id_limit = efx_farch_filter_get_rx_id_limit,
+ .filter_get_rx_ids = efx_farch_filter_get_rx_ids,
+#ifdef CONFIG_RFS_ACCEL
+ .filter_rfs_insert = efx_farch_filter_rfs_insert,
+ .filter_rfs_expire_one = efx_farch_filter_rfs_expire_one,
+#endif
+#ifdef CONFIG_SFC_MTD
+ .mtd_probe = falcon_mtd_probe,
+ .mtd_rename = falcon_mtd_rename,
+ .mtd_read = falcon_mtd_read,
+ .mtd_erase = falcon_mtd_erase,
+ .mtd_write = falcon_mtd_write,
+ .mtd_sync = falcon_mtd_sync,
+#endif
+ .sriov_init = efx_falcon_sriov_init,
+ .sriov_fini = efx_falcon_sriov_fini,
+ .sriov_mac_address_changed = efx_falcon_sriov_mac_address_changed,
+ .sriov_wanted = efx_falcon_sriov_wanted,
+ .sriov_reset = efx_falcon_sriov_reset,
+
+ .revision = EFX_REV_FALCON_B0,
+ .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
+ .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
+ .buf_tbl_base = FR_BZ_BUF_FULL_TBL,
+ .evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL,
+ .evq_rptr_tbl_base = FR_BZ_EVQ_RPTR,
+ .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
+ .rx_prefix_size = FS_BZ_RX_PREFIX_SIZE,
+ .rx_hash_offset = FS_BZ_RX_PREFIX_HASH_OFST,
+ .rx_buffer_padding = 0,
+ .can_rx_scatter = true,
+ .max_interrupt_mode = EFX_INT_MODE_MSIX,
+ .timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH,
+ .offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH | NETIF_F_NTUPLE,
+ .mcdi_max_ver = -1,
+ .max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS,
+};
diff --git a/kernel/drivers/net/ethernet/sfc/falcon_boards.c b/kernel/drivers/net/ethernet/sfc/falcon_boards.c
new file mode 100644
index 000000000..1736f4b80
--- /dev/null
+++ b/kernel/drivers/net/ethernet/sfc/falcon_boards.c
@@ -0,0 +1,764 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2007-2012 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/rtnetlink.h>
+
+#include "net_driver.h"
+#include "phy.h"
+#include "efx.h"
+#include "nic.h"
+#include "workarounds.h"
+
+/* Macros for unpacking the board revision */
+/* The revision info is in host byte order. */
+#define FALCON_BOARD_TYPE(_rev) (_rev >> 8)
+#define FALCON_BOARD_MAJOR(_rev) ((_rev >> 4) & 0xf)
+#define FALCON_BOARD_MINOR(_rev) (_rev & 0xf)
+
+/* Board types */
+#define FALCON_BOARD_SFE4001 0x01
+#define FALCON_BOARD_SFE4002 0x02
+#define FALCON_BOARD_SFE4003 0x03
+#define FALCON_BOARD_SFN4112F 0x52
+
+/* Board temperature is about 15°C above ambient when air flow is
+ * limited. The maximum acceptable ambient temperature varies
+ * depending on the PHY specifications but the critical temperature
+ * above which we should shut down to avoid damage is 80°C. */
+#define FALCON_BOARD_TEMP_BIAS 15
+#define FALCON_BOARD_TEMP_CRIT (80 + FALCON_BOARD_TEMP_BIAS)
+
+/* SFC4000 datasheet says: 'The maximum permitted junction temperature
+ * is 125°C; the thermal design of the environment for the SFC4000
+ * should aim to keep this well below 100°C.' */
+#define FALCON_JUNC_TEMP_MIN 0
+#define FALCON_JUNC_TEMP_MAX 90
+#define FALCON_JUNC_TEMP_CRIT 125
+
+/*****************************************************************************
+ * Support for LM87 sensor chip used on several boards
+ */
+#define LM87_REG_TEMP_HW_INT_LOCK 0x13
+#define LM87_REG_TEMP_HW_EXT_LOCK 0x14
+#define LM87_REG_TEMP_HW_INT 0x17
+#define LM87_REG_TEMP_HW_EXT 0x18
+#define LM87_REG_TEMP_EXT1 0x26
+#define LM87_REG_TEMP_INT 0x27
+#define LM87_REG_ALARMS1 0x41
+#define LM87_REG_ALARMS2 0x42
+#define LM87_IN_LIMITS(nr, _min, _max) \
+ 0x2B + (nr) * 2, _max, 0x2C + (nr) * 2, _min
+#define LM87_AIN_LIMITS(nr, _min, _max) \
+ 0x3B + (nr), _max, 0x1A + (nr), _min
+#define LM87_TEMP_INT_LIMITS(_min, _max) \
+ 0x39, _max, 0x3A, _min
+#define LM87_TEMP_EXT1_LIMITS(_min, _max) \
+ 0x37, _max, 0x38, _min
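+/* Each limit macro above expands to two (register, value) pairs, in the
+ * format consumed by efx_poke_lm87() */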
+
+#define LM87_ALARM_TEMP_INT 0x10
+#define LM87_ALARM_TEMP_EXT1 0x20
+
+#if defined(CONFIG_SENSORS_LM87) || defined(CONFIG_SENSORS_LM87_MODULE)
+
+static int efx_poke_lm87(struct i2c_client *client, const u8 *reg_values)
+{
+ while (*reg_values) {
+ u8 reg = *reg_values++;
+ u8 value = *reg_values++;
+ int rc = i2c_smbus_write_byte_data(client, reg, value);
+ if (rc)
+ return rc;
+ }
+ return 0;
+}
+
+static const u8 falcon_lm87_common_regs[] = {
+ LM87_REG_TEMP_HW_INT_LOCK, FALCON_BOARD_TEMP_CRIT,
+ LM87_REG_TEMP_HW_INT, FALCON_BOARD_TEMP_CRIT,
+ LM87_TEMP_EXT1_LIMITS(FALCON_JUNC_TEMP_MIN, FALCON_JUNC_TEMP_MAX),
+ LM87_REG_TEMP_HW_EXT_LOCK, FALCON_JUNC_TEMP_CRIT,
+ LM87_REG_TEMP_HW_EXT, FALCON_JUNC_TEMP_CRIT,
+ 0
+};
+
+static int efx_init_lm87(struct efx_nic *efx, const struct i2c_board_info *info,
+ const u8 *reg_values)
+{
+ struct falcon_board *board = falcon_board(efx);
+ struct i2c_client *client = i2c_new_device(&board->i2c_adap, info);
+ int rc;
+
+ if (!client)
+ return -EIO;
+
+ /* Read-to-clear alarm/interrupt status */
+ i2c_smbus_read_byte_data(client, LM87_REG_ALARMS1);
+ i2c_smbus_read_byte_data(client, LM87_REG_ALARMS2);
+
+ rc = efx_poke_lm87(client, reg_values);
+ if (rc)
+ goto err;
+ rc = efx_poke_lm87(client, falcon_lm87_common_regs);
+ if (rc)
+ goto err;
+
+ board->hwmon_client = client;
+ return 0;
+
+err:
+ i2c_unregister_device(client);
+ return rc;
+}
+
+static void efx_fini_lm87(struct efx_nic *efx)
+{
+ i2c_unregister_device(falcon_board(efx)->hwmon_client);
+}
+
+static int efx_check_lm87(struct efx_nic *efx, unsigned mask)
+{
+ struct i2c_client *client = falcon_board(efx)->hwmon_client;
+ bool temp_crit, elec_fault, is_failure;
+ u16 alarms;
+ s32 reg;
+
+ /* If link is up then do not monitor temperature */
+ if (EFX_WORKAROUND_7884(efx) && efx->link_state.up)
+ return 0;
+
+ reg = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS1);
+ if (reg < 0)
+ return reg;
+ alarms = reg;
+ reg = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS2);
+ if (reg < 0)
+ return reg;
+ alarms |= reg << 8;
+ alarms &= mask;
+
+ temp_crit = false;
+ if (alarms & LM87_ALARM_TEMP_INT) {
+ reg = i2c_smbus_read_byte_data(client, LM87_REG_TEMP_INT);
+ if (reg < 0)
+ return reg;
+ if (reg > FALCON_BOARD_TEMP_CRIT)
+ temp_crit = true;
+ }
+ if (alarms & LM87_ALARM_TEMP_EXT1) {
+ reg = i2c_smbus_read_byte_data(client, LM87_REG_TEMP_EXT1);
+ if (reg < 0)
+ return reg;
+ if (reg > FALCON_JUNC_TEMP_CRIT)
+ temp_crit = true;
+ }
+ elec_fault = alarms & ~(LM87_ALARM_TEMP_INT | LM87_ALARM_TEMP_EXT1);
+ is_failure = temp_crit || elec_fault;
+
+ if (alarms)
+ netif_err(efx, hw, efx->net_dev,
+ "LM87 detected a hardware %s (status %02x:%02x)"
+ "%s%s%s%s\n",
+ is_failure ? "failure" : "problem",
+ alarms & 0xff, alarms >> 8,
+ (alarms & LM87_ALARM_TEMP_INT) ?
+ "; board is overheating" : "",
+ (alarms & LM87_ALARM_TEMP_EXT1) ?
+ "; controller is overheating" : "",
+ temp_crit ? "; reached critical temperature" : "",
+ elec_fault ? "; electrical fault" : "");
+
+ return is_failure ? -ERANGE : 0;
+}
+
+#else /* !CONFIG_SENSORS_LM87 */
+
+static inline int
+efx_init_lm87(struct efx_nic *efx, const struct i2c_board_info *info,
+ const u8 *reg_values)
+{
+ return 0;
+}
+static inline void efx_fini_lm87(struct efx_nic *efx)
+{
+}
+static inline int efx_check_lm87(struct efx_nic *efx, unsigned mask)
+{
+ return 0;
+}
+
+#endif /* CONFIG_SENSORS_LM87 */
+
+/*****************************************************************************
+ * Support for the SFE4001 NIC.
+ *
+ * The SFE4001 does not power-up fully at reset due to its high power
+ * consumption. We control its power via a PCA9539 I/O expander.
+ * It also has a MAX6647 temperature monitor which we expose to
+ * the lm90 driver.
+ *
+ * This also provides minimal support for reflashing the PHY, which is
+ * initiated by resetting it with the FLASH_CFG_1 pin pulled down.
+ * On SFE4001 rev A2 and later this is connected to the 3V3X output of
+ * the IO-expander.
+ * We represent reflash mode as PHY_MODE_SPECIAL and make it mutually
+ * exclusive with the network device being open.
+ */
+
+/**************************************************************************
+ * Support for I2C IO Expander device on SFE4001
+ */
+#define PCA9539 0x74
+
+#define P0_IN 0x00
+#define P0_OUT 0x02
+#define P0_INVERT 0x04
+#define P0_CONFIG 0x06
+
+#define P0_EN_1V0X_LBN 0
+#define P0_EN_1V0X_WIDTH 1
+#define P0_EN_1V2_LBN 1
+#define P0_EN_1V2_WIDTH 1
+#define P0_EN_2V5_LBN 2
+#define P0_EN_2V5_WIDTH 1
+#define P0_EN_3V3X_LBN 3
+#define P0_EN_3V3X_WIDTH 1
+#define P0_EN_5V_LBN 4
+#define P0_EN_5V_WIDTH 1
+#define P0_SHORTEN_JTAG_LBN 5
+#define P0_SHORTEN_JTAG_WIDTH 1
+#define P0_X_TRST_LBN 6
+#define P0_X_TRST_WIDTH 1
+#define P0_DSP_RESET_LBN 7
+#define P0_DSP_RESET_WIDTH 1
+
+#define P1_IN 0x01
+#define P1_OUT 0x03
+#define P1_INVERT 0x05
+#define P1_CONFIG 0x07
+
+#define P1_AFE_PWD_LBN 0
+#define P1_AFE_PWD_WIDTH 1
+#define P1_DSP_PWD25_LBN 1
+#define P1_DSP_PWD25_WIDTH 1
+#define P1_RESERVED_LBN 2
+#define P1_RESERVED_WIDTH 2
+#define P1_SPARE_LBN 4
+#define P1_SPARE_WIDTH 4
+
+/* Temperature Sensor */
+#define MAX664X_REG_RSL 0x02
+#define MAX664X_REG_WLHO 0x0B
+
+static void sfe4001_poweroff(struct efx_nic *efx)
+{
+ struct i2c_client *ioexp_client = falcon_board(efx)->ioexp_client;
+ struct i2c_client *hwmon_client = falcon_board(efx)->hwmon_client;
+
+ /* Turn off all power rails and disable outputs */
+ i2c_smbus_write_byte_data(ioexp_client, P0_OUT, 0xff);
+ i2c_smbus_write_byte_data(ioexp_client, P1_CONFIG, 0xff);
+ i2c_smbus_write_byte_data(ioexp_client, P0_CONFIG, 0xff);
+
+ /* Clear any over-temperature alert */
+ i2c_smbus_read_byte_data(hwmon_client, MAX664X_REG_RSL);
+}
+
+static int sfe4001_poweron(struct efx_nic *efx)
+{
+ struct i2c_client *ioexp_client = falcon_board(efx)->ioexp_client;
+ struct i2c_client *hwmon_client = falcon_board(efx)->hwmon_client;
+ unsigned int i, j;
+ int rc;
+ u8 out;
+
+ /* Clear any previous over-temperature alert */
+ rc = i2c_smbus_read_byte_data(hwmon_client, MAX664X_REG_RSL);
+ if (rc < 0)
+ return rc;
+
+ /* Enable port 0 and port 1 outputs on IO expander */
+ rc = i2c_smbus_write_byte_data(ioexp_client, P0_CONFIG, 0x00);
+ if (rc)
+ return rc;
+ rc = i2c_smbus_write_byte_data(ioexp_client, P1_CONFIG,
+ 0xff & ~(1 << P1_SPARE_LBN));
+ if (rc)
+ goto fail_on;
+
+ /* If PHY power is on, turn it all off and wait 1 second to
+ * ensure a full reset.
+ */
+ rc = i2c_smbus_read_byte_data(ioexp_client, P0_OUT);
+ if (rc < 0)
+ goto fail_on;
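+ /* The rail enables are active-low, so the zero shifts below are
+ * purely documentary: 'out' is simply 0xff (all rails off) */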
+ out = 0xff & ~((0 << P0_EN_1V2_LBN) | (0 << P0_EN_2V5_LBN) |
+ (0 << P0_EN_3V3X_LBN) | (0 << P0_EN_5V_LBN) |
+ (0 << P0_EN_1V0X_LBN));
+ if (rc != out) {
+ netif_info(efx, hw, efx->net_dev, "power-cycling PHY\n");
+ rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
+ if (rc)
+ goto fail_on;
+ schedule_timeout_uninterruptible(HZ);
+ }
+
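+ /* Retry the power-on sequence up to 20 times; each attempt allows
+ * the DSP up to a second to boot */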
+ for (i = 0; i < 20; ++i) {
+ /* Turn on 1.2V, 2.5V, 3.3V and 5V power rails */
+ out = 0xff & ~((1 << P0_EN_1V2_LBN) | (1 << P0_EN_2V5_LBN) |
+ (1 << P0_EN_3V3X_LBN) | (1 << P0_EN_5V_LBN) |
+ (1 << P0_X_TRST_LBN));
+ if (efx->phy_mode & PHY_MODE_SPECIAL)
+ out |= 1 << P0_EN_3V3X_LBN;
+
+ rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
+ if (rc)
+ goto fail_on;
+ msleep(10);
+
+ /* Turn on 1V power rail */
+ out &= ~(1 << P0_EN_1V0X_LBN);
+ rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
+ if (rc)
+ goto fail_on;
+
+ netif_info(efx, hw, efx->net_dev,
+ "waiting for DSP boot (attempt %d)...\n", i);
+
+ /* In flash config mode, DSP does not turn on AFE, so
+ * just wait 1 second.
+ */
+ if (efx->phy_mode & PHY_MODE_SPECIAL) {
+ schedule_timeout_uninterruptible(HZ);
+ return 0;
+ }
+
+ for (j = 0; j < 10; ++j) {
+ msleep(100);
+
+ /* Check DSP has asserted AFE power line */
+ rc = i2c_smbus_read_byte_data(ioexp_client, P1_IN);
+ if (rc < 0)
+ goto fail_on;
+ if (rc & (1 << P1_AFE_PWD_LBN))
+ return 0;
+ }
+ }
+
+ netif_info(efx, hw, efx->net_dev, "timed out waiting for DSP boot\n");
+ rc = -ETIMEDOUT;
+fail_on:
+ sfe4001_poweroff(efx);
+ return rc;
+}
+
+static ssize_t show_phy_flash_cfg(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+ return sprintf(buf, "%d\n", !!(efx->phy_mode & PHY_MODE_SPECIAL));
+}
+
+static ssize_t set_phy_flash_cfg(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+ enum efx_phy_mode old_mode, new_mode;
+ int err;
+
+ rtnl_lock();
+ old_mode = efx->phy_mode;
+ if (count == 0 || *buf == '0')
+ new_mode = old_mode & ~PHY_MODE_SPECIAL;
+ else
+ new_mode = PHY_MODE_SPECIAL;
+ if (!((old_mode ^ new_mode) & PHY_MODE_SPECIAL)) {
+ err = 0;
+ } else if (efx->state != STATE_READY || netif_running(efx->net_dev)) {
+ err = -EBUSY;
+ } else {
+ /* Reset the PHY, reconfigure the MAC and enable/disable
+ * MAC stats accordingly. */
+ efx->phy_mode = new_mode;
+ if (new_mode & PHY_MODE_SPECIAL)
+ falcon_stop_nic_stats(efx);
+ err = sfe4001_poweron(efx);
+ if (!err)
+ err = efx_reconfigure_port(efx);
+ if (!(new_mode & PHY_MODE_SPECIAL))
+ falcon_start_nic_stats(efx);
+ }
+ rtnl_unlock();
+
+ return err ? err : count;
+}
+
+static DEVICE_ATTR(phy_flash_cfg, 0644, show_phy_flash_cfg, set_phy_flash_cfg);
+
+static void sfe4001_fini(struct efx_nic *efx)
+{
+ struct falcon_board *board = falcon_board(efx);
+
+ netif_info(efx, drv, efx->net_dev, "%s\n", __func__);
+
+ device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
+ sfe4001_poweroff(efx);
+ i2c_unregister_device(board->ioexp_client);
+ i2c_unregister_device(board->hwmon_client);
+}
+
+static int sfe4001_check_hw(struct efx_nic *efx)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ s32 status;
+
+ /* If XAUI link is up then do not monitor */
+ if (EFX_WORKAROUND_7884(efx) && !nic_data->xmac_poll_required)
+ return 0;
+
+ /* Check the powered status of the PHY. Lack of power implies that
+ * the MAX6647 has shut down power to it, probably due to a temp.
+ * alarm. We read the power status rather than the MAX6647 status
+ * directly because the latter is read-to-clear and would thus
+ * start to power up the PHY again when polled, causing us to blip
+ * the power undesirably.
+ * We know we can read from the IO expander because we did so
+ * during power-on, so assume that a failure now is bad news. */
+ status = i2c_smbus_read_byte_data(falcon_board(efx)->ioexp_client, P1_IN);
+ if (status >= 0 &&
+ (status & ((1 << P1_AFE_PWD_LBN) | (1 << P1_DSP_PWD25_LBN))) != 0)
+ return 0;
+
+ /* Use board power control, not PHY power control */
+ sfe4001_poweroff(efx);
+ efx->phy_mode = PHY_MODE_OFF;
+
+ return (status < 0) ? -EIO : -ERANGE;
+}
+
+static const struct i2c_board_info sfe4001_hwmon_info = {
+ I2C_BOARD_INFO("max6647", 0x4e),
+};
+
+/* This board uses an I2C expander to provide power to the PHY; the power
+ * must be turned on before the PHY can be used.
+ * Context: Process context, rtnl lock held
+ */
+static int sfe4001_init(struct efx_nic *efx)
+{
+ struct falcon_board *board = falcon_board(efx);
+ int rc;
+
+#if defined(CONFIG_SENSORS_LM90) || defined(CONFIG_SENSORS_LM90_MODULE)
+ board->hwmon_client =
+ i2c_new_device(&board->i2c_adap, &sfe4001_hwmon_info);
+#else
+ board->hwmon_client =
+ i2c_new_dummy(&board->i2c_adap, sfe4001_hwmon_info.addr);
+#endif
+ if (!board->hwmon_client)
+ return -EIO;
+
+ /* Raise board/PHY high limit from 85 to 90 degrees Celsius */
+ rc = i2c_smbus_write_byte_data(board->hwmon_client,
+ MAX664X_REG_WLHO, 90);
+ if (rc)
+ goto fail_hwmon;
+
+ board->ioexp_client = i2c_new_dummy(&board->i2c_adap, PCA9539);
+ if (!board->ioexp_client) {
+ rc = -EIO;
+ goto fail_hwmon;
+ }
+
+ if (efx->phy_mode & PHY_MODE_SPECIAL) {
+ /* PHY won't generate a 156.25 MHz clock and MAC stats fetch
+ * will fail. */
+ falcon_stop_nic_stats(efx);
+ }
+ rc = sfe4001_poweron(efx);
+ if (rc)
+ goto fail_ioexp;
+
+ rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
+ if (rc)
+ goto fail_on;
+
+ netif_info(efx, hw, efx->net_dev, "PHY is powered on\n");
+ return 0;
+
+fail_on:
+ sfe4001_poweroff(efx);
+fail_ioexp:
+ i2c_unregister_device(board->ioexp_client);
+fail_hwmon:
+ i2c_unregister_device(board->hwmon_client);
+ return rc;
+}
+
+/*****************************************************************************
+ * Support for the SFE4002
+ *
+ */
+static u8 sfe4002_lm87_channel = 0x03; /* use AIN not FAN inputs */
+
+static const u8 sfe4002_lm87_regs[] = {
+ LM87_IN_LIMITS(0, 0x7c, 0x99), /* 2.5V: 1.8V +/- 10% */
+ LM87_IN_LIMITS(1, 0x4c, 0x5e), /* Vccp1: 1.2V +/- 10% */
+ LM87_IN_LIMITS(2, 0xac, 0xd4), /* 3.3V: 3.3V +/- 10% */
+ LM87_IN_LIMITS(3, 0xac, 0xd4), /* 5V: 5.0V +/- 10% */
+ LM87_IN_LIMITS(4, 0xac, 0xe0), /* 12V: 10.8-14V */
+ LM87_IN_LIMITS(5, 0x3f, 0x4f), /* Vccp2: 1.0V +/- 10% */
+ LM87_AIN_LIMITS(0, 0x98, 0xbb), /* AIN1: 1.66V +/- 10% */
+ LM87_AIN_LIMITS(1, 0x8a, 0xa9), /* AIN2: 1.5V +/- 10% */
+ LM87_TEMP_INT_LIMITS(0, 80 + FALCON_BOARD_TEMP_BIAS),
+ LM87_TEMP_EXT1_LIMITS(0, FALCON_JUNC_TEMP_MAX),
+ 0
+};
+
+static const struct i2c_board_info sfe4002_hwmon_info = {
+ I2C_BOARD_INFO("lm87", 0x2e),
+ .platform_data = &sfe4002_lm87_channel,
+};
+
+/****************************************************************************/
+/* LED allocations. Note that on rev A0 boards the schematic and the reality
+ * differ: red and green are swapped. Below is the fixed (A1) layout (there
+ * are only 3 A0 boards in existence, so no real reason to make this
+ * conditional).
+ */
+#define SFE4002_FAULT_LED (2) /* Red */
+#define SFE4002_RX_LED (0) /* Green */
+#define SFE4002_TX_LED (1) /* Amber */
+
+static void sfe4002_init_phy(struct efx_nic *efx)
+{
+ /* Set the TX and RX LEDs to reflect status and activity, and the
+ * fault LED off */
+ falcon_qt202x_set_led(efx, SFE4002_TX_LED,
+ QUAKE_LED_TXLINK | QUAKE_LED_LINK_ACTSTAT);
+ falcon_qt202x_set_led(efx, SFE4002_RX_LED,
+ QUAKE_LED_RXLINK | QUAKE_LED_LINK_ACTSTAT);
+ falcon_qt202x_set_led(efx, SFE4002_FAULT_LED, QUAKE_LED_OFF);
+}
+
+static void sfe4002_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
+{
+ falcon_qt202x_set_led(
+ efx, SFE4002_FAULT_LED,
+ (mode == EFX_LED_ON) ? QUAKE_LED_ON : QUAKE_LED_OFF);
+}
+
+static int sfe4002_check_hw(struct efx_nic *efx)
+{
+ struct falcon_board *board = falcon_board(efx);
+
+ /* A0 board rev. 4002s report a temperature fault the whole time
+ * (bad sensor) so we mask it out. */
+ unsigned alarm_mask =
+ (board->major == 0 && board->minor == 0) ?
+ ~LM87_ALARM_TEMP_EXT1 : ~0;
+
+ return efx_check_lm87(efx, alarm_mask);
+}
+
+static int sfe4002_init(struct efx_nic *efx)
+{
+ return efx_init_lm87(efx, &sfe4002_hwmon_info, sfe4002_lm87_regs);
+}
+
+/*****************************************************************************
+ * Support for the SFN4112F
+ *
+ */
+static u8 sfn4112f_lm87_channel = 0x03; /* use AIN not FAN inputs */
+
+static const u8 sfn4112f_lm87_regs[] = {
+ LM87_IN_LIMITS(0, 0x7c, 0x99), /* 2.5V: 1.8V +/- 10% */
+ LM87_IN_LIMITS(1, 0x4c, 0x5e), /* Vccp1: 1.2V +/- 10% */
+ LM87_IN_LIMITS(2, 0xac, 0xd4), /* 3.3V: 3.3V +/- 10% */
+ LM87_IN_LIMITS(4, 0xac, 0xe0), /* 12V: 10.8-14V */
+ LM87_IN_LIMITS(5, 0x3f, 0x4f), /* Vccp2: 1.0V +/- 10% */
+ LM87_AIN_LIMITS(1, 0x8a, 0xa9), /* AIN2: 1.5V +/- 10% */
+ LM87_TEMP_INT_LIMITS(0, 60 + FALCON_BOARD_TEMP_BIAS),
+ LM87_TEMP_EXT1_LIMITS(0, FALCON_JUNC_TEMP_MAX),
+ 0
+};
+
+static const struct i2c_board_info sfn4112f_hwmon_info = {
+ I2C_BOARD_INFO("lm87", 0x2e),
+ .platform_data = &sfn4112f_lm87_channel,
+};
+
+#define SFN4112F_ACT_LED 0
+#define SFN4112F_LINK_LED 1
+
+static void sfn4112f_init_phy(struct efx_nic *efx)
+{
+ falcon_qt202x_set_led(efx, SFN4112F_ACT_LED,
+ QUAKE_LED_RXLINK | QUAKE_LED_LINK_ACT);
+ falcon_qt202x_set_led(efx, SFN4112F_LINK_LED,
+ QUAKE_LED_RXLINK | QUAKE_LED_LINK_STAT);
+}
+
+static void sfn4112f_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
+{
+ int reg;
+
+ switch (mode) {
+ case EFX_LED_OFF:
+ reg = QUAKE_LED_OFF;
+ break;
+ case EFX_LED_ON:
+ reg = QUAKE_LED_ON;
+ break;
+ default:
+ reg = QUAKE_LED_RXLINK | QUAKE_LED_LINK_STAT;
+ break;
+ }
+
+ falcon_qt202x_set_led(efx, SFN4112F_LINK_LED, reg);
+}
+
+static int sfn4112f_check_hw(struct efx_nic *efx)
+{
+ /* Mask out unused sensors */
+ return efx_check_lm87(efx, ~0x48);
+}
+
+static int sfn4112f_init(struct efx_nic *efx)
+{
+ return efx_init_lm87(efx, &sfn4112f_hwmon_info, sfn4112f_lm87_regs);
+}
+
+/*****************************************************************************
+ * Support for the SFE4003
+ *
+ */
+static u8 sfe4003_lm87_channel = 0x03; /* use AIN not FAN inputs */
+
+static const u8 sfe4003_lm87_regs[] = {
+ LM87_IN_LIMITS(0, 0x67, 0x7f), /* 2.5V: 1.5V +/- 10% */
+ LM87_IN_LIMITS(1, 0x4c, 0x5e), /* Vccp1: 1.2V +/- 10% */
+ LM87_IN_LIMITS(2, 0xac, 0xd4), /* 3.3V: 3.3V +/- 10% */
+ LM87_IN_LIMITS(4, 0xac, 0xe0), /* 12V: 10.8-14V */
+ LM87_IN_LIMITS(5, 0x3f, 0x4f), /* Vccp2: 1.0V +/- 10% */
+ LM87_TEMP_INT_LIMITS(0, 70 + FALCON_BOARD_TEMP_BIAS),
+ 0
+};
+
+static const struct i2c_board_info sfe4003_hwmon_info = {
+ I2C_BOARD_INFO("lm87", 0x2e),
+ .platform_data = &sfe4003_lm87_channel,
+};
+
+/* Board-specific LED info. */
+#define SFE4003_RED_LED_GPIO 11
+#define SFE4003_LED_ON 1
+#define SFE4003_LED_OFF 0
+
+static void sfe4003_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
+{
+ struct falcon_board *board = falcon_board(efx);
+
+ /* The LEDs were not wired to GPIOs before A3 */
+ if (board->minor < 3 && board->major == 0)
+ return;
+
+ falcon_txc_set_gpio_val(
+ efx, SFE4003_RED_LED_GPIO,
+ (mode == EFX_LED_ON) ? SFE4003_LED_ON : SFE4003_LED_OFF);
+}
+
+static void sfe4003_init_phy(struct efx_nic *efx)
+{
+ struct falcon_board *board = falcon_board(efx);
+
+ /* The LEDs were not wired to GPIOs before A3 */
+ if (board->minor < 3 && board->major == 0)
+ return;
+
+ falcon_txc_set_gpio_dir(efx, SFE4003_RED_LED_GPIO, TXC_GPIO_DIR_OUTPUT);
+ falcon_txc_set_gpio_val(efx, SFE4003_RED_LED_GPIO, SFE4003_LED_OFF);
+}
+
+static int sfe4003_check_hw(struct efx_nic *efx)
+{
+ struct falcon_board *board = falcon_board(efx);
+
+ /* A0/A1/A2 board rev. 4003s report a temperature fault the whole time
+ * (bad sensor) so we mask it out. */
+ unsigned alarm_mask =
+ (board->major == 0 && board->minor <= 2) ?
+ ~LM87_ALARM_TEMP_EXT1 : ~0;
+
+ return efx_check_lm87(efx, alarm_mask);
+}
+
+static int sfe4003_init(struct efx_nic *efx)
+{
+ return efx_init_lm87(efx, &sfe4003_hwmon_info, sfe4003_lm87_regs);
+}
+
+static const struct falcon_board_type board_types[] = {
+ {
+ .id = FALCON_BOARD_SFE4001,
+ .init = sfe4001_init,
+ .init_phy = efx_port_dummy_op_void,
+ .fini = sfe4001_fini,
+ .set_id_led = tenxpress_set_id_led,
+ .monitor = sfe4001_check_hw,
+ },
+ {
+ .id = FALCON_BOARD_SFE4002,
+ .init = sfe4002_init,
+ .init_phy = sfe4002_init_phy,
+ .fini = efx_fini_lm87,
+ .set_id_led = sfe4002_set_id_led,
+ .monitor = sfe4002_check_hw,
+ },
+ {
+ .id = FALCON_BOARD_SFE4003,
+ .init = sfe4003_init,
+ .init_phy = sfe4003_init_phy,
+ .fini = efx_fini_lm87,
+ .set_id_led = sfe4003_set_id_led,
+ .monitor = sfe4003_check_hw,
+ },
+ {
+ .id = FALCON_BOARD_SFN4112F,
+ .init = sfn4112f_init,
+ .init_phy = sfn4112f_init_phy,
+ .fini = efx_fini_lm87,
+ .set_id_led = sfn4112f_set_id_led,
+ .monitor = sfn4112f_check_hw,
+ },
+};
+
+int falcon_probe_board(struct efx_nic *efx, u16 revision_info)
+{
+ struct falcon_board *board = falcon_board(efx);
+ u8 type_id = FALCON_BOARD_TYPE(revision_info);
+ int i;
+
+ board->major = FALCON_BOARD_MAJOR(revision_info);
+ board->minor = FALCON_BOARD_MINOR(revision_info);
+
+ for (i = 0; i < ARRAY_SIZE(board_types); i++)
+ if (board_types[i].id == type_id)
+ board->type = &board_types[i];
+
+ if (board->type) {
+ return 0;
+ } else {
+ netif_err(efx, probe, efx->net_dev, "unknown board type %d\n",
+ type_id);
+ return -ENODEV;
+ }
+}
diff --git a/kernel/drivers/net/ethernet/sfc/farch.c b/kernel/drivers/net/ethernet/sfc/farch.c
new file mode 100644
index 000000000..bb89e96a1
--- /dev/null
+++ b/kernel/drivers/net/ethernet/sfc/farch.c
@@ -0,0 +1,2969 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2013 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/crc32.h>
+#include "net_driver.h"
+#include "bitfield.h"
+#include "efx.h"
+#include "nic.h"
+#include "farch_regs.h"
+#include "io.h"
+#include "workarounds.h"
+
+/* Falcon-architecture (SFC4000 and SFC9000-family) support */
+
+/**************************************************************************
+ *
+ * Configurable values
+ *
+ **************************************************************************
+ */
+
+/* This is set to 16 for a good reason. In summary, if larger than
+ * 16, the descriptor cache holds more than a default socket
+ * buffer's worth of packets (for UDP we can only have at most one
+ * socket buffer's worth outstanding). This combined with the fact
+ * that we only get 1 TX event per descriptor cache means the NIC
+ * goes idle.
+ */
+#define TX_DC_ENTRIES 16
+#define TX_DC_ENTRIES_ORDER 1
+
+#define RX_DC_ENTRIES 64
+#define RX_DC_ENTRIES_ORDER 3
+
+/* If EFX_MAX_INT_ERRORS internal errors occur within
+ * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
+ * disable it.
+ */
+#define EFX_INT_ERROR_EXPIRE 3600
+#define EFX_MAX_INT_ERRORS 5
+
+/* Depth of RX flush request fifo */
+#define EFX_RX_FLUSH_COUNT 4
+
+/* Driver generated events */
+#define _EFX_CHANNEL_MAGIC_TEST 0x000101
+#define _EFX_CHANNEL_MAGIC_FILL 0x000102
+#define _EFX_CHANNEL_MAGIC_RX_DRAIN 0x000103
+#define _EFX_CHANNEL_MAGIC_TX_DRAIN 0x000104
+
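+/* A magic event value carries one of the codes above in its upper bits
+ * and the channel or queue number in its low 8 bits */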
+#define _EFX_CHANNEL_MAGIC(_code, _data) ((_code) << 8 | (_data))
+#define _EFX_CHANNEL_MAGIC_CODE(_magic) ((_magic) >> 8)
+
+#define EFX_CHANNEL_MAGIC_TEST(_channel) \
+ _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, (_channel)->channel)
+#define EFX_CHANNEL_MAGIC_FILL(_rx_queue) \
+ _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_FILL, \
+ efx_rx_queue_index(_rx_queue))
+#define EFX_CHANNEL_MAGIC_RX_DRAIN(_rx_queue) \
+ _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_RX_DRAIN, \
+ efx_rx_queue_index(_rx_queue))
+#define EFX_CHANNEL_MAGIC_TX_DRAIN(_tx_queue) \
+ _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN, \
+ (_tx_queue)->queue)
+
+static void efx_farch_magic_event(struct efx_channel *channel, u32 magic);
+
+/**************************************************************************
+ *
+ * Hardware access
+ *
+ **************************************************************************/
+
+static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
+ unsigned int index)
+{
+ efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
+ value, index);
+}
+
+static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
+ const efx_oword_t *mask)
+{
+ return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
+ ((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
+}
+
+int efx_farch_test_registers(struct efx_nic *efx,
+ const struct efx_farch_register_test *regs,
+ size_t n_regs)
+{
+ unsigned address = 0, i, j;
+ efx_oword_t mask, imask, original, reg, buf;
+
+ for (i = 0; i < n_regs; ++i) {
+ address = regs[i].address;
+ mask = imask = regs[i].mask;
+ EFX_INVERT_OWORD(imask);
+
+ efx_reado(efx, &original, address);
+
+ /* bit sweep on and off */
+ for (j = 0; j < 128; j++) {
+ if (!EFX_EXTRACT_OWORD32(mask, j, j))
+ continue;
+
+ /* Test this testable bit can be set in isolation */
+ EFX_AND_OWORD(reg, original, mask);
+ EFX_SET_OWORD32(reg, j, j, 1);
+
+ efx_writeo(efx, &reg, address);
+ efx_reado(efx, &buf, address);
+
+ if (efx_masked_compare_oword(&reg, &buf, &mask))
+ goto fail;
+
+ /* Test this testable bit can be cleared in isolation */
+ EFX_OR_OWORD(reg, original, mask);
+ EFX_SET_OWORD32(reg, j, j, 0);
+
+ efx_writeo(efx, &reg, address);
+ efx_reado(efx, &buf, address);
+
+ if (efx_masked_compare_oword(&reg, &buf, &mask))
+ goto fail;
+ }
+
+ efx_writeo(efx, &original, address);
+ }
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev,
+ "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
+ " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
+ EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
+ return -EIO;
+}
+
+/**************************************************************************
+ *
+ * Special buffer handling
+ * Special buffers are used for event queues and the TX and RX
+ * descriptor rings.
+ *
+ *************************************************************************/
+
+/*
+ * Initialise a special buffer
+ *
+ * This will define a buffer (previously allocated via
+ * efx_alloc_special_buffer()) in the buffer table, allowing
+ * it to be used for event queues, descriptor rings etc.
+ */
+static void
+efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
+{
+ efx_qword_t buf_desc;
+ unsigned int index;
+ dma_addr_t dma_addr;
+ int i;
+
+ EFX_BUG_ON_PARANOID(!buffer->buf.addr);
+
+ /* Write buffer descriptors to NIC */
+ for (i = 0; i < buffer->entries; i++) {
+ index = buffer->index + i;
+ dma_addr = buffer->buf.dma_addr + (i * EFX_BUF_SIZE);
+ netif_dbg(efx, probe, efx->net_dev,
+ "mapping special buffer %d at %llx\n",
+ index, (unsigned long long)dma_addr);
+ EFX_POPULATE_QWORD_3(buf_desc,
+ FRF_AZ_BUF_ADR_REGION, 0,
+ FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12, /* 4K page number */
+ FRF_AZ_BUF_OWNER_ID_FBUF, 0);
+ efx_write_buf_tbl(efx, &buf_desc, index);
+ }
+}
+
+/* Unmaps a buffer and clears the buffer table entries */
+static void
+efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
+{
+ efx_oword_t buf_tbl_upd;
+ unsigned int start = buffer->index;
+ unsigned int end = (buffer->index + buffer->entries - 1);
+
+ if (!buffer->entries)
+ return;
+
+ netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
+ buffer->index, buffer->index + buffer->entries - 1);
+
+ EFX_POPULATE_OWORD_4(buf_tbl_upd,
+ FRF_AZ_BUF_UPD_CMD, 0,
+ FRF_AZ_BUF_CLR_CMD, 1,
+ FRF_AZ_BUF_CLR_END_ID, end,
+ FRF_AZ_BUF_CLR_START_ID, start);
+ efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
+}
+
+/*
+ * Allocate a new special buffer
+ *
+ * This allocates memory for a new buffer, clears it and allocates a
+ * new buffer ID range. It does not write into the buffer table.
+ *
+ * This call will allocate 4KB buffers, since 8KB buffers can't be
+ * used for event queues and descriptor rings.
+ */
+static int efx_alloc_special_buffer(struct efx_nic *efx,
+ struct efx_special_buffer *buffer,
+ unsigned int len)
+{
+#ifdef CONFIG_SFC_SRIOV
+ struct siena_nic_data *nic_data = efx->nic_data;
+#endif
+ len = ALIGN(len, EFX_BUF_SIZE);
+
+ if (efx_nic_alloc_buffer(efx, &buffer->buf, len, GFP_KERNEL))
+ return -ENOMEM;
+ buffer->entries = len / EFX_BUF_SIZE;
+ BUG_ON(buffer->buf.dma_addr & (EFX_BUF_SIZE - 1));
+
+ /* Select new buffer ID */
+ buffer->index = efx->next_buffer_table;
+ efx->next_buffer_table += buffer->entries;
+#ifdef CONFIG_SFC_SRIOV
+ BUG_ON(efx_siena_sriov_enabled(efx) &&
+ nic_data->vf_buftbl_base < efx->next_buffer_table);
+#endif
+
+ netif_dbg(efx, probe, efx->net_dev,
+ "allocating special buffers %d-%d at %llx+%x "
+ "(virt %p phys %llx)\n", buffer->index,
+ buffer->index + buffer->entries - 1,
+ (u64)buffer->buf.dma_addr, len,
+ buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));
+
+ return 0;
+}
+
+static void
+efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
+{
+ if (!buffer->buf.addr)
+ return;
+
+ netif_dbg(efx, hw, efx->net_dev,
+ "deallocating special buffers %d-%d at %llx+%x "
+ "(virt %p phys %llx)\n", buffer->index,
+ buffer->index + buffer->entries - 1,
+ (u64)buffer->buf.dma_addr, buffer->buf.len,
+ buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));
+
+ efx_nic_free_buffer(efx, &buffer->buf);
+ buffer->entries = 0;
+}
+
+/**************************************************************************
+ *
+ * TX path
+ *
+ **************************************************************************/
+
+/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
+static inline void efx_farch_notify_tx_desc(struct efx_tx_queue *tx_queue)
+{
+ unsigned write_ptr;
+ efx_dword_t reg;
+
+ write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
+ EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
+ efx_writed_page(tx_queue->efx, &reg,
+ FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
+}
+
+/* Write pointer and first descriptor for TX descriptor ring */
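+/* Pushing writes the first descriptor into the update register along with
+ * the write pointer (FRF_AZ_TX_DESC_PUSH_CMD), so the NIC need not fetch
+ * that descriptor from host memory. efx_farch_tx_write() only does this
+ * when efx_nic_may_push_tx_desc() allows it; otherwise it just rings the
+ * doorbell with efx_farch_notify_tx_desc().
+ */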
+static inline void efx_farch_push_tx_desc(struct efx_tx_queue *tx_queue,
+ const efx_qword_t *txd)
+{
+ unsigned write_ptr;
+ efx_oword_t reg;
+
+ BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
+ BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);
+
+ write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
+ EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
+ FRF_AZ_TX_DESC_WPTR, write_ptr);
+ reg.qword[0] = *txd;
+ efx_writeo_page(tx_queue->efx, &reg,
+ FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
+}
+
+
+/* For each entry inserted into the software descriptor ring, create a
+ * descriptor in the hardware TX descriptor ring (in host memory), and
+ * write a doorbell.
+ */
+void efx_farch_tx_write(struct efx_tx_queue *tx_queue)
+{
+ struct efx_tx_buffer *buffer;
+ efx_qword_t *txd;
+ unsigned write_ptr;
+ unsigned old_write_count = tx_queue->write_count;
+
+ BUG_ON(tx_queue->write_count == tx_queue->insert_count);
+
+ do {
+ write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
+ buffer = &tx_queue->buffer[write_ptr];
+ txd = efx_tx_desc(tx_queue, write_ptr);
+ ++tx_queue->write_count;
+
+ EFX_BUG_ON_PARANOID(buffer->flags & EFX_TX_BUF_OPTION);
+
+ /* Create TX descriptor ring entry */
+ BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
+ EFX_POPULATE_QWORD_4(*txd,
+ FSF_AZ_TX_KER_CONT,
+ buffer->flags & EFX_TX_BUF_CONT,
+ FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
+ FSF_AZ_TX_KER_BUF_REGION, 0,
+ FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
+ } while (tx_queue->write_count != tx_queue->insert_count);
+
+ wmb(); /* Ensure descriptors are written before they are fetched */
+
+ if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) {
+ txd = efx_tx_desc(tx_queue,
+ old_write_count & tx_queue->ptr_mask);
+ efx_farch_push_tx_desc(tx_queue, txd);
+ ++tx_queue->pushes;
+ } else {
+ efx_farch_notify_tx_desc(tx_queue);
+ }
+}
+
+/* Allocate hardware resources for a TX queue */
+int efx_farch_tx_probe(struct efx_tx_queue *tx_queue)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ unsigned entries;
+
+ entries = tx_queue->ptr_mask + 1;
+ return efx_alloc_special_buffer(efx, &tx_queue->txd,
+ entries * sizeof(efx_qword_t));
+}
+
+void efx_farch_tx_init(struct efx_tx_queue *tx_queue)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ efx_oword_t reg;
+
+ /* Pin TX descriptor ring */
+ efx_init_special_buffer(efx, &tx_queue->txd);
+
+ /* Push TX descriptor ring to card */
+ EFX_POPULATE_OWORD_10(reg,
+ FRF_AZ_TX_DESCQ_EN, 1,
+ FRF_AZ_TX_ISCSI_DDIG_EN, 0,
+ FRF_AZ_TX_ISCSI_HDIG_EN, 0,
+ FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
+ FRF_AZ_TX_DESCQ_EVQ_ID,
+ tx_queue->channel->channel,
+ FRF_AZ_TX_DESCQ_OWNER_ID, 0,
+ FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
+ FRF_AZ_TX_DESCQ_SIZE,
+ __ffs(tx_queue->txd.entries),
+ FRF_AZ_TX_DESCQ_TYPE, 0,
+ FRF_BZ_TX_NON_IP_DROP_DIS, 1);
+
+ if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+ int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
+ EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
+ EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
+ !csum);
+ }
+
+ efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
+ tx_queue->queue);
+
+ if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
+ /* Only 128 bits in this register */
+ BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);
+
+ efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
+ if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
+ __clear_bit_le(tx_queue->queue, &reg);
+ else
+ __set_bit_le(tx_queue->queue, &reg);
+ efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
+ }
+
+ if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+ EFX_POPULATE_OWORD_1(reg,
+ FRF_BZ_TX_PACE,
+ (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
+ FFE_BZ_TX_PACE_OFF :
+ FFE_BZ_TX_PACE_RESERVED);
+ efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
+ tx_queue->queue);
+ }
+}
+
+static void efx_farch_flush_tx_queue(struct efx_tx_queue *tx_queue)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ efx_oword_t tx_flush_descq;
+
+ WARN_ON(atomic_read(&tx_queue->flush_outstanding));
+ atomic_set(&tx_queue->flush_outstanding, 1);
+
+ EFX_POPULATE_OWORD_2(tx_flush_descq,
+ FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
+ FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
+ efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
+}
+
+void efx_farch_tx_fini(struct efx_tx_queue *tx_queue)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ efx_oword_t tx_desc_ptr;
+
+ /* Remove TX descriptor ring from card */
+ EFX_ZERO_OWORD(tx_desc_ptr);
+ efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
+ tx_queue->queue);
+
+ /* Unpin TX descriptor ring */
+ efx_fini_special_buffer(efx, &tx_queue->txd);
+}
+
+/* Free buffers backing TX queue */
+void efx_farch_tx_remove(struct efx_tx_queue *tx_queue)
+{
+ efx_free_special_buffer(tx_queue->efx, &tx_queue->txd);
+}
+
+/**************************************************************************
+ *
+ * RX path
+ *
+ **************************************************************************/
+
+/* This creates an entry in the RX descriptor queue */
+static inline void
+efx_farch_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
+{
+ struct efx_rx_buffer *rx_buf;
+ efx_qword_t *rxd;
+
+ rxd = efx_rx_desc(rx_queue, index);
+ rx_buf = efx_rx_buffer(rx_queue, index);
+ EFX_POPULATE_QWORD_3(*rxd,
+ FSF_AZ_RX_KER_BUF_SIZE,
+ rx_buf->len -
+ rx_queue->efx->type->rx_buffer_padding,
+ FSF_AZ_RX_KER_BUF_REGION, 0,
+ FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
+}
+
+/* This writes to the RX_DESC_WPTR register for the specified receive
+ * descriptor ring.
+ */
+void efx_farch_rx_write(struct efx_rx_queue *rx_queue)
+{
+ struct efx_nic *efx = rx_queue->efx;
+ efx_dword_t reg;
+ unsigned write_ptr;
+
+ while (rx_queue->notified_count != rx_queue->added_count) {
+ efx_farch_build_rx_desc(
+ rx_queue,
+ rx_queue->notified_count & rx_queue->ptr_mask);
+ ++rx_queue->notified_count;
+ }
+
+ wmb(); /* Ensure descriptors are written before the write pointer is updated */
+ write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
+ EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
+ efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
+ efx_rx_queue_index(rx_queue));
+}
+
+int efx_farch_rx_probe(struct efx_rx_queue *rx_queue)
+{
+ struct efx_nic *efx = rx_queue->efx;
+ unsigned entries;
+
+ entries = rx_queue->ptr_mask + 1;
+ return efx_alloc_special_buffer(efx, &rx_queue->rxd,
+ entries * sizeof(efx_qword_t));
+}
+
+void efx_farch_rx_init(struct efx_rx_queue *rx_queue)
+{
+ efx_oword_t rx_desc_ptr;
+ struct efx_nic *efx = rx_queue->efx;
+ bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
+ bool iscsi_digest_en = is_b0;
+ bool jumbo_en;
+
+ /* For kernel-mode queues in Falcon A1, the JUMBO flag enables
+ * DMA to continue after a PCIe page boundary (and scattering
+ * is not possible). In Falcon B0 and Siena, it enables
+ * scatter.
+ */
+ jumbo_en = !is_b0 || efx->rx_scatter;
+
+ netif_dbg(efx, hw, efx->net_dev,
+ "RX queue %d ring in special buffers %d-%d\n",
+ efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
+ rx_queue->rxd.index + rx_queue->rxd.entries - 1);
+
+ rx_queue->scatter_n = 0;
+
+ /* Pin RX descriptor ring */
+ efx_init_special_buffer(efx, &rx_queue->rxd);
+
+ /* Push RX descriptor ring to card */
+ EFX_POPULATE_OWORD_10(rx_desc_ptr,
+ FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
+ FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
+ FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
+ FRF_AZ_RX_DESCQ_EVQ_ID,
+ efx_rx_queue_channel(rx_queue)->channel,
+ FRF_AZ_RX_DESCQ_OWNER_ID, 0,
+ FRF_AZ_RX_DESCQ_LABEL,
+ efx_rx_queue_index(rx_queue),
+ FRF_AZ_RX_DESCQ_SIZE,
+ __ffs(rx_queue->rxd.entries),
+ FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
+ FRF_AZ_RX_DESCQ_JUMBO, jumbo_en,
+ FRF_AZ_RX_DESCQ_EN, 1);
+ efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
+ efx_rx_queue_index(rx_queue));
+}
+
+static void efx_farch_flush_rx_queue(struct efx_rx_queue *rx_queue)
+{
+ struct efx_nic *efx = rx_queue->efx;
+ efx_oword_t rx_flush_descq;
+
+ EFX_POPULATE_OWORD_2(rx_flush_descq,
+ FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
+ FRF_AZ_RX_FLUSH_DESCQ,
+ efx_rx_queue_index(rx_queue));
+ efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
+}
+
+void efx_farch_rx_fini(struct efx_rx_queue *rx_queue)
+{
+ efx_oword_t rx_desc_ptr;
+ struct efx_nic *efx = rx_queue->efx;
+
+ /* Remove RX descriptor ring from card */
+ EFX_ZERO_OWORD(rx_desc_ptr);
+ efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
+ efx_rx_queue_index(rx_queue));
+
+ /* Unpin RX descriptor ring */
+ efx_fini_special_buffer(efx, &rx_queue->rxd);
+}
+
+/* Free buffers backing RX queue */
+void efx_farch_rx_remove(struct efx_rx_queue *rx_queue)
+{
+ efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
+}
+
+/**************************************************************************
+ *
+ * Flush handling
+ *
+ **************************************************************************/
+
+/* The waiter in efx_farch_do_flush() must be woken up when all flushes are
+ * completed, or when more RX flushes can be kicked off.
+ */
+static bool efx_farch_flush_wake(struct efx_nic *efx)
+{
+ /* Ensure that all updates are visible to efx_farch_do_flush() */
+ smp_mb();
+
+ return (atomic_read(&efx->active_queues) == 0 ||
+ (atomic_read(&efx->rxq_flush_outstanding) < EFX_RX_FLUSH_COUNT
+ && atomic_read(&efx->rxq_flush_pending) > 0));
+}
+
+static bool efx_check_tx_flush_complete(struct efx_nic *efx)
+{
+ bool i = true;
+ efx_oword_t txd_ptr_tbl;
+ struct efx_channel *channel;
+ struct efx_tx_queue *tx_queue;
+
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ efx_reado_table(efx, &txd_ptr_tbl,
+ FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue);
+ if (EFX_OWORD_FIELD(txd_ptr_tbl,
+ FRF_AZ_TX_DESCQ_FLUSH) ||
+ EFX_OWORD_FIELD(txd_ptr_tbl,
+ FRF_AZ_TX_DESCQ_EN)) {
+ netif_dbg(efx, hw, efx->net_dev,
+ "flush did not complete on TXQ %d\n",
+ tx_queue->queue);
+ i = false;
+ } else if (atomic_cmpxchg(&tx_queue->flush_outstanding,
+ 1, 0)) {
+ /* The flush is complete, but we didn't
+ * receive a flush completion event
+ */
+ netif_dbg(efx, hw, efx->net_dev,
+ "flush complete on TXQ %d, so drain "
+ "the queue\n", tx_queue->queue);
+ /* Don't need to increment active_queues as it
+ * has already been incremented for the queues
+ * which did not drain
+ */
+ efx_farch_magic_event(channel,
+ EFX_CHANNEL_MAGIC_TX_DRAIN(
+ tx_queue));
+ }
+ }
+ }
+
+ return i;
+}
+
+/* Flush all the transmit queues, and continue flushing receive queues until
+ * they're all flushed. Wait for the DRAIN events to be received so that there
+ * are no more RX and TX events left on any channel. */
+static int efx_farch_do_flush(struct efx_nic *efx)
+{
+ unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */
+ struct efx_channel *channel;
+ struct efx_rx_queue *rx_queue;
+ struct efx_tx_queue *tx_queue;
+ int rc = 0;
+
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ efx_farch_flush_tx_queue(tx_queue);
+ }
+ efx_for_each_channel_rx_queue(rx_queue, channel) {
+ rx_queue->flush_pending = true;
+ atomic_inc(&efx->rxq_flush_pending);
+ }
+ }
+
+ while (timeout && atomic_read(&efx->active_queues) > 0) {
+ /* If SRIOV is enabled, then offload receive queue flushing to
+ * the firmware (though we will still have to poll for
+ * completion). If that fails, fall back to the old scheme.
+ */
+ if (efx_siena_sriov_enabled(efx)) {
+ rc = efx_mcdi_flush_rxqs(efx);
+ if (!rc)
+ goto wait;
+ }
+
+ /* The hardware supports four concurrent rx flushes, each of
+ * which may need to be retried if there is an outstanding
+ * descriptor fetch
+ */
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_rx_queue(rx_queue, channel) {
+ if (atomic_read(&efx->rxq_flush_outstanding) >=
+ EFX_RX_FLUSH_COUNT)
+ break;
+
+ if (rx_queue->flush_pending) {
+ rx_queue->flush_pending = false;
+ atomic_dec(&efx->rxq_flush_pending);
+ atomic_inc(&efx->rxq_flush_outstanding);
+ efx_farch_flush_rx_queue(rx_queue);
+ }
+ }
+ }
+
+ wait:
+ timeout = wait_event_timeout(efx->flush_wq,
+ efx_farch_flush_wake(efx),
+ timeout);
+ }
+
+ if (atomic_read(&efx->active_queues) &&
+ !efx_check_tx_flush_complete(efx)) {
+ netif_err(efx, hw, efx->net_dev, "failed to flush %d queues "
+ "(rx %d+%d)\n", atomic_read(&efx->active_queues),
+ atomic_read(&efx->rxq_flush_outstanding),
+ atomic_read(&efx->rxq_flush_pending));
+ rc = -ETIMEDOUT;
+
+ atomic_set(&efx->active_queues, 0);
+ atomic_set(&efx->rxq_flush_pending, 0);
+ atomic_set(&efx->rxq_flush_outstanding, 0);
+ }
+
+ return rc;
+}
+
+int efx_farch_fini_dmaq(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+ struct efx_tx_queue *tx_queue;
+ struct efx_rx_queue *rx_queue;
+ int rc = 0;
+
+ /* Do not attempt to write to the NIC during EEH recovery */
+ if (efx->state != STATE_RECOVERY) {
+ /* Only perform flush if DMA is enabled */
+ if (efx->pci_dev->is_busmaster) {
+ efx->type->prepare_flush(efx);
+ rc = efx_farch_do_flush(efx);
+ efx->type->finish_flush(efx);
+ }
+
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_rx_queue(rx_queue, channel)
+ efx_farch_rx_fini(rx_queue);
+ efx_for_each_channel_tx_queue(tx_queue, channel)
+ efx_farch_tx_fini(tx_queue);
+ }
+ }
+
+ return rc;
+}
+
+/* Reset queue and flush accounting after FLR
+ *
+ * One possible cause of FLR recovery is that DMA may be failing (e.g. if bus
+ * mastering was disabled), in which case we don't receive (RXQ) flush
+ * completion events. This means that efx->rxq_flush_outstanding remained at 4
+ * after the FLR; also, efx->active_queues was non-zero (as no flush completion
+ * events were received, and we didn't go through efx_check_tx_flush_complete()).
+ * If we don't fix this up, then on the next call to efx_realloc_channels() we
+ * won't flush any RX queues because efx->rxq_flush_outstanding is at the limit
+ * of 4 for batched flush requests; and efx->active_queues gets messed up
+ * because we keep incrementing it for the newly initialised queues, but it
+ * never went to zero previously. Then we get a timeout every time we try to
+ * restart the queues, as it never goes back to zero when we should be flushing
+ * the queues.
+ */
+void efx_farch_finish_flr(struct efx_nic *efx)
+{
+ atomic_set(&efx->rxq_flush_pending, 0);
+ atomic_set(&efx->rxq_flush_outstanding, 0);
+ atomic_set(&efx->active_queues, 0);
+}
+
+
+/**************************************************************************
+ *
+ * Event queue processing
+ * Event queues are processed by per-channel tasklets.
+ *
+ **************************************************************************/
+
+/* Update a channel's event queue's read pointer (RPTR) register
+ *
+ * This writes the EVQ_RPTR_REG register for the specified channel's
+ * event queue.
+ */
+void efx_farch_ev_read_ack(struct efx_channel *channel)
+{
+ efx_dword_t reg;
+ struct efx_nic *efx = channel->efx;
+
+ EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
+ channel->eventq_read_ptr & channel->eventq_mask);
+
+ /* For Falcon A1, EVQ_RPTR_KER is documented as having a step size
+ * of 4 bytes, but it is really 16 bytes just like later revisions.
+ */
+ efx_writed(efx, &reg,
+ efx->type->evq_rptr_tbl_base +
+ FR_BZ_EVQ_RPTR_STEP * channel->channel);
+}
+
+/* Use HW to insert a SW defined event */
+void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq,
+ efx_qword_t *event)
+{
+ efx_oword_t drv_ev_reg;
+
+ BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
+ FRF_AZ_DRV_EV_DATA_WIDTH != 64);
+ drv_ev_reg.u32[0] = event->u32[0];
+ drv_ev_reg.u32[1] = event->u32[1];
+ drv_ev_reg.u32[2] = 0;
+ drv_ev_reg.u32[3] = 0;
+ EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq);
+ efx_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV);
+}
+
+static void efx_farch_magic_event(struct efx_channel *channel, u32 magic)
+{
+ efx_qword_t event;
+
+ EFX_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE,
+ FSE_AZ_EV_CODE_DRV_GEN_EV,
+ FSF_AZ_DRV_GEN_EV_MAGIC, magic);
+ efx_farch_generate_event(channel->efx, channel->channel, &event);
+}
+
+/* Handle a transmit completion event
+ *
+ * The NIC batches TX completion events; the message we receive is of
+ * the form "complete all TX events up to this index".
+ */
+static int
+efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
+{
+ unsigned int tx_ev_desc_ptr;
+ unsigned int tx_ev_q_label;
+ struct efx_tx_queue *tx_queue;
+ struct efx_nic *efx = channel->efx;
+ int tx_packets = 0;
+
+ if (unlikely(ACCESS_ONCE(efx->reset_pending)))
+ return 0;
+
+ if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
+ /* Transmit completion */
+ tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
+ tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
+ tx_queue = efx_channel_get_tx_queue(
+ channel, tx_ev_q_label % EFX_TXQ_TYPES);
+ tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
+ tx_queue->ptr_mask);
+ efx_xmit_done(tx_queue, tx_ev_desc_ptr);
+ } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
+ /* Rewrite the FIFO write pointer */
+ tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
+ tx_queue = efx_channel_get_tx_queue(
+ channel, tx_ev_q_label % EFX_TXQ_TYPES);
+
+ netif_tx_lock(efx->net_dev);
+ efx_farch_notify_tx_desc(tx_queue);
+ netif_tx_unlock(efx->net_dev);
+ } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR)) {
+ efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
+ } else {
+ netif_err(efx, tx_err, efx->net_dev,
+ "channel %d unexpected TX event "
+ EFX_QWORD_FMT"\n", channel->channel,
+ EFX_QWORD_VAL(*event));
+ }
+
+ return tx_packets;
+}
+
+/* Detect errors included in the rx_evt_pkt_ok bit. */
+static u16 efx_farch_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
+ const efx_qword_t *event)
+{
+ struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
+ struct efx_nic *efx = rx_queue->efx;
+ bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
+ bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
+ bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
+ bool rx_ev_other_err, rx_ev_pause_frm;
+ bool rx_ev_hdr_type, rx_ev_mcast_pkt;
+ unsigned rx_ev_pkt_type;
+
+ rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
+ rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
+ rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
+ rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
+ rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
+ FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
+ rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
+ FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
+ rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
+ FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
+ rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
+ rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
+ rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ?
+ 0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
+ rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);
+
+ /* Every error apart from tobe_disc and pause_frm */
+ rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
+ rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
+ rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);
+
+ /* Count errors that are not in MAC stats. Ignore expected
+ * checksum errors during self-test. */
+ if (rx_ev_frm_trunc)
+ ++channel->n_rx_frm_trunc;
+ else if (rx_ev_tobe_disc)
+ ++channel->n_rx_tobe_disc;
+ else if (!efx->loopback_selftest) {
+ if (rx_ev_ip_hdr_chksum_err)
+ ++channel->n_rx_ip_hdr_chksum_err;
+ else if (rx_ev_tcp_udp_chksum_err)
+ ++channel->n_rx_tcp_udp_chksum_err;
+ }
+
+ /* TOBE_DISC is expected on unicast mismatches; don't print out an
+ * error message. FRM_TRUNC indicates RXDP dropped the packet due
+ * to a FIFO overflow.
+ */
+#ifdef DEBUG
+ if (rx_ev_other_err && net_ratelimit()) {
+ netif_dbg(efx, rx_err, efx->net_dev,
+ " RX queue %d unexpected RX event "
+ EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
+ efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event),
+ rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
+ rx_ev_ip_hdr_chksum_err ?
+ " [IP_HDR_CHKSUM_ERR]" : "",
+ rx_ev_tcp_udp_chksum_err ?
+ " [TCP_UDP_CHKSUM_ERR]" : "",
+ rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
+ rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
+ rx_ev_drib_nib ? " [DRIB_NIB]" : "",
+ rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
+ rx_ev_pause_frm ? " [PAUSE]" : "");
+ }
+#endif
+
+ /* The frame must be discarded if any of these are true. */
+ return (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
+ rx_ev_tobe_disc | rx_ev_pause_frm) ?
+ EFX_RX_PKT_DISCARD : 0;
+}
+
+/* Handle receive events that are not in-order. Return true if this
+ * can be handled as a partial packet discard, false if it's more
+ * serious.
+ */
+static bool
+efx_farch_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
+{
+ struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
+ struct efx_nic *efx = rx_queue->efx;
+ unsigned expected, dropped;
+
+ if (rx_queue->scatter_n &&
+ index == ((rx_queue->removed_count + rx_queue->scatter_n - 1) &
+ rx_queue->ptr_mask)) {
+ ++channel->n_rx_nodesc_trunc;
+ return true;
+ }
+
+ expected = rx_queue->removed_count & rx_queue->ptr_mask;
+ dropped = (index - expected) & rx_queue->ptr_mask;
+ netif_info(efx, rx_err, efx->net_dev,
+ "dropped %d events (index=%d expected=%d)\n",
+ dropped, index, expected);
+
+ efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
+ RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
+ return false;
+}
+
+/* Handle a packet received event
+ *
+ * The NIC gives a "discard" flag if it's a unicast packet with the
+ * wrong destination address.
+ * Also "is multicast" and "matches multicast filter" flags can be used to
+ * discard non-matching multicast packets.
+ */
+static void
+efx_farch_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
+{
+ unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
+ unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
+ unsigned expected_ptr;
+ bool rx_ev_pkt_ok, rx_ev_sop, rx_ev_cont;
+ u16 flags;
+ struct efx_rx_queue *rx_queue;
+ struct efx_nic *efx = channel->efx;
+
+ if (unlikely(ACCESS_ONCE(efx->reset_pending)))
+ return;
+
+ rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT);
+ rx_ev_sop = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP);
+ WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
+ channel->channel);
+
+ rx_queue = efx_channel_get_rx_queue(channel);
+
+ rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
+ expected_ptr = ((rx_queue->removed_count + rx_queue->scatter_n) &
+ rx_queue->ptr_mask);
+
+ /* Check for partial drops and other errors */
+ if (unlikely(rx_ev_desc_ptr != expected_ptr) ||
+ unlikely(rx_ev_sop != (rx_queue->scatter_n == 0))) {
+ if (rx_ev_desc_ptr != expected_ptr &&
+ !efx_farch_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr))
+ return;
+
+ /* Discard all pending fragments */
+ if (rx_queue->scatter_n) {
+ efx_rx_packet(
+ rx_queue,
+ rx_queue->removed_count & rx_queue->ptr_mask,
+ rx_queue->scatter_n, 0, EFX_RX_PKT_DISCARD);
+ rx_queue->removed_count += rx_queue->scatter_n;
+ rx_queue->scatter_n = 0;
+ }
+
+ /* Return if there is no new fragment */
+ if (rx_ev_desc_ptr != expected_ptr)
+ return;
+
+ /* Discard new fragment if not SOP */
+ if (!rx_ev_sop) {
+ efx_rx_packet(
+ rx_queue,
+ rx_queue->removed_count & rx_queue->ptr_mask,
+ 1, 0, EFX_RX_PKT_DISCARD);
+ ++rx_queue->removed_count;
+ return;
+ }
+ }
+
+ ++rx_queue->scatter_n;
+ if (rx_ev_cont)
+ return;
+
+ rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
+ rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
+ rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
+
+ if (likely(rx_ev_pkt_ok)) {
+ /* If packet is marked as OK then we can rely on the
+ * hardware checksum and classification.
+ */
+ flags = 0;
+ switch (rx_ev_hdr_type) {
+ case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP:
+ flags |= EFX_RX_PKT_TCP;
+ /* fall through */
+ case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP:
+ flags |= EFX_RX_PKT_CSUMMED;
+ /* fall through */
+ case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER:
+ case FSE_AZ_RX_EV_HDR_TYPE_OTHER:
+ break;
+ }
+ } else {
+ flags = efx_farch_handle_rx_not_ok(rx_queue, event);
+ }
+
+ /* Detect multicast packets that didn't match the filter */
+ rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
+ if (rx_ev_mcast_pkt) {
+ unsigned int rx_ev_mcast_hash_match =
+ EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);
+
+ if (unlikely(!rx_ev_mcast_hash_match)) {
+ ++channel->n_rx_mcast_mismatch;
+ flags |= EFX_RX_PKT_DISCARD;
+ }
+ }
+
+ channel->irq_mod_score += 2;
+
+ /* Handle received packet */
+ efx_rx_packet(rx_queue,
+ rx_queue->removed_count & rx_queue->ptr_mask,
+ rx_queue->scatter_n, rx_ev_byte_cnt, flags);
+ rx_queue->removed_count += rx_queue->scatter_n;
+ rx_queue->scatter_n = 0;
+}
+
+/* If this flush done event corresponds to a &struct efx_tx_queue, then
+ * send an %EFX_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue
+ * of all transmit completions.
+ */
+static void
+efx_farch_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
+{
+ struct efx_tx_queue *tx_queue;
+ int qid;
+
+ qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
+ if (qid < EFX_TXQ_TYPES * efx->n_tx_channels) {
+ tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES,
+ qid % EFX_TXQ_TYPES);
+ if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
+ efx_farch_magic_event(tx_queue->channel,
+ EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
+ }
+ }
+}
+
+/* If this flush done event corresponds to a &struct efx_rx_queue: If the flush
+ * was successful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add
+ * the RX queue back to the mask of RX queues in need of flushing.
+ */
+static void
+efx_farch_handle_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
+{
+ struct efx_channel *channel;
+ struct efx_rx_queue *rx_queue;
+ int qid;
+ bool failed;
+
+ qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
+ failed = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
+ if (qid >= efx->n_channels)
+ return;
+ channel = efx_get_channel(efx, qid);
+ if (!efx_channel_has_rx_queue(channel))
+ return;
+ rx_queue = efx_channel_get_rx_queue(channel);
+
+ if (failed) {
+ netif_info(efx, hw, efx->net_dev,
+ "RXQ %d flush retry\n", qid);
+ rx_queue->flush_pending = true;
+ atomic_inc(&efx->rxq_flush_pending);
+ } else {
+ efx_farch_magic_event(efx_rx_queue_channel(rx_queue),
+ EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue));
+ }
+ atomic_dec(&efx->rxq_flush_outstanding);
+ if (efx_farch_flush_wake(efx))
+ wake_up(&efx->flush_wq);
+}
+
+static void
+efx_farch_handle_drain_event(struct efx_channel *channel)
+{
+ struct efx_nic *efx = channel->efx;
+
+ WARN_ON(atomic_read(&efx->active_queues) == 0);
+ atomic_dec(&efx->active_queues);
+ if (efx_farch_flush_wake(efx))
+ wake_up(&efx->flush_wq);
+}
+
+static void efx_farch_handle_generated_event(struct efx_channel *channel,
+ efx_qword_t *event)
+{
+ struct efx_nic *efx = channel->efx;
+ struct efx_rx_queue *rx_queue =
+ efx_channel_has_rx_queue(channel) ?
+ efx_channel_get_rx_queue(channel) : NULL;
+ unsigned magic, code;
+
+ magic = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
+ code = _EFX_CHANNEL_MAGIC_CODE(magic);
+
+ if (magic == EFX_CHANNEL_MAGIC_TEST(channel)) {
+ channel->event_test_cpu = raw_smp_processor_id();
+ } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_FILL(rx_queue)) {
+ /* The queue must be empty, so we won't receive any rx
+ * events, so efx_process_channel() won't refill the
+ * queue. Refill it here */
+ efx_fast_push_rx_descriptors(rx_queue, true);
+ } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) {
+ efx_farch_handle_drain_event(channel);
+ } else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) {
+ efx_farch_handle_drain_event(channel);
+ } else {
+ netif_dbg(efx, hw, efx->net_dev, "channel %d received "
+ "generated event "EFX_QWORD_FMT"\n",
+ channel->channel, EFX_QWORD_VAL(*event));
+ }
+}
+
+static void
+efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
+{
+ struct efx_nic *efx = channel->efx;
+ unsigned int ev_sub_code;
+ unsigned int ev_sub_data;
+
+ ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
+ ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
+
+ switch (ev_sub_code) {
+ case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
+ netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
+ channel->channel, ev_sub_data);
+ efx_farch_handle_tx_flush_done(efx, event);
+ efx_siena_sriov_tx_flush_done(efx, event);
+ break;
+ case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
+ netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
+ channel->channel, ev_sub_data);
+ efx_farch_handle_rx_flush_done(efx, event);
+ efx_siena_sriov_rx_flush_done(efx, event);
+ break;
+ case FSE_AZ_EVQ_INIT_DONE_EV:
+ netif_dbg(efx, hw, efx->net_dev,
+ "channel %d EVQ %d initialised\n",
+ channel->channel, ev_sub_data);
+ break;
+ case FSE_AZ_SRM_UPD_DONE_EV:
+ netif_vdbg(efx, hw, efx->net_dev,
+ "channel %d SRAM update done\n", channel->channel);
+ break;
+ case FSE_AZ_WAKE_UP_EV:
+ netif_vdbg(efx, hw, efx->net_dev,
+ "channel %d RXQ %d wakeup event\n",
+ channel->channel, ev_sub_data);
+ break;
+ case FSE_AZ_TIMER_EV:
+ netif_vdbg(efx, hw, efx->net_dev,
+ "channel %d RX queue %d timer expired\n",
+ channel->channel, ev_sub_data);
+ break;
+ case FSE_AA_RX_RECOVER_EV:
+ netif_err(efx, rx_err, efx->net_dev,
+ "channel %d seen DRIVER RX_RESET event. "
+ "Resetting.\n", channel->channel);
+ atomic_inc(&efx->rx_reset);
+ efx_schedule_reset(efx,
+ EFX_WORKAROUND_6555(efx) ?
+ RESET_TYPE_RX_RECOVERY :
+ RESET_TYPE_DISABLE);
+ break;
+ case FSE_BZ_RX_DSC_ERROR_EV:
+ if (ev_sub_data < EFX_VI_BASE) {
+ netif_err(efx, rx_err, efx->net_dev,
+ "RX DMA Q %d reports descriptor fetch error."
+ " RX Q %d is disabled.\n", ev_sub_data,
+ ev_sub_data);
+ efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
+ } else
+ efx_siena_sriov_desc_fetch_err(efx, ev_sub_data);
+ break;
+ case FSE_BZ_TX_DSC_ERROR_EV:
+ if (ev_sub_data < EFX_VI_BASE) {
+ netif_err(efx, tx_err, efx->net_dev,
+ "TX DMA Q %d reports descriptor fetch error."
+ " TX Q %d is disabled.\n", ev_sub_data,
+ ev_sub_data);
+ efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
+ } else
+ efx_siena_sriov_desc_fetch_err(efx, ev_sub_data);
+ break;
+ default:
+ netif_vdbg(efx, hw, efx->net_dev,
+ "channel %d unknown driver event code %d "
+ "data %04x\n", channel->channel, ev_sub_code,
+ ev_sub_data);
+ break;
+ }
+}
+
+int efx_farch_ev_process(struct efx_channel *channel, int budget)
+{
+ struct efx_nic *efx = channel->efx;
+ unsigned int read_ptr;
+ efx_qword_t event, *p_event;
+ int ev_code;
+ int tx_packets = 0;
+ int spent = 0;
+
+ if (budget <= 0)
+ return spent;
+
+ read_ptr = channel->eventq_read_ptr;
+
+ for (;;) {
+ p_event = efx_event(channel, read_ptr);
+ event = *p_event;
+
+ if (!efx_event_present(&event))
+ /* End of events */
+ break;
+
+ netif_vdbg(channel->efx, intr, channel->efx->net_dev,
+ "channel %d event is "EFX_QWORD_FMT"\n",
+ channel->channel, EFX_QWORD_VAL(event));
+
+ /* Clear this event by marking it all ones */
+ EFX_SET_QWORD(*p_event);
+
+ ++read_ptr;
+
+ ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);
+
+ switch (ev_code) {
+ case FSE_AZ_EV_CODE_RX_EV:
+ efx_farch_handle_rx_event(channel, &event);
+ if (++spent == budget)
+ goto out;
+ break;
+ case FSE_AZ_EV_CODE_TX_EV:
+ tx_packets += efx_farch_handle_tx_event(channel,
+ &event);
+ if (tx_packets > efx->txq_entries) {
+ spent = budget;
+ goto out;
+ }
+ break;
+ case FSE_AZ_EV_CODE_DRV_GEN_EV:
+ efx_farch_handle_generated_event(channel, &event);
+ break;
+ case FSE_AZ_EV_CODE_DRIVER_EV:
+ efx_farch_handle_driver_event(channel, &event);
+ break;
+ case FSE_CZ_EV_CODE_USER_EV:
+ efx_siena_sriov_event(channel, &event);
+ break;
+ case FSE_CZ_EV_CODE_MCDI_EV:
+ efx_mcdi_process_event(channel, &event);
+ break;
+ case FSE_AZ_EV_CODE_GLOBAL_EV:
+ if (efx->type->handle_global_event &&
+ efx->type->handle_global_event(channel, &event))
+ break;
+ /* else fall through */
+ default:
+ netif_err(channel->efx, hw, channel->efx->net_dev,
+ "channel %d unknown event type %d (data "
+ EFX_QWORD_FMT ")\n", channel->channel,
+ ev_code, EFX_QWORD_VAL(event));
+ }
+ }
+
+out:
+ channel->eventq_read_ptr = read_ptr;
+ return spent;
+}
+
+/* Allocate buffer table entries for event queue */
+int efx_farch_ev_probe(struct efx_channel *channel)
+{
+ struct efx_nic *efx = channel->efx;
+ unsigned entries;
+
+ entries = channel->eventq_mask + 1;
+ return efx_alloc_special_buffer(efx, &channel->eventq,
+ entries * sizeof(efx_qword_t));
+}
+
+int efx_farch_ev_init(struct efx_channel *channel)
+{
+ efx_oword_t reg;
+ struct efx_nic *efx = channel->efx;
+
+ netif_dbg(efx, hw, efx->net_dev,
+ "channel %d event queue in special buffers %d-%d\n",
+ channel->channel, channel->eventq.index,
+ channel->eventq.index + channel->eventq.entries - 1);
+
+ if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
+ EFX_POPULATE_OWORD_3(reg,
+ FRF_CZ_TIMER_Q_EN, 1,
+ FRF_CZ_HOST_NOTIFY_MODE, 0,
+ FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
+ efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
+ }
+
+ /* Pin event queue buffer */
+ efx_init_special_buffer(efx, &channel->eventq);
+
+ /* Fill event queue with all ones (i.e. empty events) */
+ memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);
+
+ /* Push event queue to card */
+ EFX_POPULATE_OWORD_3(reg,
+ FRF_AZ_EVQ_EN, 1,
+ FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
+ FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
+ efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
+ channel->channel);
+
+ return 0;
+}
+
+void efx_farch_ev_fini(struct efx_channel *channel)
+{
+ efx_oword_t reg;
+ struct efx_nic *efx = channel->efx;
+
+ /* Remove event queue from card */
+ EFX_ZERO_OWORD(reg);
+ efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
+ channel->channel);
+ if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
+ efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
+
+ /* Unpin event queue */
+ efx_fini_special_buffer(efx, &channel->eventq);
+}
+
+/* Free buffers backing event queue */
+void efx_farch_ev_remove(struct efx_channel *channel)
+{
+ efx_free_special_buffer(channel->efx, &channel->eventq);
+}
+
+
+void efx_farch_ev_test_generate(struct efx_channel *channel)
+{
+ efx_farch_magic_event(channel, EFX_CHANNEL_MAGIC_TEST(channel));
+}
+
+void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue)
+{
+ efx_farch_magic_event(efx_rx_queue_channel(rx_queue),
+ EFX_CHANNEL_MAGIC_FILL(rx_queue));
+}
+
+/**************************************************************************
+ *
+ * Hardware interrupts
+ * The hardware interrupt handler does very little work; all the event
+ * queue processing is carried out by per-channel tasklets.
+ *
+ **************************************************************************/
+
+/* Enable/disable/generate interrupts */
+static inline void efx_farch_interrupts(struct efx_nic *efx,
+ bool enabled, bool force)
+{
+ efx_oword_t int_en_reg_ker;
+
+ EFX_POPULATE_OWORD_3(int_en_reg_ker,
+ FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level,
+ FRF_AZ_KER_INT_KER, force,
+ FRF_AZ_DRV_INT_EN_KER, enabled);
+ efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
+}
+
+void efx_farch_irq_enable_master(struct efx_nic *efx)
+{
+ EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
+ wmb(); /* Ensure interrupt vector is clear before interrupts enabled */
+
+ efx_farch_interrupts(efx, true, false);
+}
+
+void efx_farch_irq_disable_master(struct efx_nic *efx)
+{
+ /* Disable interrupts */
+ efx_farch_interrupts(efx, false, false);
+}
+
+/* Generate a test interrupt
+ * Interrupt must already have been enabled, otherwise nasty things
+ * may happen.
+ */
+void efx_farch_irq_test_generate(struct efx_nic *efx)
+{
+ efx_farch_interrupts(efx, true, true);
+}
+
+/* Process a fatal interrupt
+ * Disable bus mastering ASAP and schedule a reset
+ */
+irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ efx_oword_t *int_ker = efx->irq_status.addr;
+ efx_oword_t fatal_intr;
+ int error, mem_perr;
+
+ efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
+ error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);
+
+ netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status "
+ EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
+ EFX_OWORD_VAL(fatal_intr),
+ error ? "disabling bus mastering" : "no recognised error");
+
+ /* If this is a memory parity error, dump which blocks are offending */
+ mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
+ EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
+ if (mem_perr) {
+ efx_oword_t reg;
+ efx_reado(efx, &reg, FR_AZ_MEM_STAT);
+ netif_err(efx, hw, efx->net_dev,
+ "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n",
+ EFX_OWORD_VAL(reg));
+ }
+
+ /* Disable both devices */
+ pci_clear_master(efx->pci_dev);
+ if (efx_nic_is_dual_func(efx))
+ pci_clear_master(nic_data->pci_dev2);
+ efx_farch_irq_disable_master(efx);
+
+ /* Count errors and reset or disable the NIC accordingly */
+ if (efx->int_error_count == 0 ||
+ time_after(jiffies, efx->int_error_expire)) {
+ efx->int_error_count = 0;
+ efx->int_error_expire =
+ jiffies + EFX_INT_ERROR_EXPIRE * HZ;
+ }
+ if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
+ netif_err(efx, hw, efx->net_dev,
+ "SYSTEM ERROR - reset scheduled\n");
+ efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
+ } else {
+ netif_err(efx, hw, efx->net_dev,
+ "SYSTEM ERROR - max number of errors seen."
+ "NIC will be disabled\n");
+ efx_schedule_reset(efx, RESET_TYPE_DISABLE);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* Handle a legacy interrupt
+ * Acknowledges the interrupt and schedules event queue processing.
+ */
+irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id)
+{
+ struct efx_nic *efx = dev_id;
+ bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled);
+ efx_oword_t *int_ker = efx->irq_status.addr;
+ irqreturn_t result = IRQ_NONE;
+ struct efx_channel *channel;
+ efx_dword_t reg;
+ u32 queues;
+ int syserr;
+
+ /* Read the ISR which also ACKs the interrupts */
+ efx_readd(efx, &reg, FR_BZ_INT_ISR0);
+ queues = EFX_EXTRACT_DWORD(reg, 0, 31);
+
+ /* Legacy interrupts are disabled too late by the EEH kernel
+ * code. Disable them earlier.
+ * If an EEH error occurred, the read will have returned all ones.
+ */
+ if (EFX_DWORD_IS_ALL_ONES(reg) && efx_try_recovery(efx) &&
+ !efx->eeh_disabled_legacy_irq) {
+ disable_irq_nosync(efx->legacy_irq);
+ efx->eeh_disabled_legacy_irq = true;
+ }
+
+ /* Handle non-event-queue sources */
+ if (queues & (1U << efx->irq_level) && soft_enabled) {
+ syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
+ if (unlikely(syserr))
+ return efx_farch_fatal_interrupt(efx);
+ efx->last_irq_cpu = raw_smp_processor_id();
+ }
+
+ if (queues != 0) {
+ efx->irq_zero_count = 0;
+
+ /* Schedule processing of any interrupting queues */
+ if (likely(soft_enabled)) {
+ efx_for_each_channel(channel, efx) {
+ if (queues & 1)
+ efx_schedule_channel_irq(channel);
+ queues >>= 1;
+ }
+ }
+ result = IRQ_HANDLED;
+
+ } else {
+ efx_qword_t *event;
+
+ /* Legacy ISR read can return zero once (SF bug 15783) */
+
+ /* We can't return IRQ_HANDLED more than once on seeing ISR=0
+ * because this might be a shared interrupt. */
+ if (efx->irq_zero_count++ == 0)
+ result = IRQ_HANDLED;
+
+ /* Ensure we schedule or rearm all event queues */
+ if (likely(soft_enabled)) {
+ efx_for_each_channel(channel, efx) {
+ event = efx_event(channel,
+ channel->eventq_read_ptr);
+ if (efx_event_present(event))
+ efx_schedule_channel_irq(channel);
+ else
+ efx_farch_ev_read_ack(channel);
+ }
+ }
+ }
+
+ if (result == IRQ_HANDLED)
+ netif_vdbg(efx, intr, efx->net_dev,
+ "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
+ irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
+
+ return result;
+}
+
+/* Handle an MSI interrupt
+ *
+ * Handle an MSI hardware interrupt. This routine schedules event
+ * queue processing. No interrupt acknowledgement cycle is necessary.
+ * Also, we never need to check that the interrupt is for us, since
+ * MSI interrupts cannot be shared.
+ */
+irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id)
+{
+ struct efx_msi_context *context = dev_id;
+ struct efx_nic *efx = context->efx;
+ efx_oword_t *int_ker = efx->irq_status.addr;
+ int syserr;
+
+ netif_vdbg(efx, intr, efx->net_dev,
+ "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
+ irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
+
+ if (!likely(ACCESS_ONCE(efx->irq_soft_enabled)))
+ return IRQ_HANDLED;
+
+ /* Handle non-event-queue sources */
+ if (context->index == efx->irq_level) {
+ syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
+ if (unlikely(syserr))
+ return efx_farch_fatal_interrupt(efx);
+ efx->last_irq_cpu = raw_smp_processor_id();
+ }
+
+ /* Schedule processing of the channel */
+ efx_schedule_channel_irq(efx->channel[context->index]);
+
+ return IRQ_HANDLED;
+}
+
+/* Set up the RSS indirection table.
+ * This maps from the hash value of the packet to an RX queue.
+ */
+void efx_farch_rx_push_indir_table(struct efx_nic *efx)
+{
+ size_t i = 0;
+ efx_dword_t dword;
+
+ BUG_ON(efx_nic_rev(efx) < EFX_REV_FALCON_B0);
+
+ BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
+ FR_BZ_RX_INDIRECTION_TBL_ROWS);
+
+ for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
+ EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
+ efx->rx_indir_table[i]);
+ efx_writed(efx, &dword,
+ FR_BZ_RX_INDIRECTION_TBL +
+ FR_BZ_RX_INDIRECTION_TBL_STEP * i);
+ }
+}
+
+/* Looks at available SRAM resources and works out how many queues we
+ * can support, and where things like descriptor caches should live.
+ *
+ * SRAM is split up as follows:
+ * 0 buftbl entries for channels
+ * efx->vf_buftbl_base buftbl entries for SR-IOV
+ * efx->rx_dc_base RX descriptor caches
+ * efx->tx_dc_base TX descriptor caches
+ */
+void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
+{
+ unsigned vi_count, buftbl_min;
+
+#ifdef CONFIG_SFC_SRIOV
+ struct siena_nic_data *nic_data = efx->nic_data;
+#endif
+
+ /* Account for the buffer table entries backing the datapath channels
+ * and the descriptor caches for those channels.
+ */
+ buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE +
+ efx->n_tx_channels * EFX_TXQ_TYPES * EFX_MAX_DMAQ_SIZE +
+ efx->n_channels * EFX_MAX_EVQ_SIZE)
+ * sizeof(efx_qword_t) / EFX_BUF_SIZE);
+ vi_count = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
+
+#ifdef CONFIG_SFC_SRIOV
+ if (efx->type->sriov_wanted(efx)) {
+ unsigned vi_dc_entries, buftbl_free, entries_per_vf, vf_limit;
+
+ nic_data->vf_buftbl_base = buftbl_min;
+
+ vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES;
+ vi_count = max(vi_count, EFX_VI_BASE);
+ buftbl_free = (sram_lim_qw - buftbl_min -
+ vi_count * vi_dc_entries);
+
+ entries_per_vf = ((vi_dc_entries + EFX_VF_BUFTBL_PER_VI) *
+ efx_vf_size(efx));
+ vf_limit = min(buftbl_free / entries_per_vf,
+ (1024U - EFX_VI_BASE) >> efx->vi_scale);
+
+ if (efx->vf_count > vf_limit) {
+ netif_err(efx, probe, efx->net_dev,
+ "Reducing VF count from from %d to %d\n",
+ efx->vf_count, vf_limit);
+ efx->vf_count = vf_limit;
+ }
+ vi_count += efx->vf_count * efx_vf_size(efx);
+ }
+#endif
+
+ efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
+ efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES;
+}
+
+u32 efx_farch_fpga_ver(struct efx_nic *efx)
+{
+ efx_oword_t altera_build;
+ efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
+ return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
+}
+
+void efx_farch_init_common(struct efx_nic *efx)
+{
+ efx_oword_t temp;
+
+ /* Set positions of descriptor caches in SRAM. */
+ EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base);
+ efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
+ EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base);
+ efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);
+
+ /* Set TX descriptor cache size. */
+ BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
+ EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
+ efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);
+
+ /* Set RX descriptor cache size. Set low watermark to size-8, as
+ * this allows most efficient prefetching.
+ */
+ BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
+ EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
+ efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
+ EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
+ efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);
+
+ /* Program INT_KER address */
+ EFX_POPULATE_OWORD_2(temp,
+ FRF_AZ_NORM_INT_VEC_DIS_KER,
+ EFX_INT_MODE_USE_MSI(efx),
+ FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
+ efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);
+
+ if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
+ /* Use an interrupt level unused by event queues */
+ efx->irq_level = 0x1f;
+ else
+ /* Use a valid MSI-X vector */
+ efx->irq_level = 0;
+
+ /* Enable all the genuinely fatal interrupts. (They are still
+ * masked by the overall interrupt mask, controlled by
+ * falcon_interrupts()).
+ *
+ * Note: All other fatal interrupts are enabled
+ */
+ EFX_POPULATE_OWORD_3(temp,
+ FRF_AZ_ILL_ADR_INT_KER_EN, 1,
+ FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
+ FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
+ if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
+ EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
+ EFX_INVERT_OWORD(temp);
+ efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
+
+ /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
+ * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
+ */
+ efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
+ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
+ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
+ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
+ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
+ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
+ /* Enable SW_EV to inherit in char driver - assume harmless here */
+ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
+ /* Prefetch threshold 2 => fetch when descriptor cache half empty */
+ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
+ /* Disable hardware watchdog which can misfire */
+ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
+ /* Squash TX of packets of 16 bytes or less */
+ if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
+ EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
+ efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
+
+ if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+ EFX_POPULATE_OWORD_4(temp,
+ /* Default values */
+ FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
+ FRF_BZ_TX_PACE_SB_AF, 0xb,
+ FRF_BZ_TX_PACE_FB_BASE, 0,
+ /* Allow large pace values in the
+ * fast bin. */
+ FRF_BZ_TX_PACE_BIN_TH,
+ FFE_BZ_TX_PACE_RESERVED);
+ efx_writeo(efx, &temp, FR_BZ_TX_PACE);
+ }
+}
+
+/**************************************************************************
+ *
+ * Filter tables
+ *
+ **************************************************************************
+ */
+
+/* "Fudge factors" - difference between programmed value and actual depth.
+ * Due to pipelined implementation we need to program H/W with a value that
+ * is larger than the hop limit we want.
+ */
+#define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD 3
+#define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL 1
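+/* For example, a wildcard search limit of 5 is programmed into the hardware
+ * as 5 + 3 = 8; see efx_farch_filter_push_rx_config().
+ */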
+
+/* Hard maximum search limit. Hardware will time-out beyond 200-something.
+ * We also need to avoid infinite loops in efx_farch_filter_search() when the
+ * table is full.
+ */
+#define EFX_FARCH_FILTER_CTL_SRCH_MAX 200
+
+/* Don't try very hard to find space for performance hints, as this is
+ * counter-productive. */
+#define EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX 5
+
+enum efx_farch_filter_type {
+ EFX_FARCH_FILTER_TCP_FULL = 0,
+ EFX_FARCH_FILTER_TCP_WILD,
+ EFX_FARCH_FILTER_UDP_FULL,
+ EFX_FARCH_FILTER_UDP_WILD,
+ EFX_FARCH_FILTER_MAC_FULL = 4,
+ EFX_FARCH_FILTER_MAC_WILD,
+ EFX_FARCH_FILTER_UC_DEF = 8,
+ EFX_FARCH_FILTER_MC_DEF,
+ EFX_FARCH_FILTER_TYPE_COUNT, /* number of specific types */
+};
+
+enum efx_farch_filter_table_id {
+ EFX_FARCH_FILTER_TABLE_RX_IP = 0,
+ EFX_FARCH_FILTER_TABLE_RX_MAC,
+ EFX_FARCH_FILTER_TABLE_RX_DEF,
+ EFX_FARCH_FILTER_TABLE_TX_MAC,
+ EFX_FARCH_FILTER_TABLE_COUNT,
+};
+
+enum efx_farch_filter_index {
+ EFX_FARCH_FILTER_INDEX_UC_DEF,
+ EFX_FARCH_FILTER_INDEX_MC_DEF,
+ EFX_FARCH_FILTER_SIZE_RX_DEF,
+};
+
+struct efx_farch_filter_spec {
+ u8 type:4;
+ u8 priority:4;
+ u8 flags;
+ u16 dmaq_id;
+ u32 data[3];
+};
+
+struct efx_farch_filter_table {
+ enum efx_farch_filter_table_id id;
+ u32 offset; /* address of table relative to BAR */
+ unsigned size; /* number of entries */
+ unsigned step; /* step between entries */
+ unsigned used; /* number currently used */
+ unsigned long *used_bitmap;
+ struct efx_farch_filter_spec *spec;
+ unsigned search_limit[EFX_FARCH_FILTER_TYPE_COUNT];
+};
+
+struct efx_farch_filter_state {
+ struct efx_farch_filter_table table[EFX_FARCH_FILTER_TABLE_COUNT];
+};
+
+static void
+efx_farch_filter_table_clear_entry(struct efx_nic *efx,
+ struct efx_farch_filter_table *table,
+ unsigned int filter_idx);
+
+/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
+ * key derived from the n-tuple. The initial LFSR state is 0xffff. */
+static u16 efx_farch_filter_hash(u32 key)
+{
+ u16 tmp;
+
+ /* First 16 rounds */
+ tmp = 0x1fff ^ key >> 16;
+ tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
+ tmp = tmp ^ tmp >> 9;
+ /* Last 16 rounds */
+ tmp = tmp ^ tmp << 13 ^ key;
+ tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
+ return tmp ^ tmp >> 9;
+}
+
+/* To allow for hash collisions, filter search continues at these
+ * increments from the first possible entry selected by the hash. */
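+/* The increment is always odd, so for the power-of-two table sizes used
+ * here the probe sequence can eventually visit every entry.
+ */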
+static u16 efx_farch_filter_increment(u32 key)
+{
+ return key * 2 - 1;
+}
+
+static enum efx_farch_filter_table_id
+efx_farch_filter_spec_table_id(const struct efx_farch_filter_spec *spec)
+{
+ BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
+ (EFX_FARCH_FILTER_TCP_FULL >> 2));
+ BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
+ (EFX_FARCH_FILTER_TCP_WILD >> 2));
+ BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
+ (EFX_FARCH_FILTER_UDP_FULL >> 2));
+ BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
+ (EFX_FARCH_FILTER_UDP_WILD >> 2));
+ BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_MAC !=
+ (EFX_FARCH_FILTER_MAC_FULL >> 2));
+ BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_MAC !=
+ (EFX_FARCH_FILTER_MAC_WILD >> 2));
+ BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_TX_MAC !=
+ EFX_FARCH_FILTER_TABLE_RX_MAC + 2);
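+ /* e.g. EFX_FARCH_FILTER_UDP_WILD (3) >> 2 selects
+ * EFX_FARCH_FILTER_TABLE_RX_IP; EFX_FARCH_FILTER_MAC_WILD (5) >> 2 selects
+ * EFX_FARCH_FILTER_TABLE_RX_MAC, or EFX_FARCH_FILTER_TABLE_TX_MAC when
+ * EFX_FILTER_FLAG_TX is set.
+ */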
+ return (spec->type >> 2) + ((spec->flags & EFX_FILTER_FLAG_TX) ? 2 : 0);
+}
+
+static void efx_farch_filter_push_rx_config(struct efx_nic *efx)
+{
+ struct efx_farch_filter_state *state = efx->filter_state;
+ struct efx_farch_filter_table *table;
+ efx_oword_t filter_ctl;
+
+ efx_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
+
+ table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
+ EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT,
+ table->search_limit[EFX_FARCH_FILTER_TCP_FULL] +
+ EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
+ EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT,
+ table->search_limit[EFX_FARCH_FILTER_TCP_WILD] +
+ EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
+ EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT,
+ table->search_limit[EFX_FARCH_FILTER_UDP_FULL] +
+ EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
+ EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT,
+ table->search_limit[EFX_FARCH_FILTER_UDP_WILD] +
+ EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
+
+ table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC];
+ if (table->size) {
+ EFX_SET_OWORD_FIELD(
+ filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT,
+ table->search_limit[EFX_FARCH_FILTER_MAC_FULL] +
+ EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
+ EFX_SET_OWORD_FIELD(
+ filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT,
+ table->search_limit[EFX_FARCH_FILTER_MAC_WILD] +
+ EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
+ }
+
+ table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
+ if (table->size) {
+ EFX_SET_OWORD_FIELD(
+ filter_ctl, FRF_CZ_UNICAST_NOMATCH_Q_ID,
+ table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].dmaq_id);
+ EFX_SET_OWORD_FIELD(
+ filter_ctl, FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED,
+ !!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags &
+ EFX_FILTER_FLAG_RX_RSS));
+ EFX_SET_OWORD_FIELD(
+ filter_ctl, FRF_CZ_MULTICAST_NOMATCH_Q_ID,
+ table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].dmaq_id);
+ EFX_SET_OWORD_FIELD(
+ filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED,
+ !!(table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags &
+ EFX_FILTER_FLAG_RX_RSS));
+
+ /* There is a single bit to enable RX scatter for all
+ * unmatched packets. Only set it if scatter is
+ * enabled in both filter specs.
+ */
+ EFX_SET_OWORD_FIELD(
+ filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
+ !!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags &
+ table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags &
+ EFX_FILTER_FLAG_RX_SCATTER));
+ } else if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+ /* We don't expose 'default' filters because unmatched
+ * packets always go to the queue number found in the
+ * RSS table. But we still need to set the RX scatter
+ * bit here.
+ */
+ EFX_SET_OWORD_FIELD(
+ filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
+ efx->rx_scatter);
+ }
+
+ efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
+}
+
+static void efx_farch_filter_push_tx_limits(struct efx_nic *efx)
+{
+ struct efx_farch_filter_state *state = efx->filter_state;
+ struct efx_farch_filter_table *table;
+ efx_oword_t tx_cfg;
+
+ efx_reado(efx, &tx_cfg, FR_AZ_TX_CFG);
+
+ table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC];
+ if (table->size) {
+ EFX_SET_OWORD_FIELD(
+ tx_cfg, FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE,
+ table->search_limit[EFX_FARCH_FILTER_MAC_FULL] +
+ EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
+ EFX_SET_OWORD_FIELD(
+ tx_cfg, FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE,
+ table->search_limit[EFX_FARCH_FILTER_MAC_WILD] +
+ EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
+ }
+
+ efx_writeo(efx, &tx_cfg, FR_AZ_TX_CFG);
+}
+
+static int
+efx_farch_filter_from_gen_spec(struct efx_farch_filter_spec *spec,
+ const struct efx_filter_spec *gen_spec)
+{
+ bool is_full = false;
+
+ if ((gen_spec->flags & EFX_FILTER_FLAG_RX_RSS) &&
+ gen_spec->rss_context != EFX_FILTER_RSS_CONTEXT_DEFAULT)
+ return -EINVAL;
+
+ spec->priority = gen_spec->priority;
+ spec->flags = gen_spec->flags;
+ spec->dmaq_id = gen_spec->dmaq_id;
+
+ switch (gen_spec->match_flags) {
+ case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
+ EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT):
+ is_full = true;
+ /* fall through */
+ case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT): {
+ __be32 rhost, host1, host2;
+ __be16 rport, port1, port2;
+
+ EFX_BUG_ON_PARANOID(!(gen_spec->flags & EFX_FILTER_FLAG_RX));
+
+ if (gen_spec->ether_type != htons(ETH_P_IP))
+ return -EPROTONOSUPPORT;
+ if (gen_spec->loc_port == 0 ||
+ (is_full && gen_spec->rem_port == 0))
+ return -EADDRNOTAVAIL;
+ switch (gen_spec->ip_proto) {
+ case IPPROTO_TCP:
+ spec->type = (is_full ? EFX_FARCH_FILTER_TCP_FULL :
+ EFX_FARCH_FILTER_TCP_WILD);
+ break;
+ case IPPROTO_UDP:
+ spec->type = (is_full ? EFX_FARCH_FILTER_UDP_FULL :
+ EFX_FARCH_FILTER_UDP_WILD);
+ break;
+ default:
+ return -EPROTONOSUPPORT;
+ }
+
+ /* Filter is constructed in terms of source and destination,
+ * with the odd wrinkle that the ports are swapped in a UDP
+ * wildcard filter. We need to convert from local and remote
+ * (= zero for wildcard) addresses.
+ */
+ rhost = is_full ? gen_spec->rem_host[0] : 0;
+ rport = is_full ? gen_spec->rem_port : 0;
+ host1 = rhost;
+ host2 = gen_spec->loc_host[0];
+ if (!is_full && gen_spec->ip_proto == IPPROTO_UDP) {
+ port1 = gen_spec->loc_port;
+ port2 = rport;
+ } else {
+ port1 = rport;
+ port2 = gen_spec->loc_port;
+ }
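+		/* Illustrative note: the 96-bit value packed into data[]
+		 * below is port1 (16 bits), host1 (32), port2 (16),
+		 * host2 (32), starting from the least significant end.
+		 */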
+ spec->data[0] = ntohl(host1) << 16 | ntohs(port1);
+ spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16;
+ spec->data[2] = ntohl(host2);
+
+ break;
+ }
+
+ case EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_OUTER_VID:
+ is_full = true;
+ /* fall through */
+ case EFX_FILTER_MATCH_LOC_MAC:
+ spec->type = (is_full ? EFX_FARCH_FILTER_MAC_FULL :
+ EFX_FARCH_FILTER_MAC_WILD);
+ spec->data[0] = is_full ? ntohs(gen_spec->outer_vid) : 0;
+ spec->data[1] = (gen_spec->loc_mac[2] << 24 |
+ gen_spec->loc_mac[3] << 16 |
+ gen_spec->loc_mac[4] << 8 |
+ gen_spec->loc_mac[5]);
+ spec->data[2] = (gen_spec->loc_mac[0] << 8 |
+ gen_spec->loc_mac[1]);
+ break;
+
+ case EFX_FILTER_MATCH_LOC_MAC_IG:
+ spec->type = (is_multicast_ether_addr(gen_spec->loc_mac) ?
+ EFX_FARCH_FILTER_MC_DEF :
+ EFX_FARCH_FILTER_UC_DEF);
+ memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */
+ break;
+
+ default:
+ return -EPROTONOSUPPORT;
+ }
+
+ return 0;
+}
+
+static void
+efx_farch_filter_to_gen_spec(struct efx_filter_spec *gen_spec,
+ const struct efx_farch_filter_spec *spec)
+{
+ bool is_full = false;
+
+ /* *gen_spec should be completely initialised, to be consistent
+ * with efx_filter_init_{rx,tx}() and in case we want to copy
+ * it back to userland.
+ */
+ memset(gen_spec, 0, sizeof(*gen_spec));
+
+ gen_spec->priority = spec->priority;
+ gen_spec->flags = spec->flags;
+ gen_spec->dmaq_id = spec->dmaq_id;
+
+ switch (spec->type) {
+ case EFX_FARCH_FILTER_TCP_FULL:
+ case EFX_FARCH_FILTER_UDP_FULL:
+ is_full = true;
+ /* fall through */
+ case EFX_FARCH_FILTER_TCP_WILD:
+ case EFX_FARCH_FILTER_UDP_WILD: {
+ __be32 host1, host2;
+ __be16 port1, port2;
+
+ gen_spec->match_flags =
+ EFX_FILTER_MATCH_ETHER_TYPE |
+ EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT;
+ if (is_full)
+ gen_spec->match_flags |= (EFX_FILTER_MATCH_REM_HOST |
+ EFX_FILTER_MATCH_REM_PORT);
+ gen_spec->ether_type = htons(ETH_P_IP);
+ gen_spec->ip_proto =
+ (spec->type == EFX_FARCH_FILTER_TCP_FULL ||
+ spec->type == EFX_FARCH_FILTER_TCP_WILD) ?
+ IPPROTO_TCP : IPPROTO_UDP;
+
+ host1 = htonl(spec->data[0] >> 16 | spec->data[1] << 16);
+ port1 = htons(spec->data[0]);
+ host2 = htonl(spec->data[2]);
+ port2 = htons(spec->data[1] >> 16);
+ if (spec->flags & EFX_FILTER_FLAG_TX) {
+ gen_spec->loc_host[0] = host1;
+ gen_spec->rem_host[0] = host2;
+ } else {
+ gen_spec->loc_host[0] = host2;
+ gen_spec->rem_host[0] = host1;
+ }
+ if (!!(gen_spec->flags & EFX_FILTER_FLAG_TX) ^
+ (!is_full && gen_spec->ip_proto == IPPROTO_UDP)) {
+ gen_spec->loc_port = port1;
+ gen_spec->rem_port = port2;
+ } else {
+ gen_spec->loc_port = port2;
+ gen_spec->rem_port = port1;
+ }
+
+ break;
+ }
+
+ case EFX_FARCH_FILTER_MAC_FULL:
+ is_full = true;
+ /* fall through */
+ case EFX_FARCH_FILTER_MAC_WILD:
+ gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC;
+ if (is_full)
+ gen_spec->match_flags |= EFX_FILTER_MATCH_OUTER_VID;
+ gen_spec->loc_mac[0] = spec->data[2] >> 8;
+ gen_spec->loc_mac[1] = spec->data[2];
+ gen_spec->loc_mac[2] = spec->data[1] >> 24;
+ gen_spec->loc_mac[3] = spec->data[1] >> 16;
+ gen_spec->loc_mac[4] = spec->data[1] >> 8;
+ gen_spec->loc_mac[5] = spec->data[1];
+ gen_spec->outer_vid = htons(spec->data[0]);
+ break;
+
+ case EFX_FARCH_FILTER_UC_DEF:
+ case EFX_FARCH_FILTER_MC_DEF:
+ gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC_IG;
+ gen_spec->loc_mac[0] = spec->type == EFX_FARCH_FILTER_MC_DEF;
+ break;
+
+ default:
+ WARN_ON(1);
+ break;
+ }
+}
+
+static void
+efx_farch_filter_init_rx_auto(struct efx_nic *efx,
+ struct efx_farch_filter_spec *spec)
+{
+ /* If there's only one channel then disable RSS for non VF
+ * traffic, thereby allowing VFs to use RSS when the PF can't.
+ */
+ spec->priority = EFX_FILTER_PRI_AUTO;
+ spec->flags = (EFX_FILTER_FLAG_RX |
+ (efx->n_rx_channels > 1 ? EFX_FILTER_FLAG_RX_RSS : 0) |
+ (efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0));
+ spec->dmaq_id = 0;
+}
+
+/* Build a filter entry and return its n-tuple key. */
+static u32 efx_farch_filter_build(efx_oword_t *filter,
+ struct efx_farch_filter_spec *spec)
+{
+ u32 data3;
+
+ switch (efx_farch_filter_spec_table_id(spec)) {
+ case EFX_FARCH_FILTER_TABLE_RX_IP: {
+ bool is_udp = (spec->type == EFX_FARCH_FILTER_UDP_FULL ||
+ spec->type == EFX_FARCH_FILTER_UDP_WILD);
+ EFX_POPULATE_OWORD_7(
+ *filter,
+ FRF_BZ_RSS_EN,
+ !!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
+ FRF_BZ_SCATTER_EN,
+ !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
+ FRF_BZ_TCP_UDP, is_udp,
+ FRF_BZ_RXQ_ID, spec->dmaq_id,
+ EFX_DWORD_2, spec->data[2],
+ EFX_DWORD_1, spec->data[1],
+ EFX_DWORD_0, spec->data[0]);
+ data3 = is_udp;
+ break;
+ }
+
+ case EFX_FARCH_FILTER_TABLE_RX_MAC: {
+ bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD;
+ EFX_POPULATE_OWORD_7(
+ *filter,
+ FRF_CZ_RMFT_RSS_EN,
+ !!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
+ FRF_CZ_RMFT_SCATTER_EN,
+ !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
+ FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id,
+ FRF_CZ_RMFT_WILDCARD_MATCH, is_wild,
+ FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2],
+ FRF_CZ_RMFT_DEST_MAC_LO, spec->data[1],
+ FRF_CZ_RMFT_VLAN_ID, spec->data[0]);
+ data3 = is_wild;
+ break;
+ }
+
+ case EFX_FARCH_FILTER_TABLE_TX_MAC: {
+ bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD;
+ EFX_POPULATE_OWORD_5(*filter,
+ FRF_CZ_TMFT_TXQ_ID, spec->dmaq_id,
+ FRF_CZ_TMFT_WILDCARD_MATCH, is_wild,
+ FRF_CZ_TMFT_SRC_MAC_HI, spec->data[2],
+ FRF_CZ_TMFT_SRC_MAC_LO, spec->data[1],
+ FRF_CZ_TMFT_VLAN_ID, spec->data[0]);
+ data3 = is_wild | spec->dmaq_id << 1;
+ break;
+ }
+
+ default:
+ BUG();
+ }
+
+ return spec->data[0] ^ spec->data[1] ^ spec->data[2] ^ data3;
+}
+
+static bool efx_farch_filter_equal(const struct efx_farch_filter_spec *left,
+ const struct efx_farch_filter_spec *right)
+{
+ if (left->type != right->type ||
+ memcmp(left->data, right->data, sizeof(left->data)))
+ return false;
+
+ if (left->flags & EFX_FILTER_FLAG_TX &&
+ left->dmaq_id != right->dmaq_id)
+ return false;
+
+ return true;
+}
+
+/*
+ * Construct/deconstruct external filter IDs. At least the RX filter
+ * IDs must be ordered by matching priority, for RX NFC semantics.
+ *
+ * Deconstruction needs to be robust against invalid IDs so that
+ * efx_filter_remove_id_safe() and efx_filter_get_filter_safe() can
+ * accept user-provided IDs.
+ */
+
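+/* Layout of an external filter ID, as implied by the code below
+ * (illustrative only):
+ *
+ *	bits 13 and up: "range" = match priority, plus
+ *	                EFX_FARCH_FILTER_MATCH_PRI_COUNT for TX filters
+ *	bits 0-12:      index within the hardware table
+ *
+ * Ordering IDs numerically therefore orders RX filters by match
+ * priority, as required above.
+ */
+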
+#define EFX_FARCH_FILTER_MATCH_PRI_COUNT 5
+
+static const u8 efx_farch_filter_type_match_pri[EFX_FARCH_FILTER_TYPE_COUNT] = {
+ [EFX_FARCH_FILTER_TCP_FULL] = 0,
+ [EFX_FARCH_FILTER_UDP_FULL] = 0,
+ [EFX_FARCH_FILTER_TCP_WILD] = 1,
+ [EFX_FARCH_FILTER_UDP_WILD] = 1,
+ [EFX_FARCH_FILTER_MAC_FULL] = 2,
+ [EFX_FARCH_FILTER_MAC_WILD] = 3,
+ [EFX_FARCH_FILTER_UC_DEF] = 4,
+ [EFX_FARCH_FILTER_MC_DEF] = 4,
+};
+
+static const enum efx_farch_filter_table_id efx_farch_filter_range_table[] = {
+ EFX_FARCH_FILTER_TABLE_RX_IP, /* RX match pri 0 */
+ EFX_FARCH_FILTER_TABLE_RX_IP,
+ EFX_FARCH_FILTER_TABLE_RX_MAC,
+ EFX_FARCH_FILTER_TABLE_RX_MAC,
+ EFX_FARCH_FILTER_TABLE_RX_DEF, /* RX match pri 4 */
+ EFX_FARCH_FILTER_TABLE_TX_MAC, /* TX match pri 0 */
+ EFX_FARCH_FILTER_TABLE_TX_MAC, /* TX match pri 1 */
+};
+
+#define EFX_FARCH_FILTER_INDEX_WIDTH 13
+#define EFX_FARCH_FILTER_INDEX_MASK ((1 << EFX_FARCH_FILTER_INDEX_WIDTH) - 1)
+
+static inline u32
+efx_farch_filter_make_id(const struct efx_farch_filter_spec *spec,
+ unsigned int index)
+{
+ unsigned int range;
+
+ range = efx_farch_filter_type_match_pri[spec->type];
+ if (!(spec->flags & EFX_FILTER_FLAG_RX))
+ range += EFX_FARCH_FILTER_MATCH_PRI_COUNT;
+
+ return range << EFX_FARCH_FILTER_INDEX_WIDTH | index;
+}
+
+static inline enum efx_farch_filter_table_id
+efx_farch_filter_id_table_id(u32 id)
+{
+ unsigned int range = id >> EFX_FARCH_FILTER_INDEX_WIDTH;
+
+ if (range < ARRAY_SIZE(efx_farch_filter_range_table))
+ return efx_farch_filter_range_table[range];
+ else
+ return EFX_FARCH_FILTER_TABLE_COUNT; /* invalid */
+}
+
+static inline unsigned int efx_farch_filter_id_index(u32 id)
+{
+ return id & EFX_FARCH_FILTER_INDEX_MASK;
+}
+
+u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx)
+{
+ struct efx_farch_filter_state *state = efx->filter_state;
+ unsigned int range = EFX_FARCH_FILTER_MATCH_PRI_COUNT - 1;
+ enum efx_farch_filter_table_id table_id;
+
+ do {
+ table_id = efx_farch_filter_range_table[range];
+ if (state->table[table_id].size != 0)
+ return range << EFX_FARCH_FILTER_INDEX_WIDTH |
+ state->table[table_id].size;
+ } while (range--);
+
+ return 0;
+}
+
+s32 efx_farch_filter_insert(struct efx_nic *efx,
+ struct efx_filter_spec *gen_spec,
+ bool replace_equal)
+{
+ struct efx_farch_filter_state *state = efx->filter_state;
+ struct efx_farch_filter_table *table;
+ struct efx_farch_filter_spec spec;
+ efx_oword_t filter;
+ int rep_index, ins_index;
+ unsigned int depth = 0;
+ int rc;
+
+ rc = efx_farch_filter_from_gen_spec(&spec, gen_spec);
+ if (rc)
+ return rc;
+
+ table = &state->table[efx_farch_filter_spec_table_id(&spec)];
+ if (table->size == 0)
+ return -EINVAL;
+
+ netif_vdbg(efx, hw, efx->net_dev,
+ "%s: type %d search_limit=%d", __func__, spec.type,
+ table->search_limit[spec.type]);
+
+ if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) {
+ /* One filter spec per type */
+ BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_UC_DEF != 0);
+ BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_MC_DEF !=
+ EFX_FARCH_FILTER_MC_DEF - EFX_FARCH_FILTER_UC_DEF);
+ rep_index = spec.type - EFX_FARCH_FILTER_UC_DEF;
+ ins_index = rep_index;
+
+ spin_lock_bh(&efx->filter_lock);
+ } else {
+ /* Search concurrently for
+ * (1) a filter to be replaced (rep_index): any filter
+ * with the same match values, up to the current
+ * search depth for this type, and
+ * (2) the insertion point (ins_index): (1) or any
+ * free slot before it or up to the maximum search
+ * depth for this priority
+ * We fail if we cannot find (2).
+ *
+ * We can stop once either
+ * (a) we find (1), in which case we have definitely
+ * found (2) as well; or
+ * (b) we have searched exhaustively for (1), and have
+ * either found (2) or searched exhaustively for it
+ */
+ u32 key = efx_farch_filter_build(&filter, &spec);
+ unsigned int hash = efx_farch_filter_hash(key);
+ unsigned int incr = efx_farch_filter_increment(key);
+ unsigned int max_rep_depth = table->search_limit[spec.type];
+ unsigned int max_ins_depth =
+ spec.priority <= EFX_FILTER_PRI_HINT ?
+ EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX :
+ EFX_FARCH_FILTER_CTL_SRCH_MAX;
+ unsigned int i = hash & (table->size - 1);
+
+ ins_index = -1;
+ depth = 1;
+
+ spin_lock_bh(&efx->filter_lock);
+
+ for (;;) {
+ if (!test_bit(i, table->used_bitmap)) {
+ if (ins_index < 0)
+ ins_index = i;
+ } else if (efx_farch_filter_equal(&spec,
+ &table->spec[i])) {
+ /* Case (a) */
+ if (ins_index < 0)
+ ins_index = i;
+ rep_index = i;
+ break;
+ }
+
+ if (depth >= max_rep_depth &&
+ (ins_index >= 0 || depth >= max_ins_depth)) {
+ /* Case (b) */
+ if (ins_index < 0) {
+ rc = -EBUSY;
+ goto out;
+ }
+ rep_index = -1;
+ break;
+ }
+
+ i = (i + incr) & (table->size - 1);
+ ++depth;
+ }
+ }
+
+ /* If we found a filter to be replaced, check whether we
+ * should do so
+ */
+ if (rep_index >= 0) {
+ struct efx_farch_filter_spec *saved_spec =
+ &table->spec[rep_index];
+
+ if (spec.priority == saved_spec->priority && !replace_equal) {
+ rc = -EEXIST;
+ goto out;
+ }
+ if (spec.priority < saved_spec->priority) {
+ rc = -EPERM;
+ goto out;
+ }
+ if (saved_spec->priority == EFX_FILTER_PRI_AUTO ||
+ saved_spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO)
+ spec.flags |= EFX_FILTER_FLAG_RX_OVER_AUTO;
+ }
+
+ /* Insert the filter */
+ if (ins_index != rep_index) {
+ __set_bit(ins_index, table->used_bitmap);
+ ++table->used;
+ }
+ table->spec[ins_index] = spec;
+
+ if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) {
+ efx_farch_filter_push_rx_config(efx);
+ } else {
+ if (table->search_limit[spec.type] < depth) {
+ table->search_limit[spec.type] = depth;
+ if (spec.flags & EFX_FILTER_FLAG_TX)
+ efx_farch_filter_push_tx_limits(efx);
+ else
+ efx_farch_filter_push_rx_config(efx);
+ }
+
+ efx_writeo(efx, &filter,
+ table->offset + table->step * ins_index);
+
+ /* If we were able to replace a filter by inserting
+ * at a lower depth, clear the replaced filter
+ */
+ if (ins_index != rep_index && rep_index >= 0)
+ efx_farch_filter_table_clear_entry(efx, table,
+ rep_index);
+ }
+
+ netif_vdbg(efx, hw, efx->net_dev,
+ "%s: filter type %d index %d rxq %u set",
+ __func__, spec.type, ins_index, spec.dmaq_id);
+ rc = efx_farch_filter_make_id(&spec, ins_index);
+
+out:
+ spin_unlock_bh(&efx->filter_lock);
+ return rc;
+}
+
+static void
+efx_farch_filter_table_clear_entry(struct efx_nic *efx,
+ struct efx_farch_filter_table *table,
+ unsigned int filter_idx)
+{
+ static efx_oword_t filter;
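+	/* Note that 'filter' has static storage duration and is therefore
+	 * zero-initialised; writing it out below clears the hardware entry.
+	 */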
+
+ EFX_WARN_ON_PARANOID(!test_bit(filter_idx, table->used_bitmap));
+ BUG_ON(table->offset == 0); /* can't clear MAC default filters */
+
+ __clear_bit(filter_idx, table->used_bitmap);
+ --table->used;
+ memset(&table->spec[filter_idx], 0, sizeof(table->spec[0]));
+
+ efx_writeo(efx, &filter, table->offset + table->step * filter_idx);
+
+ /* If this filter required a greater search depth than
+ * any other, the search limit for its type can now be
+ * decreased. However, it is hard to determine that
+ * unless the table has become completely empty - in
+ * which case, all its search limits can be set to 0.
+ */
+ if (unlikely(table->used == 0)) {
+ memset(table->search_limit, 0, sizeof(table->search_limit));
+ if (table->id == EFX_FARCH_FILTER_TABLE_TX_MAC)
+ efx_farch_filter_push_tx_limits(efx);
+ else
+ efx_farch_filter_push_rx_config(efx);
+ }
+}
+
+static int efx_farch_filter_remove(struct efx_nic *efx,
+ struct efx_farch_filter_table *table,
+ unsigned int filter_idx,
+ enum efx_filter_priority priority)
+{
+ struct efx_farch_filter_spec *spec = &table->spec[filter_idx];
+
+ if (!test_bit(filter_idx, table->used_bitmap) ||
+ spec->priority != priority)
+ return -ENOENT;
+
+ if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) {
+ efx_farch_filter_init_rx_auto(efx, spec);
+ efx_farch_filter_push_rx_config(efx);
+ } else {
+ efx_farch_filter_table_clear_entry(efx, table, filter_idx);
+ }
+
+ return 0;
+}
+
+int efx_farch_filter_remove_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id)
+{
+ struct efx_farch_filter_state *state = efx->filter_state;
+ enum efx_farch_filter_table_id table_id;
+ struct efx_farch_filter_table *table;
+ unsigned int filter_idx;
+ struct efx_farch_filter_spec *spec;
+ int rc;
+
+ table_id = efx_farch_filter_id_table_id(filter_id);
+ if ((unsigned int)table_id >= EFX_FARCH_FILTER_TABLE_COUNT)
+ return -ENOENT;
+ table = &state->table[table_id];
+
+ filter_idx = efx_farch_filter_id_index(filter_id);
+ if (filter_idx >= table->size)
+ return -ENOENT;
+ spec = &table->spec[filter_idx];
+
+ spin_lock_bh(&efx->filter_lock);
+ rc = efx_farch_filter_remove(efx, table, filter_idx, priority);
+ spin_unlock_bh(&efx->filter_lock);
+
+ return rc;
+}
+
+int efx_farch_filter_get_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id, struct efx_filter_spec *spec_buf)
+{
+ struct efx_farch_filter_state *state = efx->filter_state;
+ enum efx_farch_filter_table_id table_id;
+ struct efx_farch_filter_table *table;
+ struct efx_farch_filter_spec *spec;
+ unsigned int filter_idx;
+ int rc;
+
+ table_id = efx_farch_filter_id_table_id(filter_id);
+ if ((unsigned int)table_id >= EFX_FARCH_FILTER_TABLE_COUNT)
+ return -ENOENT;
+ table = &state->table[table_id];
+
+ filter_idx = efx_farch_filter_id_index(filter_id);
+ if (filter_idx >= table->size)
+ return -ENOENT;
+ spec = &table->spec[filter_idx];
+
+ spin_lock_bh(&efx->filter_lock);
+
+ if (test_bit(filter_idx, table->used_bitmap) &&
+ spec->priority == priority) {
+ efx_farch_filter_to_gen_spec(spec_buf, spec);
+ rc = 0;
+ } else {
+ rc = -ENOENT;
+ }
+
+ spin_unlock_bh(&efx->filter_lock);
+
+ return rc;
+}
+
+static void
+efx_farch_filter_table_clear(struct efx_nic *efx,
+ enum efx_farch_filter_table_id table_id,
+ enum efx_filter_priority priority)
+{
+ struct efx_farch_filter_state *state = efx->filter_state;
+ struct efx_farch_filter_table *table = &state->table[table_id];
+ unsigned int filter_idx;
+
+ spin_lock_bh(&efx->filter_lock);
+ for (filter_idx = 0; filter_idx < table->size; ++filter_idx) {
+ if (table->spec[filter_idx].priority != EFX_FILTER_PRI_AUTO)
+ efx_farch_filter_remove(efx, table,
+ filter_idx, priority);
+ }
+ spin_unlock_bh(&efx->filter_lock);
+}
+
+int efx_farch_filter_clear_rx(struct efx_nic *efx,
+ enum efx_filter_priority priority)
+{
+ efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_IP,
+ priority);
+ efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_MAC,
+ priority);
+ efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_DEF,
+ priority);
+ return 0;
+}
+
+u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
+ enum efx_filter_priority priority)
+{
+ struct efx_farch_filter_state *state = efx->filter_state;
+ enum efx_farch_filter_table_id table_id;
+ struct efx_farch_filter_table *table;
+ unsigned int filter_idx;
+ u32 count = 0;
+
+ spin_lock_bh(&efx->filter_lock);
+
+ for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
+ table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
+ table_id++) {
+ table = &state->table[table_id];
+ for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
+ if (test_bit(filter_idx, table->used_bitmap) &&
+ table->spec[filter_idx].priority == priority)
+ ++count;
+ }
+ }
+
+ spin_unlock_bh(&efx->filter_lock);
+
+ return count;
+}
+
+s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 *buf, u32 size)
+{
+ struct efx_farch_filter_state *state = efx->filter_state;
+ enum efx_farch_filter_table_id table_id;
+ struct efx_farch_filter_table *table;
+ unsigned int filter_idx;
+ s32 count = 0;
+
+ spin_lock_bh(&efx->filter_lock);
+
+ for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
+ table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
+ table_id++) {
+ table = &state->table[table_id];
+ for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
+ if (test_bit(filter_idx, table->used_bitmap) &&
+ table->spec[filter_idx].priority == priority) {
+ if (count == size) {
+ count = -EMSGSIZE;
+ goto out;
+ }
+ buf[count++] = efx_farch_filter_make_id(
+ &table->spec[filter_idx], filter_idx);
+ }
+ }
+ }
+out:
+ spin_unlock_bh(&efx->filter_lock);
+
+ return count;
+}
+
+/* Restore filter state after reset */
+void efx_farch_filter_table_restore(struct efx_nic *efx)
+{
+ struct efx_farch_filter_state *state = efx->filter_state;
+ enum efx_farch_filter_table_id table_id;
+ struct efx_farch_filter_table *table;
+ efx_oword_t filter;
+ unsigned int filter_idx;
+
+ spin_lock_bh(&efx->filter_lock);
+
+ for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
+ table = &state->table[table_id];
+
+ /* Check whether this is a regular register table */
+ if (table->step == 0)
+ continue;
+
+ for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
+ if (!test_bit(filter_idx, table->used_bitmap))
+ continue;
+ efx_farch_filter_build(&filter, &table->spec[filter_idx]);
+ efx_writeo(efx, &filter,
+ table->offset + table->step * filter_idx);
+ }
+ }
+
+ efx_farch_filter_push_rx_config(efx);
+ efx_farch_filter_push_tx_limits(efx);
+
+ spin_unlock_bh(&efx->filter_lock);
+}
+
+void efx_farch_filter_table_remove(struct efx_nic *efx)
+{
+ struct efx_farch_filter_state *state = efx->filter_state;
+ enum efx_farch_filter_table_id table_id;
+
+ for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
+ kfree(state->table[table_id].used_bitmap);
+ vfree(state->table[table_id].spec);
+ }
+ kfree(state);
+}
+
+int efx_farch_filter_table_probe(struct efx_nic *efx)
+{
+ struct efx_farch_filter_state *state;
+ struct efx_farch_filter_table *table;
+ unsigned table_id;
+
+ state = kzalloc(sizeof(struct efx_farch_filter_state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+ efx->filter_state = state;
+
+ if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+ table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
+ table->id = EFX_FARCH_FILTER_TABLE_RX_IP;
+ table->offset = FR_BZ_RX_FILTER_TBL0;
+ table->size = FR_BZ_RX_FILTER_TBL0_ROWS;
+ table->step = FR_BZ_RX_FILTER_TBL0_STEP;
+ }
+
+ if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
+ table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC];
+ table->id = EFX_FARCH_FILTER_TABLE_RX_MAC;
+ table->offset = FR_CZ_RX_MAC_FILTER_TBL0;
+ table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS;
+ table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP;
+
+ table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
+ table->id = EFX_FARCH_FILTER_TABLE_RX_DEF;
+ table->size = EFX_FARCH_FILTER_SIZE_RX_DEF;
+
+ table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC];
+ table->id = EFX_FARCH_FILTER_TABLE_TX_MAC;
+ table->offset = FR_CZ_TX_MAC_FILTER_TBL0;
+ table->size = FR_CZ_TX_MAC_FILTER_TBL0_ROWS;
+ table->step = FR_CZ_TX_MAC_FILTER_TBL0_STEP;
+ }
+
+ for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
+ table = &state->table[table_id];
+ if (table->size == 0)
+ continue;
+ table->used_bitmap = kcalloc(BITS_TO_LONGS(table->size),
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!table->used_bitmap)
+ goto fail;
+ table->spec = vzalloc(table->size * sizeof(*table->spec));
+ if (!table->spec)
+ goto fail;
+ }
+
+ table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
+ if (table->size) {
+ /* RX default filters must always exist */
+ struct efx_farch_filter_spec *spec;
+ unsigned i;
+
+ for (i = 0; i < EFX_FARCH_FILTER_SIZE_RX_DEF; i++) {
+ spec = &table->spec[i];
+ spec->type = EFX_FARCH_FILTER_UC_DEF + i;
+ efx_farch_filter_init_rx_auto(efx, spec);
+ __set_bit(i, table->used_bitmap);
+ }
+ }
+
+ efx_farch_filter_push_rx_config(efx);
+
+ return 0;
+
+fail:
+ efx_farch_filter_table_remove(efx);
+ return -ENOMEM;
+}
+
+/* Update scatter enable flags for filters pointing to our own RX queues */
+void efx_farch_filter_update_rx_scatter(struct efx_nic *efx)
+{
+ struct efx_farch_filter_state *state = efx->filter_state;
+ enum efx_farch_filter_table_id table_id;
+ struct efx_farch_filter_table *table;
+ efx_oword_t filter;
+ unsigned int filter_idx;
+
+ spin_lock_bh(&efx->filter_lock);
+
+ for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
+ table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
+ table_id++) {
+ table = &state->table[table_id];
+
+ for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
+ if (!test_bit(filter_idx, table->used_bitmap) ||
+ table->spec[filter_idx].dmaq_id >=
+ efx->n_rx_channels)
+ continue;
+
+ if (efx->rx_scatter)
+ table->spec[filter_idx].flags |=
+ EFX_FILTER_FLAG_RX_SCATTER;
+ else
+ table->spec[filter_idx].flags &=
+ ~EFX_FILTER_FLAG_RX_SCATTER;
+
+ if (table_id == EFX_FARCH_FILTER_TABLE_RX_DEF)
+ /* Pushed by efx_farch_filter_push_rx_config() */
+ continue;
+
+ efx_farch_filter_build(&filter, &table->spec[filter_idx]);
+ efx_writeo(efx, &filter,
+ table->offset + table->step * filter_idx);
+ }
+ }
+
+ efx_farch_filter_push_rx_config(efx);
+
+ spin_unlock_bh(&efx->filter_lock);
+}
+
+#ifdef CONFIG_RFS_ACCEL
+
+s32 efx_farch_filter_rfs_insert(struct efx_nic *efx,
+ struct efx_filter_spec *gen_spec)
+{
+ return efx_farch_filter_insert(efx, gen_spec, true);
+}
+
+bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
+ unsigned int index)
+{
+ struct efx_farch_filter_state *state = efx->filter_state;
+ struct efx_farch_filter_table *table =
+ &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
+
+ if (test_bit(index, table->used_bitmap) &&
+ table->spec[index].priority == EFX_FILTER_PRI_HINT &&
+ rps_may_expire_flow(efx->net_dev, table->spec[index].dmaq_id,
+ flow_id, index)) {
+ efx_farch_filter_table_clear_entry(efx, table, index);
+ return true;
+ }
+
+ return false;
+}
+
+#endif /* CONFIG_RFS_ACCEL */
+
+void efx_farch_filter_sync_rx_mode(struct efx_nic *efx)
+{
+ struct net_device *net_dev = efx->net_dev;
+ struct netdev_hw_addr *ha;
+ union efx_multicast_hash *mc_hash = &efx->multicast_hash;
+ u32 crc;
+ int bit;
+
+ if (!efx_dev_registered(efx))
+ return;
+
+ netif_addr_lock_bh(net_dev);
+
+ efx->unicast_filter = !(net_dev->flags & IFF_PROMISC);
+
+ /* Build multicast hash table */
+ if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
+ memset(mc_hash, 0xff, sizeof(*mc_hash));
+ } else {
+ memset(mc_hash, 0x00, sizeof(*mc_hash));
+ netdev_for_each_mc_addr(ha, net_dev) {
+ crc = ether_crc_le(ETH_ALEN, ha->addr);
+ bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
+ __set_bit_le(bit, mc_hash);
+ }
+
+ /* Broadcast packets go through the multicast hash filter.
+ * ether_crc_le() of the broadcast address is 0xbe2612ff
+ * so we always add bit 0xff to the mask.
+ */
+ __set_bit_le(0xff, mc_hash);
+ }
+
+ netif_addr_unlock_bh(net_dev);
+}
diff --git a/kernel/drivers/net/ethernet/sfc/farch_regs.h b/kernel/drivers/net/ethernet/sfc/farch_regs.h
new file mode 100644
index 000000000..7019a712e
--- /dev/null
+++ b/kernel/drivers/net/ethernet/sfc/farch_regs.h
@@ -0,0 +1,2932 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2012 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_FARCH_REGS_H
+#define EFX_FARCH_REGS_H
+
+/*
+ * Falcon hardware architecture definitions have a name prefix following
+ * the format:
+ *
+ * F<type>_<min-rev><max-rev>_
+ *
+ * The following <type> strings are used:
+ *
+ * MMIO register MC register Host memory structure
+ * -------------------------------------------------------------
+ * Address R MCR
+ * Bitfield RF MCRF SF
+ * Enumerator FE MCFE SE
+ *
+ * <min-rev> is the first revision to which the definition applies:
+ *
+ * A: Falcon A1 (SFC4000AB)
+ * B: Falcon B0 (SFC4000BA)
+ * C: Siena A0 (SFL9021AA)
+ *
+ * If the definition has been changed or removed in later revisions
+ * then <max-rev> is the last revision to which the definition applies;
+ * otherwise it is "Z".
+ */
+
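+/* For example, reading the definitions below: FR_AZ_ADR_REGION is the MMIO
+ * address of a register present from Falcon A1 onwards and never removed
+ * ("Z"), and FRF_AZ_ADR_REGION0_LBN/_WIDTH give the lowest bit number and
+ * width of one of its bitfields.
+ */
+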
+/**************************************************************************
+ *
+ * Falcon/Siena registers and descriptors
+ *
+ **************************************************************************
+ */
+
+/* ADR_REGION_REG: Address region register */
+#define FR_AZ_ADR_REGION 0x00000000
+#define FRF_AZ_ADR_REGION3_LBN 96
+#define FRF_AZ_ADR_REGION3_WIDTH 18
+#define FRF_AZ_ADR_REGION2_LBN 64
+#define FRF_AZ_ADR_REGION2_WIDTH 18
+#define FRF_AZ_ADR_REGION1_LBN 32
+#define FRF_AZ_ADR_REGION1_WIDTH 18
+#define FRF_AZ_ADR_REGION0_LBN 0
+#define FRF_AZ_ADR_REGION0_WIDTH 18
+
+/* INT_EN_REG_KER: Kernel driver Interrupt enable register */
+#define FR_AZ_INT_EN_KER 0x00000010
+#define FRF_AZ_KER_INT_LEVE_SEL_LBN 8
+#define FRF_AZ_KER_INT_LEVE_SEL_WIDTH 6
+#define FRF_AZ_KER_INT_CHAR_LBN 4
+#define FRF_AZ_KER_INT_CHAR_WIDTH 1
+#define FRF_AZ_KER_INT_KER_LBN 3
+#define FRF_AZ_KER_INT_KER_WIDTH 1
+#define FRF_AZ_DRV_INT_EN_KER_LBN 0
+#define FRF_AZ_DRV_INT_EN_KER_WIDTH 1
+
+/* INT_EN_REG_CHAR: Char Driver interrupt enable register */
+#define FR_BZ_INT_EN_CHAR 0x00000020
+#define FRF_BZ_CHAR_INT_LEVE_SEL_LBN 8
+#define FRF_BZ_CHAR_INT_LEVE_SEL_WIDTH 6
+#define FRF_BZ_CHAR_INT_CHAR_LBN 4
+#define FRF_BZ_CHAR_INT_CHAR_WIDTH 1
+#define FRF_BZ_CHAR_INT_KER_LBN 3
+#define FRF_BZ_CHAR_INT_KER_WIDTH 1
+#define FRF_BZ_DRV_INT_EN_CHAR_LBN 0
+#define FRF_BZ_DRV_INT_EN_CHAR_WIDTH 1
+
+/* INT_ADR_REG_KER: Interrupt host address for Kernel driver */
+#define FR_AZ_INT_ADR_KER 0x00000030
+#define FRF_AZ_NORM_INT_VEC_DIS_KER_LBN 64
+#define FRF_AZ_NORM_INT_VEC_DIS_KER_WIDTH 1
+#define FRF_AZ_INT_ADR_KER_LBN 0
+#define FRF_AZ_INT_ADR_KER_WIDTH 64
+
+/* INT_ADR_REG_CHAR: Interrupt host address for Char driver */
+#define FR_BZ_INT_ADR_CHAR 0x00000040
+#define FRF_BZ_NORM_INT_VEC_DIS_CHAR_LBN 64
+#define FRF_BZ_NORM_INT_VEC_DIS_CHAR_WIDTH 1
+#define FRF_BZ_INT_ADR_CHAR_LBN 0
+#define FRF_BZ_INT_ADR_CHAR_WIDTH 64
+
+/* INT_ACK_KER: Kernel interrupt acknowledge register */
+#define FR_AA_INT_ACK_KER 0x00000050
+#define FRF_AA_INT_ACK_KER_FIELD_LBN 0
+#define FRF_AA_INT_ACK_KER_FIELD_WIDTH 32
+
+/* INT_ISR0_REG: Function 0 Interrupt Acknowledge Status register */
+#define FR_BZ_INT_ISR0 0x00000090
+#define FRF_BZ_INT_ISR_REG_LBN 0
+#define FRF_BZ_INT_ISR_REG_WIDTH 64
+
+/* HW_INIT_REG: Hardware initialization register */
+#define FR_AZ_HW_INIT 0x000000c0
+#define FRF_BB_BDMRD_CPLF_FULL_LBN 124
+#define FRF_BB_BDMRD_CPLF_FULL_WIDTH 1
+#define FRF_BB_PCIE_CPL_TIMEOUT_CTRL_LBN 121
+#define FRF_BB_PCIE_CPL_TIMEOUT_CTRL_WIDTH 3
+#define FRF_CZ_TX_MRG_TAGS_LBN 120
+#define FRF_CZ_TX_MRG_TAGS_WIDTH 1
+#define FRF_AB_TRGT_MASK_ALL_LBN 100
+#define FRF_AB_TRGT_MASK_ALL_WIDTH 1
+#define FRF_AZ_DOORBELL_DROP_LBN 92
+#define FRF_AZ_DOORBELL_DROP_WIDTH 8
+#define FRF_AB_TX_RREQ_MASK_EN_LBN 76
+#define FRF_AB_TX_RREQ_MASK_EN_WIDTH 1
+#define FRF_AB_PE_EIDLE_DIS_LBN 75
+#define FRF_AB_PE_EIDLE_DIS_WIDTH 1
+#define FRF_AA_FC_BLOCKING_EN_LBN 45
+#define FRF_AA_FC_BLOCKING_EN_WIDTH 1
+#define FRF_BZ_B2B_REQ_EN_LBN 45
+#define FRF_BZ_B2B_REQ_EN_WIDTH 1
+#define FRF_AA_B2B_REQ_EN_LBN 44
+#define FRF_AA_B2B_REQ_EN_WIDTH 1
+#define FRF_BB_FC_BLOCKING_EN_LBN 44
+#define FRF_BB_FC_BLOCKING_EN_WIDTH 1
+#define FRF_AZ_POST_WR_MASK_LBN 40
+#define FRF_AZ_POST_WR_MASK_WIDTH 4
+#define FRF_AZ_TLP_TC_LBN 34
+#define FRF_AZ_TLP_TC_WIDTH 3
+#define FRF_AZ_TLP_ATTR_LBN 32
+#define FRF_AZ_TLP_ATTR_WIDTH 2
+#define FRF_AB_INTB_VEC_LBN 24
+#define FRF_AB_INTB_VEC_WIDTH 5
+#define FRF_AB_INTA_VEC_LBN 16
+#define FRF_AB_INTA_VEC_WIDTH 5
+#define FRF_AZ_WD_TIMER_LBN 8
+#define FRF_AZ_WD_TIMER_WIDTH 8
+#define FRF_AZ_US_DISABLE_LBN 5
+#define FRF_AZ_US_DISABLE_WIDTH 1
+#define FRF_AZ_TLP_EP_LBN 4
+#define FRF_AZ_TLP_EP_WIDTH 1
+#define FRF_AZ_ATTR_SEL_LBN 3
+#define FRF_AZ_ATTR_SEL_WIDTH 1
+#define FRF_AZ_TD_SEL_LBN 1
+#define FRF_AZ_TD_SEL_WIDTH 1
+#define FRF_AZ_TLP_TD_LBN 0
+#define FRF_AZ_TLP_TD_WIDTH 1
+
+/* EE_SPI_HCMD_REG: SPI host command register */
+#define FR_AB_EE_SPI_HCMD 0x00000100
+#define FRF_AB_EE_SPI_HCMD_CMD_EN_LBN 31
+#define FRF_AB_EE_SPI_HCMD_CMD_EN_WIDTH 1
+#define FRF_AB_EE_WR_TIMER_ACTIVE_LBN 28
+#define FRF_AB_EE_WR_TIMER_ACTIVE_WIDTH 1
+#define FRF_AB_EE_SPI_HCMD_SF_SEL_LBN 24
+#define FRF_AB_EE_SPI_HCMD_SF_SEL_WIDTH 1
+#define FRF_AB_EE_SPI_HCMD_DABCNT_LBN 16
+#define FRF_AB_EE_SPI_HCMD_DABCNT_WIDTH 5
+#define FRF_AB_EE_SPI_HCMD_READ_LBN 15
+#define FRF_AB_EE_SPI_HCMD_READ_WIDTH 1
+#define FRF_AB_EE_SPI_HCMD_DUBCNT_LBN 12
+#define FRF_AB_EE_SPI_HCMD_DUBCNT_WIDTH 2
+#define FRF_AB_EE_SPI_HCMD_ADBCNT_LBN 8
+#define FRF_AB_EE_SPI_HCMD_ADBCNT_WIDTH 2
+#define FRF_AB_EE_SPI_HCMD_ENC_LBN 0
+#define FRF_AB_EE_SPI_HCMD_ENC_WIDTH 8
+
+/* USR_EV_CFG: User Level Event Configuration register */
+#define FR_CZ_USR_EV_CFG 0x00000100
+#define FRF_CZ_USREV_DIS_LBN 16
+#define FRF_CZ_USREV_DIS_WIDTH 1
+#define FRF_CZ_DFLT_EVQ_LBN 0
+#define FRF_CZ_DFLT_EVQ_WIDTH 10
+
+/* EE_SPI_HADR_REG: SPI host address register */
+#define FR_AB_EE_SPI_HADR 0x00000110
+#define FRF_AB_EE_SPI_HADR_DUBYTE_LBN 24
+#define FRF_AB_EE_SPI_HADR_DUBYTE_WIDTH 8
+#define FRF_AB_EE_SPI_HADR_ADR_LBN 0
+#define FRF_AB_EE_SPI_HADR_ADR_WIDTH 24
+
+/* EE_SPI_HDATA_REG: SPI host data register */
+#define FR_AB_EE_SPI_HDATA 0x00000120
+#define FRF_AB_EE_SPI_HDATA3_LBN 96
+#define FRF_AB_EE_SPI_HDATA3_WIDTH 32
+#define FRF_AB_EE_SPI_HDATA2_LBN 64
+#define FRF_AB_EE_SPI_HDATA2_WIDTH 32
+#define FRF_AB_EE_SPI_HDATA1_LBN 32
+#define FRF_AB_EE_SPI_HDATA1_WIDTH 32
+#define FRF_AB_EE_SPI_HDATA0_LBN 0
+#define FRF_AB_EE_SPI_HDATA0_WIDTH 32
+
+/* EE_BASE_PAGE_REG: Expansion ROM base mirror register */
+#define FR_AB_EE_BASE_PAGE 0x00000130
+#define FRF_AB_EE_EXPROM_MASK_LBN 16
+#define FRF_AB_EE_EXPROM_MASK_WIDTH 13
+#define FRF_AB_EE_EXP_ROM_WINDOW_BASE_LBN 0
+#define FRF_AB_EE_EXP_ROM_WINDOW_BASE_WIDTH 13
+
+/* EE_VPD_CFG0_REG: SPI/VPD configuration register 0 */
+#define FR_AB_EE_VPD_CFG0 0x00000140
+#define FRF_AB_EE_SF_FASTRD_EN_LBN 127
+#define FRF_AB_EE_SF_FASTRD_EN_WIDTH 1
+#define FRF_AB_EE_SF_CLOCK_DIV_LBN 120
+#define FRF_AB_EE_SF_CLOCK_DIV_WIDTH 7
+#define FRF_AB_EE_VPD_WIP_POLL_LBN 119
+#define FRF_AB_EE_VPD_WIP_POLL_WIDTH 1
+#define FRF_AB_EE_EE_CLOCK_DIV_LBN 112
+#define FRF_AB_EE_EE_CLOCK_DIV_WIDTH 7
+#define FRF_AB_EE_EE_WR_TMR_VALUE_LBN 96
+#define FRF_AB_EE_EE_WR_TMR_VALUE_WIDTH 16
+#define FRF_AB_EE_VPDW_LENGTH_LBN 80
+#define FRF_AB_EE_VPDW_LENGTH_WIDTH 15
+#define FRF_AB_EE_VPDW_BASE_LBN 64
+#define FRF_AB_EE_VPDW_BASE_WIDTH 15
+#define FRF_AB_EE_VPD_WR_CMD_EN_LBN 56
+#define FRF_AB_EE_VPD_WR_CMD_EN_WIDTH 8
+#define FRF_AB_EE_VPD_BASE_LBN 32
+#define FRF_AB_EE_VPD_BASE_WIDTH 24
+#define FRF_AB_EE_VPD_LENGTH_LBN 16
+#define FRF_AB_EE_VPD_LENGTH_WIDTH 15
+#define FRF_AB_EE_VPD_AD_SIZE_LBN 8
+#define FRF_AB_EE_VPD_AD_SIZE_WIDTH 5
+#define FRF_AB_EE_VPD_ACCESS_ON_LBN 5
+#define FRF_AB_EE_VPD_ACCESS_ON_WIDTH 1
+#define FRF_AB_EE_VPD_ACCESS_BLOCK_LBN 4
+#define FRF_AB_EE_VPD_ACCESS_BLOCK_WIDTH 1
+#define FRF_AB_EE_VPD_DEV_SF_SEL_LBN 2
+#define FRF_AB_EE_VPD_DEV_SF_SEL_WIDTH 1
+#define FRF_AB_EE_VPD_EN_AD9_MODE_LBN 1
+#define FRF_AB_EE_VPD_EN_AD9_MODE_WIDTH 1
+#define FRF_AB_EE_VPD_EN_LBN 0
+#define FRF_AB_EE_VPD_EN_WIDTH 1
+
+/* EE_VPD_SW_CNTL_REG: VPD access SW control register */
+#define FR_AB_EE_VPD_SW_CNTL 0x00000150
+#define FRF_AB_EE_VPD_CYCLE_PENDING_LBN 31
+#define FRF_AB_EE_VPD_CYCLE_PENDING_WIDTH 1
+#define FRF_AB_EE_VPD_CYC_WRITE_LBN 28
+#define FRF_AB_EE_VPD_CYC_WRITE_WIDTH 1
+#define FRF_AB_EE_VPD_CYC_ADR_LBN 0
+#define FRF_AB_EE_VPD_CYC_ADR_WIDTH 15
+
+/* EE_VPD_SW_DATA_REG: VPD access SW data register */
+#define FR_AB_EE_VPD_SW_DATA 0x00000160
+#define FRF_AB_EE_VPD_CYC_DAT_LBN 0
+#define FRF_AB_EE_VPD_CYC_DAT_WIDTH 32
+
+/* PBMX_DBG_IADDR_REG: Capture Module address register */
+#define FR_CZ_PBMX_DBG_IADDR 0x000001f0
+#define FRF_CZ_PBMX_DBG_IADDR_LBN 0
+#define FRF_CZ_PBMX_DBG_IADDR_WIDTH 32
+
+/* PCIE_CORE_INDIRECT_REG: Indirect Access to PCIE Core registers */
+#define FR_BB_PCIE_CORE_INDIRECT 0x000001f0
+#define FRF_BB_PCIE_CORE_TARGET_DATA_LBN 32
+#define FRF_BB_PCIE_CORE_TARGET_DATA_WIDTH 32
+#define FRF_BB_PCIE_CORE_INDIRECT_ACCESS_DIR_LBN 15
+#define FRF_BB_PCIE_CORE_INDIRECT_ACCESS_DIR_WIDTH 1
+#define FRF_BB_PCIE_CORE_TARGET_REG_ADRS_LBN 0
+#define FRF_BB_PCIE_CORE_TARGET_REG_ADRS_WIDTH 12
+
+/* PBMX_DBG_IDATA_REG: Capture Module data register */
+#define FR_CZ_PBMX_DBG_IDATA 0x000001f8
+#define FRF_CZ_PBMX_DBG_IDATA_LBN 0
+#define FRF_CZ_PBMX_DBG_IDATA_WIDTH 64
+
+/* NIC_STAT_REG: NIC status register */
+#define FR_AB_NIC_STAT 0x00000200
+#define FRF_BB_AER_DIS_LBN 34
+#define FRF_BB_AER_DIS_WIDTH 1
+#define FRF_BB_EE_STRAP_EN_LBN 31
+#define FRF_BB_EE_STRAP_EN_WIDTH 1
+#define FRF_BB_EE_STRAP_LBN 24
+#define FRF_BB_EE_STRAP_WIDTH 4
+#define FRF_BB_REVISION_ID_LBN 17
+#define FRF_BB_REVISION_ID_WIDTH 7
+#define FRF_AB_ONCHIP_SRAM_LBN 16
+#define FRF_AB_ONCHIP_SRAM_WIDTH 1
+#define FRF_AB_SF_PRST_LBN 9
+#define FRF_AB_SF_PRST_WIDTH 1
+#define FRF_AB_EE_PRST_LBN 8
+#define FRF_AB_EE_PRST_WIDTH 1
+#define FRF_AB_ATE_MODE_LBN 3
+#define FRF_AB_ATE_MODE_WIDTH 1
+#define FRF_AB_STRAP_PINS_LBN 0
+#define FRF_AB_STRAP_PINS_WIDTH 3
+
+/* GPIO_CTL_REG: GPIO control register */
+#define FR_AB_GPIO_CTL 0x00000210
+#define FRF_AB_GPIO_OUT3_LBN 112
+#define FRF_AB_GPIO_OUT3_WIDTH 16
+#define FRF_AB_GPIO_IN3_LBN 104
+#define FRF_AB_GPIO_IN3_WIDTH 8
+#define FRF_AB_GPIO_PWRUP_VALUE3_LBN 96
+#define FRF_AB_GPIO_PWRUP_VALUE3_WIDTH 8
+#define FRF_AB_GPIO_OUT2_LBN 80
+#define FRF_AB_GPIO_OUT2_WIDTH 16
+#define FRF_AB_GPIO_IN2_LBN 72
+#define FRF_AB_GPIO_IN2_WIDTH 8
+#define FRF_AB_GPIO_PWRUP_VALUE2_LBN 64
+#define FRF_AB_GPIO_PWRUP_VALUE2_WIDTH 8
+#define FRF_AB_GPIO15_OEN_LBN 63
+#define FRF_AB_GPIO15_OEN_WIDTH 1
+#define FRF_AB_GPIO14_OEN_LBN 62
+#define FRF_AB_GPIO14_OEN_WIDTH 1
+#define FRF_AB_GPIO13_OEN_LBN 61
+#define FRF_AB_GPIO13_OEN_WIDTH 1
+#define FRF_AB_GPIO12_OEN_LBN 60
+#define FRF_AB_GPIO12_OEN_WIDTH 1
+#define FRF_AB_GPIO11_OEN_LBN 59
+#define FRF_AB_GPIO11_OEN_WIDTH 1
+#define FRF_AB_GPIO10_OEN_LBN 58
+#define FRF_AB_GPIO10_OEN_WIDTH 1
+#define FRF_AB_GPIO9_OEN_LBN 57
+#define FRF_AB_GPIO9_OEN_WIDTH 1
+#define FRF_AB_GPIO8_OEN_LBN 56
+#define FRF_AB_GPIO8_OEN_WIDTH 1
+#define FRF_AB_GPIO15_OUT_LBN 55
+#define FRF_AB_GPIO15_OUT_WIDTH 1
+#define FRF_AB_GPIO14_OUT_LBN 54
+#define FRF_AB_GPIO14_OUT_WIDTH 1
+#define FRF_AB_GPIO13_OUT_LBN 53
+#define FRF_AB_GPIO13_OUT_WIDTH 1
+#define FRF_AB_GPIO12_OUT_LBN 52
+#define FRF_AB_GPIO12_OUT_WIDTH 1
+#define FRF_AB_GPIO11_OUT_LBN 51
+#define FRF_AB_GPIO11_OUT_WIDTH 1
+#define FRF_AB_GPIO10_OUT_LBN 50
+#define FRF_AB_GPIO10_OUT_WIDTH 1
+#define FRF_AB_GPIO9_OUT_LBN 49
+#define FRF_AB_GPIO9_OUT_WIDTH 1
+#define FRF_AB_GPIO8_OUT_LBN 48
+#define FRF_AB_GPIO8_OUT_WIDTH 1
+#define FRF_AB_GPIO15_IN_LBN 47
+#define FRF_AB_GPIO15_IN_WIDTH 1
+#define FRF_AB_GPIO14_IN_LBN 46
+#define FRF_AB_GPIO14_IN_WIDTH 1
+#define FRF_AB_GPIO13_IN_LBN 45
+#define FRF_AB_GPIO13_IN_WIDTH 1
+#define FRF_AB_GPIO12_IN_LBN 44
+#define FRF_AB_GPIO12_IN_WIDTH 1
+#define FRF_AB_GPIO11_IN_LBN 43
+#define FRF_AB_GPIO11_IN_WIDTH 1
+#define FRF_AB_GPIO10_IN_LBN 42
+#define FRF_AB_GPIO10_IN_WIDTH 1
+#define FRF_AB_GPIO9_IN_LBN 41
+#define FRF_AB_GPIO9_IN_WIDTH 1
+#define FRF_AB_GPIO8_IN_LBN 40
+#define FRF_AB_GPIO8_IN_WIDTH 1
+#define FRF_AB_GPIO15_PWRUP_VALUE_LBN 39
+#define FRF_AB_GPIO15_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO14_PWRUP_VALUE_LBN 38
+#define FRF_AB_GPIO14_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO13_PWRUP_VALUE_LBN 37
+#define FRF_AB_GPIO13_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO12_PWRUP_VALUE_LBN 36
+#define FRF_AB_GPIO12_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO11_PWRUP_VALUE_LBN 35
+#define FRF_AB_GPIO11_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO10_PWRUP_VALUE_LBN 34
+#define FRF_AB_GPIO10_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO9_PWRUP_VALUE_LBN 33
+#define FRF_AB_GPIO9_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO8_PWRUP_VALUE_LBN 32
+#define FRF_AB_GPIO8_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_CLK156_OUT_EN_LBN 31
+#define FRF_AB_CLK156_OUT_EN_WIDTH 1
+#define FRF_AB_USE_NIC_CLK_LBN 30
+#define FRF_AB_USE_NIC_CLK_WIDTH 1
+#define FRF_AB_GPIO5_OEN_LBN 29
+#define FRF_AB_GPIO5_OEN_WIDTH 1
+#define FRF_AB_GPIO4_OEN_LBN 28
+#define FRF_AB_GPIO4_OEN_WIDTH 1
+#define FRF_AB_GPIO3_OEN_LBN 27
+#define FRF_AB_GPIO3_OEN_WIDTH 1
+#define FRF_AB_GPIO2_OEN_LBN 26
+#define FRF_AB_GPIO2_OEN_WIDTH 1
+#define FRF_AB_GPIO1_OEN_LBN 25
+#define FRF_AB_GPIO1_OEN_WIDTH 1
+#define FRF_AB_GPIO0_OEN_LBN 24
+#define FRF_AB_GPIO0_OEN_WIDTH 1
+#define FRF_AB_GPIO7_OUT_LBN 23
+#define FRF_AB_GPIO7_OUT_WIDTH 1
+#define FRF_AB_GPIO6_OUT_LBN 22
+#define FRF_AB_GPIO6_OUT_WIDTH 1
+#define FRF_AB_GPIO5_OUT_LBN 21
+#define FRF_AB_GPIO5_OUT_WIDTH 1
+#define FRF_AB_GPIO4_OUT_LBN 20
+#define FRF_AB_GPIO4_OUT_WIDTH 1
+#define FRF_AB_GPIO3_OUT_LBN 19
+#define FRF_AB_GPIO3_OUT_WIDTH 1
+#define FRF_AB_GPIO2_OUT_LBN 18
+#define FRF_AB_GPIO2_OUT_WIDTH 1
+#define FRF_AB_GPIO1_OUT_LBN 17
+#define FRF_AB_GPIO1_OUT_WIDTH 1
+#define FRF_AB_GPIO0_OUT_LBN 16
+#define FRF_AB_GPIO0_OUT_WIDTH 1
+#define FRF_AB_GPIO7_IN_LBN 15
+#define FRF_AB_GPIO7_IN_WIDTH 1
+#define FRF_AB_GPIO6_IN_LBN 14
+#define FRF_AB_GPIO6_IN_WIDTH 1
+#define FRF_AB_GPIO5_IN_LBN 13
+#define FRF_AB_GPIO5_IN_WIDTH 1
+#define FRF_AB_GPIO4_IN_LBN 12
+#define FRF_AB_GPIO4_IN_WIDTH 1
+#define FRF_AB_GPIO3_IN_LBN 11
+#define FRF_AB_GPIO3_IN_WIDTH 1
+#define FRF_AB_GPIO2_IN_LBN 10
+#define FRF_AB_GPIO2_IN_WIDTH 1
+#define FRF_AB_GPIO1_IN_LBN 9
+#define FRF_AB_GPIO1_IN_WIDTH 1
+#define FRF_AB_GPIO0_IN_LBN 8
+#define FRF_AB_GPIO0_IN_WIDTH 1
+#define FRF_AB_GPIO7_PWRUP_VALUE_LBN 7
+#define FRF_AB_GPIO7_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO6_PWRUP_VALUE_LBN 6
+#define FRF_AB_GPIO6_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO5_PWRUP_VALUE_LBN 5
+#define FRF_AB_GPIO5_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO4_PWRUP_VALUE_LBN 4
+#define FRF_AB_GPIO4_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO3_PWRUP_VALUE_LBN 3
+#define FRF_AB_GPIO3_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO2_PWRUP_VALUE_LBN 2
+#define FRF_AB_GPIO2_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO1_PWRUP_VALUE_LBN 1
+#define FRF_AB_GPIO1_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO0_PWRUP_VALUE_LBN 0
+#define FRF_AB_GPIO0_PWRUP_VALUE_WIDTH 1
+
+/* GLB_CTL_REG: Global control register */
+#define FR_AB_GLB_CTL 0x00000220
+#define FRF_AB_EXT_PHY_RST_CTL_LBN 63
+#define FRF_AB_EXT_PHY_RST_CTL_WIDTH 1
+#define FRF_AB_XAUI_SD_RST_CTL_LBN 62
+#define FRF_AB_XAUI_SD_RST_CTL_WIDTH 1
+#define FRF_AB_PCIE_SD_RST_CTL_LBN 61
+#define FRF_AB_PCIE_SD_RST_CTL_WIDTH 1
+#define FRF_AA_PCIX_RST_CTL_LBN 60
+#define FRF_AA_PCIX_RST_CTL_WIDTH 1
+#define FRF_BB_BIU_RST_CTL_LBN 60
+#define FRF_BB_BIU_RST_CTL_WIDTH 1
+#define FRF_AB_PCIE_STKY_RST_CTL_LBN 59
+#define FRF_AB_PCIE_STKY_RST_CTL_WIDTH 1
+#define FRF_AB_PCIE_NSTKY_RST_CTL_LBN 58
+#define FRF_AB_PCIE_NSTKY_RST_CTL_WIDTH 1
+#define FRF_AB_PCIE_CORE_RST_CTL_LBN 57
+#define FRF_AB_PCIE_CORE_RST_CTL_WIDTH 1
+#define FRF_AB_XGRX_RST_CTL_LBN 56
+#define FRF_AB_XGRX_RST_CTL_WIDTH 1
+#define FRF_AB_XGTX_RST_CTL_LBN 55
+#define FRF_AB_XGTX_RST_CTL_WIDTH 1
+#define FRF_AB_EM_RST_CTL_LBN 54
+#define FRF_AB_EM_RST_CTL_WIDTH 1
+#define FRF_AB_EV_RST_CTL_LBN 53
+#define FRF_AB_EV_RST_CTL_WIDTH 1
+#define FRF_AB_SR_RST_CTL_LBN 52
+#define FRF_AB_SR_RST_CTL_WIDTH 1
+#define FRF_AB_RX_RST_CTL_LBN 51
+#define FRF_AB_RX_RST_CTL_WIDTH 1
+#define FRF_AB_TX_RST_CTL_LBN 50
+#define FRF_AB_TX_RST_CTL_WIDTH 1
+#define FRF_AB_EE_RST_CTL_LBN 49
+#define FRF_AB_EE_RST_CTL_WIDTH 1
+#define FRF_AB_CS_RST_CTL_LBN 48
+#define FRF_AB_CS_RST_CTL_WIDTH 1
+#define FRF_AB_HOT_RST_CTL_LBN 40
+#define FRF_AB_HOT_RST_CTL_WIDTH 2
+#define FRF_AB_RST_EXT_PHY_LBN 31
+#define FRF_AB_RST_EXT_PHY_WIDTH 1
+#define FRF_AB_RST_XAUI_SD_LBN 30
+#define FRF_AB_RST_XAUI_SD_WIDTH 1
+#define FRF_AB_RST_PCIE_SD_LBN 29
+#define FRF_AB_RST_PCIE_SD_WIDTH 1
+#define FRF_AA_RST_PCIX_LBN 28
+#define FRF_AA_RST_PCIX_WIDTH 1
+#define FRF_BB_RST_BIU_LBN 28
+#define FRF_BB_RST_BIU_WIDTH 1
+#define FRF_AB_RST_PCIE_STKY_LBN 27
+#define FRF_AB_RST_PCIE_STKY_WIDTH 1
+#define FRF_AB_RST_PCIE_NSTKY_LBN 26
+#define FRF_AB_RST_PCIE_NSTKY_WIDTH 1
+#define FRF_AB_RST_PCIE_CORE_LBN 25
+#define FRF_AB_RST_PCIE_CORE_WIDTH 1
+#define FRF_AB_RST_XGRX_LBN 24
+#define FRF_AB_RST_XGRX_WIDTH 1
+#define FRF_AB_RST_XGTX_LBN 23
+#define FRF_AB_RST_XGTX_WIDTH 1
+#define FRF_AB_RST_EM_LBN 22
+#define FRF_AB_RST_EM_WIDTH 1
+#define FRF_AB_RST_EV_LBN 21
+#define FRF_AB_RST_EV_WIDTH 1
+#define FRF_AB_RST_SR_LBN 20
+#define FRF_AB_RST_SR_WIDTH 1
+#define FRF_AB_RST_RX_LBN 19
+#define FRF_AB_RST_RX_WIDTH 1
+#define FRF_AB_RST_TX_LBN 18
+#define FRF_AB_RST_TX_WIDTH 1
+#define FRF_AB_RST_SF_LBN 17
+#define FRF_AB_RST_SF_WIDTH 1
+#define FRF_AB_RST_CS_LBN 16
+#define FRF_AB_RST_CS_WIDTH 1
+#define FRF_AB_INT_RST_DUR_LBN 4
+#define FRF_AB_INT_RST_DUR_WIDTH 3
+#define FRF_AB_EXT_PHY_RST_DUR_LBN 1
+#define FRF_AB_EXT_PHY_RST_DUR_WIDTH 3
+#define FFE_AB_EXT_PHY_RST_DUR_10240US 7
+#define FFE_AB_EXT_PHY_RST_DUR_5120US 6
+#define FFE_AB_EXT_PHY_RST_DUR_2560US 5
+#define FFE_AB_EXT_PHY_RST_DUR_1280US 4
+#define FFE_AB_EXT_PHY_RST_DUR_640US 3
+#define FFE_AB_EXT_PHY_RST_DUR_320US 2
+#define FFE_AB_EXT_PHY_RST_DUR_160US 1
+#define FFE_AB_EXT_PHY_RST_DUR_80US 0
+#define FRF_AB_SWRST_LBN 0
+#define FRF_AB_SWRST_WIDTH 1
+
+/* FATAL_INTR_REG_KER: Fatal interrupt register for Kernel */
+#define FR_AZ_FATAL_INTR_KER 0x00000230
+#define FRF_CZ_SRAM_PERR_INT_P_KER_EN_LBN 44
+#define FRF_CZ_SRAM_PERR_INT_P_KER_EN_WIDTH 1
+#define FRF_AB_PCI_BUSERR_INT_KER_EN_LBN 43
+#define FRF_AB_PCI_BUSERR_INT_KER_EN_WIDTH 1
+#define FRF_CZ_MBU_PERR_INT_KER_EN_LBN 43
+#define FRF_CZ_MBU_PERR_INT_KER_EN_WIDTH 1
+#define FRF_AZ_SRAM_OOB_INT_KER_EN_LBN 42
+#define FRF_AZ_SRAM_OOB_INT_KER_EN_WIDTH 1
+#define FRF_AZ_BUFID_OOB_INT_KER_EN_LBN 41
+#define FRF_AZ_BUFID_OOB_INT_KER_EN_WIDTH 1
+#define FRF_AZ_MEM_PERR_INT_KER_EN_LBN 40
+#define FRF_AZ_MEM_PERR_INT_KER_EN_WIDTH 1
+#define FRF_AZ_RBUF_OWN_INT_KER_EN_LBN 39
+#define FRF_AZ_RBUF_OWN_INT_KER_EN_WIDTH 1
+#define FRF_AZ_TBUF_OWN_INT_KER_EN_LBN 38
+#define FRF_AZ_TBUF_OWN_INT_KER_EN_WIDTH 1
+#define FRF_AZ_RDESCQ_OWN_INT_KER_EN_LBN 37
+#define FRF_AZ_RDESCQ_OWN_INT_KER_EN_WIDTH 1
+#define FRF_AZ_TDESCQ_OWN_INT_KER_EN_LBN 36
+#define FRF_AZ_TDESCQ_OWN_INT_KER_EN_WIDTH 1
+#define FRF_AZ_EVQ_OWN_INT_KER_EN_LBN 35
+#define FRF_AZ_EVQ_OWN_INT_KER_EN_WIDTH 1
+#define FRF_AZ_EVF_OFLO_INT_KER_EN_LBN 34
+#define FRF_AZ_EVF_OFLO_INT_KER_EN_WIDTH 1
+#define FRF_AZ_ILL_ADR_INT_KER_EN_LBN 33
+#define FRF_AZ_ILL_ADR_INT_KER_EN_WIDTH 1
+#define FRF_AZ_SRM_PERR_INT_KER_EN_LBN 32
+#define FRF_AZ_SRM_PERR_INT_KER_EN_WIDTH 1
+#define FRF_CZ_SRAM_PERR_INT_P_KER_LBN 12
+#define FRF_CZ_SRAM_PERR_INT_P_KER_WIDTH 1
+#define FRF_AB_PCI_BUSERR_INT_KER_LBN 11
+#define FRF_AB_PCI_BUSERR_INT_KER_WIDTH 1
+#define FRF_CZ_MBU_PERR_INT_KER_LBN 11
+#define FRF_CZ_MBU_PERR_INT_KER_WIDTH 1
+#define FRF_AZ_SRAM_OOB_INT_KER_LBN 10
+#define FRF_AZ_SRAM_OOB_INT_KER_WIDTH 1
+#define FRF_AZ_BUFID_DC_OOB_INT_KER_LBN 9
+#define FRF_AZ_BUFID_DC_OOB_INT_KER_WIDTH 1
+#define FRF_AZ_MEM_PERR_INT_KER_LBN 8
+#define FRF_AZ_MEM_PERR_INT_KER_WIDTH 1
+#define FRF_AZ_RBUF_OWN_INT_KER_LBN 7
+#define FRF_AZ_RBUF_OWN_INT_KER_WIDTH 1
+#define FRF_AZ_TBUF_OWN_INT_KER_LBN 6
+#define FRF_AZ_TBUF_OWN_INT_KER_WIDTH 1
+#define FRF_AZ_RDESCQ_OWN_INT_KER_LBN 5
+#define FRF_AZ_RDESCQ_OWN_INT_KER_WIDTH 1
+#define FRF_AZ_TDESCQ_OWN_INT_KER_LBN 4
+#define FRF_AZ_TDESCQ_OWN_INT_KER_WIDTH 1
+#define FRF_AZ_EVQ_OWN_INT_KER_LBN 3
+#define FRF_AZ_EVQ_OWN_INT_KER_WIDTH 1
+#define FRF_AZ_EVF_OFLO_INT_KER_LBN 2
+#define FRF_AZ_EVF_OFLO_INT_KER_WIDTH 1
+#define FRF_AZ_ILL_ADR_INT_KER_LBN 1
+#define FRF_AZ_ILL_ADR_INT_KER_WIDTH 1
+#define FRF_AZ_SRM_PERR_INT_KER_LBN 0
+#define FRF_AZ_SRM_PERR_INT_KER_WIDTH 1
+
+/* FATAL_INTR_REG_CHAR: Fatal interrupt register for Char */
+#define FR_BZ_FATAL_INTR_CHAR 0x00000240
+#define FRF_CZ_SRAM_PERR_INT_P_CHAR_EN_LBN 44
+#define FRF_CZ_SRAM_PERR_INT_P_CHAR_EN_WIDTH 1
+#define FRF_BB_PCI_BUSERR_INT_CHAR_EN_LBN 43
+#define FRF_BB_PCI_BUSERR_INT_CHAR_EN_WIDTH 1
+#define FRF_CZ_MBU_PERR_INT_CHAR_EN_LBN 43
+#define FRF_CZ_MBU_PERR_INT_CHAR_EN_WIDTH 1
+#define FRF_BZ_SRAM_OOB_INT_CHAR_EN_LBN 42
+#define FRF_BZ_SRAM_OOB_INT_CHAR_EN_WIDTH 1
+#define FRF_BZ_BUFID_OOB_INT_CHAR_EN_LBN 41
+#define FRF_BZ_BUFID_OOB_INT_CHAR_EN_WIDTH 1
+#define FRF_BZ_MEM_PERR_INT_CHAR_EN_LBN 40
+#define FRF_BZ_MEM_PERR_INT_CHAR_EN_WIDTH 1
+#define FRF_BZ_RBUF_OWN_INT_CHAR_EN_LBN 39
+#define FRF_BZ_RBUF_OWN_INT_CHAR_EN_WIDTH 1
+#define FRF_BZ_TBUF_OWN_INT_CHAR_EN_LBN 38
+#define FRF_BZ_TBUF_OWN_INT_CHAR_EN_WIDTH 1
+#define FRF_BZ_RDESCQ_OWN_INT_CHAR_EN_LBN 37
+#define FRF_BZ_RDESCQ_OWN_INT_CHAR_EN_WIDTH 1
+#define FRF_BZ_TDESCQ_OWN_INT_CHAR_EN_LBN 36
+#define FRF_BZ_TDESCQ_OWN_INT_CHAR_EN_WIDTH 1
+#define FRF_BZ_EVQ_OWN_INT_CHAR_EN_LBN 35
+#define FRF_BZ_EVQ_OWN_INT_CHAR_EN_WIDTH 1
+#define FRF_BZ_EVF_OFLO_INT_CHAR_EN_LBN 34
+#define FRF_BZ_EVF_OFLO_INT_CHAR_EN_WIDTH 1
+#define FRF_BZ_ILL_ADR_INT_CHAR_EN_LBN 33
+#define FRF_BZ_ILL_ADR_INT_CHAR_EN_WIDTH 1
+#define FRF_BZ_SRM_PERR_INT_CHAR_EN_LBN 32
+#define FRF_BZ_SRM_PERR_INT_CHAR_EN_WIDTH 1
+#define FRF_CZ_SRAM_PERR_INT_P_CHAR_LBN 12
+#define FRF_CZ_SRAM_PERR_INT_P_CHAR_WIDTH 1
+#define FRF_BB_PCI_BUSERR_INT_CHAR_LBN 11
+#define FRF_BB_PCI_BUSERR_INT_CHAR_WIDTH 1
+#define FRF_CZ_MBU_PERR_INT_CHAR_LBN 11
+#define FRF_CZ_MBU_PERR_INT_CHAR_WIDTH 1
+#define FRF_BZ_SRAM_OOB_INT_CHAR_LBN 10
+#define FRF_BZ_SRAM_OOB_INT_CHAR_WIDTH 1
+#define FRF_BZ_BUFID_DC_OOB_INT_CHAR_LBN 9
+#define FRF_BZ_BUFID_DC_OOB_INT_CHAR_WIDTH 1
+#define FRF_BZ_MEM_PERR_INT_CHAR_LBN 8
+#define FRF_BZ_MEM_PERR_INT_CHAR_WIDTH 1
+#define FRF_BZ_RBUF_OWN_INT_CHAR_LBN 7
+#define FRF_BZ_RBUF_OWN_INT_CHAR_WIDTH 1
+#define FRF_BZ_TBUF_OWN_INT_CHAR_LBN 6
+#define FRF_BZ_TBUF_OWN_INT_CHAR_WIDTH 1
+#define FRF_BZ_RDESCQ_OWN_INT_CHAR_LBN 5
+#define FRF_BZ_RDESCQ_OWN_INT_CHAR_WIDTH 1
+#define FRF_BZ_TDESCQ_OWN_INT_CHAR_LBN 4
+#define FRF_BZ_TDESCQ_OWN_INT_CHAR_WIDTH 1
+#define FRF_BZ_EVQ_OWN_INT_CHAR_LBN 3
+#define FRF_BZ_EVQ_OWN_INT_CHAR_WIDTH 1
+#define FRF_BZ_EVF_OFLO_INT_CHAR_LBN 2
+#define FRF_BZ_EVF_OFLO_INT_CHAR_WIDTH 1
+#define FRF_BZ_ILL_ADR_INT_CHAR_LBN 1
+#define FRF_BZ_ILL_ADR_INT_CHAR_WIDTH 1
+#define FRF_BZ_SRM_PERR_INT_CHAR_LBN 0
+#define FRF_BZ_SRM_PERR_INT_CHAR_WIDTH 1
+
+/* DP_CTRL_REG: Datapath control register */
+#define FR_BZ_DP_CTRL 0x00000250
+#define FRF_BZ_FLS_EVQ_ID_LBN 0
+#define FRF_BZ_FLS_EVQ_ID_WIDTH 12
+
+/* MEM_STAT_REG: Memory status register */
+#define FR_AZ_MEM_STAT 0x00000260
+#define FRF_AB_MEM_PERR_VEC_LBN 53
+#define FRF_AB_MEM_PERR_VEC_WIDTH 38
+#define FRF_AB_MBIST_CORR_LBN 38
+#define FRF_AB_MBIST_CORR_WIDTH 15
+#define FRF_AB_MBIST_ERR_LBN 0
+#define FRF_AB_MBIST_ERR_WIDTH 40
+#define FRF_CZ_MEM_PERR_VEC_LBN 0
+#define FRF_CZ_MEM_PERR_VEC_WIDTH 35
+
+/* CS_DEBUG_REG: Debug register */
+#define FR_AZ_CS_DEBUG 0x00000270
+#define FRF_AB_GLB_DEBUG2_SEL_LBN 50
+#define FRF_AB_GLB_DEBUG2_SEL_WIDTH 3
+#define FRF_AB_DEBUG_BLK_SEL2_LBN 47
+#define FRF_AB_DEBUG_BLK_SEL2_WIDTH 3
+#define FRF_AB_DEBUG_BLK_SEL1_LBN 44
+#define FRF_AB_DEBUG_BLK_SEL1_WIDTH 3
+#define FRF_AB_DEBUG_BLK_SEL0_LBN 41
+#define FRF_AB_DEBUG_BLK_SEL0_WIDTH 3
+#define FRF_CZ_CS_PORT_NUM_LBN 40
+#define FRF_CZ_CS_PORT_NUM_WIDTH 2
+#define FRF_AB_MISC_DEBUG_ADDR_LBN 36
+#define FRF_AB_MISC_DEBUG_ADDR_WIDTH 5
+#define FRF_AB_SERDES_DEBUG_ADDR_LBN 31
+#define FRF_AB_SERDES_DEBUG_ADDR_WIDTH 5
+#define FRF_CZ_CS_PORT_FPE_LBN 1
+#define FRF_CZ_CS_PORT_FPE_WIDTH 35
+#define FRF_AB_EM_DEBUG_ADDR_LBN 26
+#define FRF_AB_EM_DEBUG_ADDR_WIDTH 5
+#define FRF_AB_SR_DEBUG_ADDR_LBN 21
+#define FRF_AB_SR_DEBUG_ADDR_WIDTH 5
+#define FRF_AB_EV_DEBUG_ADDR_LBN 16
+#define FRF_AB_EV_DEBUG_ADDR_WIDTH 5
+#define FRF_AB_RX_DEBUG_ADDR_LBN 11
+#define FRF_AB_RX_DEBUG_ADDR_WIDTH 5
+#define FRF_AB_TX_DEBUG_ADDR_LBN 6
+#define FRF_AB_TX_DEBUG_ADDR_WIDTH 5
+#define FRF_AB_CS_BIU_DEBUG_ADDR_LBN 1
+#define FRF_AB_CS_BIU_DEBUG_ADDR_WIDTH 5
+#define FRF_AZ_CS_DEBUG_EN_LBN 0
+#define FRF_AZ_CS_DEBUG_EN_WIDTH 1
+
+/* DRIVER_REG: Driver scratch register [0-7] */
+#define FR_AZ_DRIVER 0x00000280
+#define FR_AZ_DRIVER_STEP 16
+#define FR_AZ_DRIVER_ROWS 8
+#define FRF_AZ_DRIVER_DW0_LBN 0
+#define FRF_AZ_DRIVER_DW0_WIDTH 32
+
+/* ALTERA_BUILD_REG: Altera build register */
+#define FR_AZ_ALTERA_BUILD 0x00000300
+#define FRF_AZ_ALTERA_BUILD_VER_LBN 0
+#define FRF_AZ_ALTERA_BUILD_VER_WIDTH 32
+
+/* CSR_SPARE_REG: Spare register */
+#define FR_AZ_CSR_SPARE 0x00000310
+#define FRF_AB_MEM_PERR_EN_LBN 64
+#define FRF_AB_MEM_PERR_EN_WIDTH 38
+#define FRF_CZ_MEM_PERR_EN_LBN 64
+#define FRF_CZ_MEM_PERR_EN_WIDTH 35
+#define FRF_AB_MEM_PERR_EN_TX_DATA_LBN 72
+#define FRF_AB_MEM_PERR_EN_TX_DATA_WIDTH 2
+#define FRF_AZ_CSR_SPARE_BITS_LBN 0
+#define FRF_AZ_CSR_SPARE_BITS_WIDTH 32
+
+/* PCIE_SD_CTL0123_REG: PCIE SerDes control register 0 to 3 */
+#define FR_AB_PCIE_SD_CTL0123 0x00000320
+#define FRF_AB_PCIE_TESTSIG_H_LBN 96
+#define FRF_AB_PCIE_TESTSIG_H_WIDTH 19
+#define FRF_AB_PCIE_TESTSIG_L_LBN 64
+#define FRF_AB_PCIE_TESTSIG_L_WIDTH 19
+#define FRF_AB_PCIE_OFFSET_LBN 56
+#define FRF_AB_PCIE_OFFSET_WIDTH 8
+#define FRF_AB_PCIE_OFFSETEN_H_LBN 55
+#define FRF_AB_PCIE_OFFSETEN_H_WIDTH 1
+#define FRF_AB_PCIE_OFFSETEN_L_LBN 54
+#define FRF_AB_PCIE_OFFSETEN_L_WIDTH 1
+#define FRF_AB_PCIE_HIVMODE_H_LBN 53
+#define FRF_AB_PCIE_HIVMODE_H_WIDTH 1
+#define FRF_AB_PCIE_HIVMODE_L_LBN 52
+#define FRF_AB_PCIE_HIVMODE_L_WIDTH 1
+#define FRF_AB_PCIE_PARRESET_H_LBN 51
+#define FRF_AB_PCIE_PARRESET_H_WIDTH 1
+#define FRF_AB_PCIE_PARRESET_L_LBN 50
+#define FRF_AB_PCIE_PARRESET_L_WIDTH 1
+#define FRF_AB_PCIE_LPBKWDRV_H_LBN 49
+#define FRF_AB_PCIE_LPBKWDRV_H_WIDTH 1
+#define FRF_AB_PCIE_LPBKWDRV_L_LBN 48
+#define FRF_AB_PCIE_LPBKWDRV_L_WIDTH 1
+#define FRF_AB_PCIE_LPBK_LBN 40
+#define FRF_AB_PCIE_LPBK_WIDTH 8
+#define FRF_AB_PCIE_PARLPBK_LBN 32
+#define FRF_AB_PCIE_PARLPBK_WIDTH 8
+#define FRF_AB_PCIE_RXTERMADJ_H_LBN 30
+#define FRF_AB_PCIE_RXTERMADJ_H_WIDTH 2
+#define FRF_AB_PCIE_RXTERMADJ_L_LBN 28
+#define FRF_AB_PCIE_RXTERMADJ_L_WIDTH 2
+#define FFE_AB_PCIE_RXTERMADJ_MIN15PCNT 3
+#define FFE_AB_PCIE_RXTERMADJ_PL10PCNT 2
+#define FFE_AB_PCIE_RXTERMADJ_MIN17PCNT 1
+#define FFE_AB_PCIE_RXTERMADJ_NOMNL 0
+#define FRF_AB_PCIE_TXTERMADJ_H_LBN 26
+#define FRF_AB_PCIE_TXTERMADJ_H_WIDTH 2
+#define FRF_AB_PCIE_TXTERMADJ_L_LBN 24
+#define FRF_AB_PCIE_TXTERMADJ_L_WIDTH 2
+#define FFE_AB_PCIE_TXTERMADJ_MIN15PCNT 3
+#define FFE_AB_PCIE_TXTERMADJ_PL10PCNT 2
+#define FFE_AB_PCIE_TXTERMADJ_MIN17PCNT 1
+#define FFE_AB_PCIE_TXTERMADJ_NOMNL 0
+#define FRF_AB_PCIE_RXEQCTL_H_LBN 18
+#define FRF_AB_PCIE_RXEQCTL_H_WIDTH 2
+#define FRF_AB_PCIE_RXEQCTL_L_LBN 16
+#define FRF_AB_PCIE_RXEQCTL_L_WIDTH 2
+#define FFE_AB_PCIE_RXEQCTL_OFF_ALT 3
+#define FFE_AB_PCIE_RXEQCTL_OFF 2
+#define FFE_AB_PCIE_RXEQCTL_MIN 1
+#define FFE_AB_PCIE_RXEQCTL_MAX 0
+#define FRF_AB_PCIE_HIDRV_LBN 8
+#define FRF_AB_PCIE_HIDRV_WIDTH 8
+#define FRF_AB_PCIE_LODRV_LBN 0
+#define FRF_AB_PCIE_LODRV_WIDTH 8
+
+/* PCIE_SD_CTL45_REG: PCIE SerDes control register 4 and 5 */
+#define FR_AB_PCIE_SD_CTL45 0x00000330
+#define FRF_AB_PCIE_DTX7_LBN 60
+#define FRF_AB_PCIE_DTX7_WIDTH 4
+#define FRF_AB_PCIE_DTX6_LBN 56
+#define FRF_AB_PCIE_DTX6_WIDTH 4
+#define FRF_AB_PCIE_DTX5_LBN 52
+#define FRF_AB_PCIE_DTX5_WIDTH 4
+#define FRF_AB_PCIE_DTX4_LBN 48
+#define FRF_AB_PCIE_DTX4_WIDTH 4
+#define FRF_AB_PCIE_DTX3_LBN 44
+#define FRF_AB_PCIE_DTX3_WIDTH 4
+#define FRF_AB_PCIE_DTX2_LBN 40
+#define FRF_AB_PCIE_DTX2_WIDTH 4
+#define FRF_AB_PCIE_DTX1_LBN 36
+#define FRF_AB_PCIE_DTX1_WIDTH 4
+#define FRF_AB_PCIE_DTX0_LBN 32
+#define FRF_AB_PCIE_DTX0_WIDTH 4
+#define FRF_AB_PCIE_DEQ7_LBN 28
+#define FRF_AB_PCIE_DEQ7_WIDTH 4
+#define FRF_AB_PCIE_DEQ6_LBN 24
+#define FRF_AB_PCIE_DEQ6_WIDTH 4
+#define FRF_AB_PCIE_DEQ5_LBN 20
+#define FRF_AB_PCIE_DEQ5_WIDTH 4
+#define FRF_AB_PCIE_DEQ4_LBN 16
+#define FRF_AB_PCIE_DEQ4_WIDTH 4
+#define FRF_AB_PCIE_DEQ3_LBN 12
+#define FRF_AB_PCIE_DEQ3_WIDTH 4
+#define FRF_AB_PCIE_DEQ2_LBN 8
+#define FRF_AB_PCIE_DEQ2_WIDTH 4
+#define FRF_AB_PCIE_DEQ1_LBN 4
+#define FRF_AB_PCIE_DEQ1_WIDTH 4
+#define FRF_AB_PCIE_DEQ0_LBN 0
+#define FRF_AB_PCIE_DEQ0_WIDTH 4
+
+/* PCIE_PCS_CTL_STAT_REG: PCIE PCS control and status register */
+#define FR_AB_PCIE_PCS_CTL_STAT 0x00000340
+#define FRF_AB_PCIE_PRBSERRCOUNT0_H_LBN 52
+#define FRF_AB_PCIE_PRBSERRCOUNT0_H_WIDTH 4
+#define FRF_AB_PCIE_PRBSERRCOUNT0_L_LBN 48
+#define FRF_AB_PCIE_PRBSERRCOUNT0_L_WIDTH 4
+#define FRF_AB_PCIE_PRBSERR_LBN 40
+#define FRF_AB_PCIE_PRBSERR_WIDTH 8
+#define FRF_AB_PCIE_PRBSERRH0_LBN 32
+#define FRF_AB_PCIE_PRBSERRH0_WIDTH 8
+#define FRF_AB_PCIE_FASTINIT_H_LBN 15
+#define FRF_AB_PCIE_FASTINIT_H_WIDTH 1
+#define FRF_AB_PCIE_FASTINIT_L_LBN 14
+#define FRF_AB_PCIE_FASTINIT_L_WIDTH 1
+#define FRF_AB_PCIE_CTCDISABLE_H_LBN 13
+#define FRF_AB_PCIE_CTCDISABLE_H_WIDTH 1
+#define FRF_AB_PCIE_CTCDISABLE_L_LBN 12
+#define FRF_AB_PCIE_CTCDISABLE_L_WIDTH 1
+#define FRF_AB_PCIE_PRBSSYNC_H_LBN 11
+#define FRF_AB_PCIE_PRBSSYNC_H_WIDTH 1
+#define FRF_AB_PCIE_PRBSSYNC_L_LBN 10
+#define FRF_AB_PCIE_PRBSSYNC_L_WIDTH 1
+#define FRF_AB_PCIE_PRBSERRACK_H_LBN 9
+#define FRF_AB_PCIE_PRBSERRACK_H_WIDTH 1
+#define FRF_AB_PCIE_PRBSERRACK_L_LBN 8
+#define FRF_AB_PCIE_PRBSERRACK_L_WIDTH 1
+#define FRF_AB_PCIE_PRBSSEL_LBN 0
+#define FRF_AB_PCIE_PRBSSEL_WIDTH 8
+
+/* DEBUG_DATA_OUT_REG: Live Debug and Debug 2 out ports */
+#define FR_BB_DEBUG_DATA_OUT 0x00000350
+#define FRF_BB_DEBUG2_PORT_LBN 25
+#define FRF_BB_DEBUG2_PORT_WIDTH 15
+#define FRF_BB_DEBUG1_PORT_LBN 0
+#define FRF_BB_DEBUG1_PORT_WIDTH 25
+
+/* EVQ_RPTR_REGP0: Event queue read pointer register */
+#define FR_BZ_EVQ_RPTR_P0 0x00000400
+#define FR_BZ_EVQ_RPTR_P0_STEP 8192
+#define FR_BZ_EVQ_RPTR_P0_ROWS 1024
+/* EVQ_RPTR_REG_KER: Event queue read pointer register */
+#define FR_AA_EVQ_RPTR_KER 0x00011b00
+#define FR_AA_EVQ_RPTR_KER_STEP 4
+#define FR_AA_EVQ_RPTR_KER_ROWS 4
+/* EVQ_RPTR_REG: Event queue read pointer register */
+#define FR_BZ_EVQ_RPTR 0x00fa0000
+#define FR_BZ_EVQ_RPTR_STEP 16
+#define FR_BB_EVQ_RPTR_ROWS 4096
+#define FR_CZ_EVQ_RPTR_ROWS 1024
+/* EVQ_RPTR_REGP123: Event queue read pointer register */
+#define FR_BB_EVQ_RPTR_P123 0x01000400
+#define FR_BB_EVQ_RPTR_P123_STEP 8192
+#define FR_BB_EVQ_RPTR_P123_ROWS 3072
+#define FRF_AZ_EVQ_RPTR_VLD_LBN 15
+#define FRF_AZ_EVQ_RPTR_VLD_WIDTH 1
+#define FRF_AZ_EVQ_RPTR_LBN 0
+#define FRF_AZ_EVQ_RPTR_WIDTH 15
+
+/* TIMER_COMMAND_REGP0: Timer Command Registers */
+#define FR_BZ_TIMER_COMMAND_P0 0x00000420
+#define FR_BZ_TIMER_COMMAND_P0_STEP 8192
+#define FR_BZ_TIMER_COMMAND_P0_ROWS 1024
+/* TIMER_COMMAND_REG_KER: Timer Command Registers */
+#define FR_AA_TIMER_COMMAND_KER 0x00000420
+#define FR_AA_TIMER_COMMAND_KER_STEP 8192
+#define FR_AA_TIMER_COMMAND_KER_ROWS 4
+/* TIMER_COMMAND_REGP123: Timer Command Registers */
+#define FR_BB_TIMER_COMMAND_P123 0x01000420
+#define FR_BB_TIMER_COMMAND_P123_STEP 8192
+#define FR_BB_TIMER_COMMAND_P123_ROWS 3072
+#define FRF_CZ_TC_TIMER_MODE_LBN 14
+#define FRF_CZ_TC_TIMER_MODE_WIDTH 2
+#define FRF_AB_TC_TIMER_MODE_LBN 12
+#define FRF_AB_TC_TIMER_MODE_WIDTH 2
+#define FRF_CZ_TC_TIMER_VAL_LBN 0
+#define FRF_CZ_TC_TIMER_VAL_WIDTH 14
+#define FRF_AB_TC_TIMER_VAL_LBN 0
+#define FRF_AB_TC_TIMER_VAL_WIDTH 12
+
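Throughout this file a field is described by its _LBN (lowest bit number) and _WIDTH within a register of up to 128 bits. A minimal sketch of inserting a field with plain shifts and masks, assuming the field fits in the low 32 bits (the driver itself uses dedicated bitfield helpers; frf_insert() below is hypothetical):

	#include <stdint.h>

	/* Illustrative only: place value into bits [lbn, lbn + width) of reg. */
	static inline uint32_t frf_insert(uint32_t reg, unsigned int lbn,
					  unsigned int width, uint32_t value)
	{
		uint32_t mask = ((width < 32 ? (1u << width) : 0u) - 1u) << lbn;

		return (reg & ~mask) | ((value << lbn) & mask);
	}

A Falcon-style timer command word could then be built as frf_insert(frf_insert(0, FRF_AB_TC_TIMER_MODE_LBN, FRF_AB_TC_TIMER_MODE_WIDTH, mode), FRF_AB_TC_TIMER_VAL_LBN, FRF_AB_TC_TIMER_VAL_WIDTH, val); fields whose LBN is 32 or above live in the higher dwords of the register and need a wider representation.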
+/* DRV_EV_REG: Driver generated event register */
+#define FR_AZ_DRV_EV 0x00000440
+#define FRF_AZ_DRV_EV_QID_LBN 64
+#define FRF_AZ_DRV_EV_QID_WIDTH 12
+#define FRF_AZ_DRV_EV_DATA_LBN 0
+#define FRF_AZ_DRV_EV_DATA_WIDTH 64
+
+/* EVQ_CTL_REG: Event queue control register */
+#define FR_AZ_EVQ_CTL 0x00000450
+#define FRF_CZ_RX_EVQ_WAKEUP_MASK_LBN 15
+#define FRF_CZ_RX_EVQ_WAKEUP_MASK_WIDTH 10
+#define FRF_BB_RX_EVQ_WAKEUP_MASK_LBN 15
+#define FRF_BB_RX_EVQ_WAKEUP_MASK_WIDTH 6
+#define FRF_AZ_EVQ_OWNERR_CTL_LBN 14
+#define FRF_AZ_EVQ_OWNERR_CTL_WIDTH 1
+#define FRF_AZ_EVQ_FIFO_AF_TH_LBN 7
+#define FRF_AZ_EVQ_FIFO_AF_TH_WIDTH 7
+#define FRF_AZ_EVQ_FIFO_NOTAF_TH_LBN 0
+#define FRF_AZ_EVQ_FIFO_NOTAF_TH_WIDTH 7
+
+/* EVQ_CNT1_REG: Event counter 1 register */
+#define FR_AZ_EVQ_CNT1 0x00000460
+#define FRF_AZ_EVQ_CNT_PRE_FIFO_LBN 120
+#define FRF_AZ_EVQ_CNT_PRE_FIFO_WIDTH 7
+#define FRF_AZ_EVQ_CNT_TOBIU_LBN 100
+#define FRF_AZ_EVQ_CNT_TOBIU_WIDTH 20
+#define FRF_AZ_EVQ_TX_REQ_CNT_LBN 80
+#define FRF_AZ_EVQ_TX_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_RX_REQ_CNT_LBN 60
+#define FRF_AZ_EVQ_RX_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_EM_REQ_CNT_LBN 40
+#define FRF_AZ_EVQ_EM_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_CSR_REQ_CNT_LBN 20
+#define FRF_AZ_EVQ_CSR_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_ERR_REQ_CNT_LBN 0
+#define FRF_AZ_EVQ_ERR_REQ_CNT_WIDTH 20
+
+/* EVQ_CNT2_REG: Event counter 2 register */
+#define FR_AZ_EVQ_CNT2 0x00000470
+#define FRF_AZ_EVQ_UPD_REQ_CNT_LBN 104
+#define FRF_AZ_EVQ_UPD_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_CLR_REQ_CNT_LBN 84
+#define FRF_AZ_EVQ_CLR_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_RDY_CNT_LBN 80
+#define FRF_AZ_EVQ_RDY_CNT_WIDTH 4
+#define FRF_AZ_EVQ_WU_REQ_CNT_LBN 60
+#define FRF_AZ_EVQ_WU_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_WET_REQ_CNT_LBN 40
+#define FRF_AZ_EVQ_WET_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_INIT_REQ_CNT_LBN 20
+#define FRF_AZ_EVQ_INIT_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_TM_REQ_CNT_LBN 0
+#define FRF_AZ_EVQ_TM_REQ_CNT_WIDTH 20
+
+/* USR_EV_REG: Event mailbox register */
+#define FR_CZ_USR_EV 0x00000540
+#define FR_CZ_USR_EV_STEP 8192
+#define FR_CZ_USR_EV_ROWS 1024
+#define FRF_CZ_USR_EV_DATA_LBN 0
+#define FRF_CZ_USR_EV_DATA_WIDTH 32
+
+/* BUF_TBL_CFG_REG: Buffer table configuration register */
+#define FR_AZ_BUF_TBL_CFG 0x00000600
+#define FRF_AZ_BUF_TBL_MODE_LBN 3
+#define FRF_AZ_BUF_TBL_MODE_WIDTH 1
+
+/* SRM_RX_DC_CFG_REG: SRAM receive descriptor cache configuration register */
+#define FR_AZ_SRM_RX_DC_CFG 0x00000610
+#define FRF_AZ_SRM_CLK_TMP_EN_LBN 21
+#define FRF_AZ_SRM_CLK_TMP_EN_WIDTH 1
+#define FRF_AZ_SRM_RX_DC_BASE_ADR_LBN 0
+#define FRF_AZ_SRM_RX_DC_BASE_ADR_WIDTH 21
+
+/* SRM_TX_DC_CFG_REG: SRAM transmit descriptor cache configuration register */
+#define FR_AZ_SRM_TX_DC_CFG 0x00000620
+#define FRF_AZ_SRM_TX_DC_BASE_ADR_LBN 0
+#define FRF_AZ_SRM_TX_DC_BASE_ADR_WIDTH 21
+
+/* SRM_CFG_REG: SRAM configuration register */
+#define FR_AZ_SRM_CFG 0x00000630
+#define FRF_AZ_SRM_OOB_ADR_INTEN_LBN 5
+#define FRF_AZ_SRM_OOB_ADR_INTEN_WIDTH 1
+#define FRF_AZ_SRM_OOB_BUF_INTEN_LBN 4
+#define FRF_AZ_SRM_OOB_BUF_INTEN_WIDTH 1
+#define FRF_AZ_SRM_INIT_EN_LBN 3
+#define FRF_AZ_SRM_INIT_EN_WIDTH 1
+#define FRF_AZ_SRM_NUM_BANK_LBN 2
+#define FRF_AZ_SRM_NUM_BANK_WIDTH 1
+#define FRF_AZ_SRM_BANK_SIZE_LBN 0
+#define FRF_AZ_SRM_BANK_SIZE_WIDTH 2
+
+/* BUF_TBL_UPD_REG: Buffer table update register */
+#define FR_AZ_BUF_TBL_UPD 0x00000650
+#define FRF_AZ_BUF_UPD_CMD_LBN 63
+#define FRF_AZ_BUF_UPD_CMD_WIDTH 1
+#define FRF_AZ_BUF_CLR_CMD_LBN 62
+#define FRF_AZ_BUF_CLR_CMD_WIDTH 1
+#define FRF_AZ_BUF_CLR_END_ID_LBN 32
+#define FRF_AZ_BUF_CLR_END_ID_WIDTH 20
+#define FRF_AZ_BUF_CLR_START_ID_LBN 0
+#define FRF_AZ_BUF_CLR_START_ID_WIDTH 20
+
+/* SRM_UPD_EVQ_REG: Buffer table update event queue ID register */
+#define FR_AZ_SRM_UPD_EVQ 0x00000660
+#define FRF_AZ_SRM_UPD_EVQ_ID_LBN 0
+#define FRF_AZ_SRM_UPD_EVQ_ID_WIDTH 12
+
+/* SRAM_PARITY_REG: SRAM parity register. */
+#define FR_AZ_SRAM_PARITY 0x00000670
+#define FRF_CZ_BYPASS_ECC_LBN 3
+#define FRF_CZ_BYPASS_ECC_WIDTH 1
+#define FRF_CZ_SEC_INT_LBN 2
+#define FRF_CZ_SEC_INT_WIDTH 1
+#define FRF_CZ_FORCE_SRAM_DOUBLE_ERR_LBN 1
+#define FRF_CZ_FORCE_SRAM_DOUBLE_ERR_WIDTH 1
+#define FRF_AB_FORCE_SRAM_PERR_LBN 0
+#define FRF_AB_FORCE_SRAM_PERR_WIDTH 1
+#define FRF_CZ_FORCE_SRAM_SINGLE_ERR_LBN 0
+#define FRF_CZ_FORCE_SRAM_SINGLE_ERR_WIDTH 1
+
+/* RX_CFG_REG: Receive configuration register */
+#define FR_AZ_RX_CFG 0x00000800
+#define FRF_CZ_RX_MIN_KBUF_SIZE_LBN 72
+#define FRF_CZ_RX_MIN_KBUF_SIZE_WIDTH 14
+#define FRF_CZ_RX_HDR_SPLIT_EN_LBN 71
+#define FRF_CZ_RX_HDR_SPLIT_EN_WIDTH 1
+#define FRF_CZ_RX_HDR_SPLIT_PLD_BUF_SIZE_LBN 62
+#define FRF_CZ_RX_HDR_SPLIT_PLD_BUF_SIZE_WIDTH 9
+#define FRF_CZ_RX_HDR_SPLIT_HDR_BUF_SIZE_LBN 53
+#define FRF_CZ_RX_HDR_SPLIT_HDR_BUF_SIZE_WIDTH 9
+#define FRF_CZ_RX_PRE_RFF_IPG_LBN 49
+#define FRF_CZ_RX_PRE_RFF_IPG_WIDTH 4
+#define FRF_BZ_RX_TCP_SUP_LBN 48
+#define FRF_BZ_RX_TCP_SUP_WIDTH 1
+#define FRF_BZ_RX_INGR_EN_LBN 47
+#define FRF_BZ_RX_INGR_EN_WIDTH 1
+#define FRF_BZ_RX_IP_HASH_LBN 46
+#define FRF_BZ_RX_IP_HASH_WIDTH 1
+#define FRF_BZ_RX_HASH_ALG_LBN 45
+#define FRF_BZ_RX_HASH_ALG_WIDTH 1
+#define FRF_BZ_RX_HASH_INSRT_HDR_LBN 44
+#define FRF_BZ_RX_HASH_INSRT_HDR_WIDTH 1
+#define FRF_BZ_RX_DESC_PUSH_EN_LBN 43
+#define FRF_BZ_RX_DESC_PUSH_EN_WIDTH 1
+#define FRF_BZ_RX_RDW_PATCH_EN_LBN 42
+#define FRF_BZ_RX_RDW_PATCH_EN_WIDTH 1
+#define FRF_BB_RX_PCI_BURST_SIZE_LBN 39
+#define FRF_BB_RX_PCI_BURST_SIZE_WIDTH 3
+#define FRF_BZ_RX_OWNERR_CTL_LBN 38
+#define FRF_BZ_RX_OWNERR_CTL_WIDTH 1
+#define FRF_BZ_RX_XON_TX_TH_LBN 33
+#define FRF_BZ_RX_XON_TX_TH_WIDTH 5
+#define FRF_AA_RX_DESC_PUSH_EN_LBN 35
+#define FRF_AA_RX_DESC_PUSH_EN_WIDTH 1
+#define FRF_AA_RX_RDW_PATCH_EN_LBN 34
+#define FRF_AA_RX_RDW_PATCH_EN_WIDTH 1
+#define FRF_AA_RX_PCI_BURST_SIZE_LBN 31
+#define FRF_AA_RX_PCI_BURST_SIZE_WIDTH 3
+#define FRF_BZ_RX_XOFF_TX_TH_LBN 28
+#define FRF_BZ_RX_XOFF_TX_TH_WIDTH 5
+#define FRF_AA_RX_OWNERR_CTL_LBN 30
+#define FRF_AA_RX_OWNERR_CTL_WIDTH 1
+#define FRF_AA_RX_XON_TX_TH_LBN 25
+#define FRF_AA_RX_XON_TX_TH_WIDTH 5
+#define FRF_BZ_RX_USR_BUF_SIZE_LBN 19
+#define FRF_BZ_RX_USR_BUF_SIZE_WIDTH 9
+#define FRF_AA_RX_XOFF_TX_TH_LBN 20
+#define FRF_AA_RX_XOFF_TX_TH_WIDTH 5
+#define FRF_AA_RX_USR_BUF_SIZE_LBN 11
+#define FRF_AA_RX_USR_BUF_SIZE_WIDTH 9
+#define FRF_BZ_RX_XON_MAC_TH_LBN 10
+#define FRF_BZ_RX_XON_MAC_TH_WIDTH 9
+#define FRF_AA_RX_XON_MAC_TH_LBN 6
+#define FRF_AA_RX_XON_MAC_TH_WIDTH 5
+#define FRF_BZ_RX_XOFF_MAC_TH_LBN 1
+#define FRF_BZ_RX_XOFF_MAC_TH_WIDTH 9
+#define FRF_AA_RX_XOFF_MAC_TH_LBN 1
+#define FRF_AA_RX_XOFF_MAC_TH_WIDTH 5
+#define FRF_AZ_RX_XOFF_MAC_EN_LBN 0
+#define FRF_AZ_RX_XOFF_MAC_EN_WIDTH 1
+
+/* RX_FILTER_CTL_REG: Receive filter control registers */
+#define FR_BZ_RX_FILTER_CTL 0x00000810
+#define FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT_LBN 94
+#define FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT_WIDTH 8
+#define FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT_LBN 86
+#define FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT_WIDTH 8
+#define FRF_CZ_RX_FILTER_ALL_VLAN_ETHERTYPES_LBN 85
+#define FRF_CZ_RX_FILTER_ALL_VLAN_ETHERTYPES_WIDTH 1
+#define FRF_CZ_RX_VLAN_MATCH_ETHERTYPE_LBN 69
+#define FRF_CZ_RX_VLAN_MATCH_ETHERTYPE_WIDTH 16
+#define FRF_CZ_MULTICAST_NOMATCH_Q_ID_LBN 57
+#define FRF_CZ_MULTICAST_NOMATCH_Q_ID_WIDTH 12
+#define FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED_LBN 56
+#define FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED_WIDTH 1
+#define FRF_CZ_MULTICAST_NOMATCH_IP_OVERRIDE_LBN 55
+#define FRF_CZ_MULTICAST_NOMATCH_IP_OVERRIDE_WIDTH 1
+#define FRF_CZ_UNICAST_NOMATCH_Q_ID_LBN 43
+#define FRF_CZ_UNICAST_NOMATCH_Q_ID_WIDTH 12
+#define FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED_LBN 42
+#define FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED_WIDTH 1
+#define FRF_CZ_UNICAST_NOMATCH_IP_OVERRIDE_LBN 41
+#define FRF_CZ_UNICAST_NOMATCH_IP_OVERRIDE_WIDTH 1
+#define FRF_BZ_SCATTER_ENBL_NO_MATCH_Q_LBN 40
+#define FRF_BZ_SCATTER_ENBL_NO_MATCH_Q_WIDTH 1
+#define FRF_BZ_UDP_FULL_SRCH_LIMIT_LBN 32
+#define FRF_BZ_UDP_FULL_SRCH_LIMIT_WIDTH 8
+#define FRF_BZ_NUM_KER_LBN 24
+#define FRF_BZ_NUM_KER_WIDTH 2
+#define FRF_BZ_UDP_WILD_SRCH_LIMIT_LBN 16
+#define FRF_BZ_UDP_WILD_SRCH_LIMIT_WIDTH 8
+#define FRF_BZ_TCP_WILD_SRCH_LIMIT_LBN 8
+#define FRF_BZ_TCP_WILD_SRCH_LIMIT_WIDTH 8
+#define FRF_BZ_TCP_FULL_SRCH_LIMIT_LBN 0
+#define FRF_BZ_TCP_FULL_SRCH_LIMIT_WIDTH 8
+
+/* RX_FLUSH_DESCQ_REG: Receive flush descriptor queue register */
+#define FR_AZ_RX_FLUSH_DESCQ 0x00000820
+#define FRF_AZ_RX_FLUSH_DESCQ_CMD_LBN 24
+#define FRF_AZ_RX_FLUSH_DESCQ_CMD_WIDTH 1
+#define FRF_AZ_RX_FLUSH_DESCQ_LBN 0
+#define FRF_AZ_RX_FLUSH_DESCQ_WIDTH 12
+
+/* RX_DESC_UPD_REGP0: Receive descriptor update register. */
+#define FR_BZ_RX_DESC_UPD_P0 0x00000830
+#define FR_BZ_RX_DESC_UPD_P0_STEP 8192
+#define FR_BZ_RX_DESC_UPD_P0_ROWS 1024
+/* RX_DESC_UPD_REG_KER: Receive descriptor update register. */
+#define FR_AA_RX_DESC_UPD_KER 0x00000830
+#define FR_AA_RX_DESC_UPD_KER_STEP 8192
+#define FR_AA_RX_DESC_UPD_KER_ROWS 4
+/* RX_DESC_UPD_REGP123: Receive descriptor update register. */
+#define FR_BB_RX_DESC_UPD_P123 0x01000830
+#define FR_BB_RX_DESC_UPD_P123_STEP 8192
+#define FR_BB_RX_DESC_UPD_P123_ROWS 3072
+#define FRF_AZ_RX_DESC_WPTR_LBN 96
+#define FRF_AZ_RX_DESC_WPTR_WIDTH 12
+#define FRF_AZ_RX_DESC_PUSH_CMD_LBN 95
+#define FRF_AZ_RX_DESC_PUSH_CMD_WIDTH 1
+#define FRF_AZ_RX_DESC_LBN 0
+#define FRF_AZ_RX_DESC_WIDTH 64
+
+/* RX_DC_CFG_REG: Receive descriptor cache configuration register */
+#define FR_AZ_RX_DC_CFG 0x00000840
+#define FRF_AB_RX_MAX_PF_LBN 2
+#define FRF_AB_RX_MAX_PF_WIDTH 2
+#define FRF_AZ_RX_DC_SIZE_LBN 0
+#define FRF_AZ_RX_DC_SIZE_WIDTH 2
+#define FFE_AZ_RX_DC_SIZE_64 3
+#define FFE_AZ_RX_DC_SIZE_32 2
+#define FFE_AZ_RX_DC_SIZE_16 1
+#define FFE_AZ_RX_DC_SIZE_8 0
+
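The FFE_AZ_RX_DC_SIZE_* codes directly above encode the descriptor cache depth as a power of two: 8, 16, 32 and 64 descriptors map to codes 0-3, i.e. depth = 8 << code (the TX equivalent further down stops at 32). A one-line sketch of that relationship, using a hypothetical helper name:

	/* Illustrative only: descriptor cache depth for an FFE_AZ_*_DC_SIZE_* code. */
	static inline unsigned int ffe_dc_size_descs(unsigned int code)
	{
		return 8u << code;	/* 0 -> 8, 1 -> 16, 2 -> 32, 3 -> 64 */
	}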
+/* RX_DC_PF_WM_REG: Receive descriptor cache pre-fetch watermark register */
+#define FR_AZ_RX_DC_PF_WM 0x00000850
+#define FRF_AZ_RX_DC_PF_HWM_LBN 6
+#define FRF_AZ_RX_DC_PF_HWM_WIDTH 6
+#define FRF_AZ_RX_DC_PF_LWM_LBN 0
+#define FRF_AZ_RX_DC_PF_LWM_WIDTH 6
+
+/* RX_RSS_TKEY_REG: RSS Toeplitz hash key */
+#define FR_BZ_RX_RSS_TKEY 0x00000860
+#define FRF_BZ_RX_RSS_TKEY_HI_LBN 64
+#define FRF_BZ_RX_RSS_TKEY_HI_WIDTH 64
+#define FRF_BZ_RX_RSS_TKEY_LO_LBN 0
+#define FRF_BZ_RX_RSS_TKEY_LO_WIDTH 64
+
+/* RX_NODESC_DROP_REG: Receive dropped packet counter register */
+#define FR_AZ_RX_NODESC_DROP 0x00000880
+#define FRF_CZ_RX_NODESC_DROP_CNT_LBN 0
+#define FRF_CZ_RX_NODESC_DROP_CNT_WIDTH 32
+#define FRF_AB_RX_NODESC_DROP_CNT_LBN 0
+#define FRF_AB_RX_NODESC_DROP_CNT_WIDTH 16
+
+/* RX_SELF_RST_REG: Receive self reset register */
+#define FR_AA_RX_SELF_RST 0x00000890
+#define FRF_AA_RX_ISCSI_DIS_LBN 17
+#define FRF_AA_RX_ISCSI_DIS_WIDTH 1
+#define FRF_AA_RX_SW_RST_REG_LBN 16
+#define FRF_AA_RX_SW_RST_REG_WIDTH 1
+#define FRF_AA_RX_NODESC_WAIT_DIS_LBN 9
+#define FRF_AA_RX_NODESC_WAIT_DIS_WIDTH 1
+#define FRF_AA_RX_SELF_RST_EN_LBN 8
+#define FRF_AA_RX_SELF_RST_EN_WIDTH 1
+#define FRF_AA_RX_MAX_PF_LAT_LBN 4
+#define FRF_AA_RX_MAX_PF_LAT_WIDTH 4
+#define FRF_AA_RX_MAX_LU_LAT_LBN 0
+#define FRF_AA_RX_MAX_LU_LAT_WIDTH 4
+
+/* RX_DEBUG_REG: undocumented register */
+#define FR_AZ_RX_DEBUG 0x000008a0
+#define FRF_AZ_RX_DEBUG_LBN 0
+#define FRF_AZ_RX_DEBUG_WIDTH 64
+
+/* RX_PUSH_DROP_REG: Receive descriptor push dropped counter register */
+#define FR_AZ_RX_PUSH_DROP 0x000008b0
+#define FRF_AZ_RX_PUSH_DROP_CNT_LBN 0
+#define FRF_AZ_RX_PUSH_DROP_CNT_WIDTH 32
+
+/* RX_RSS_IPV6_REG1: IPv6 RSS Toeplitz hash key low bytes */
+#define FR_CZ_RX_RSS_IPV6_REG1 0x000008d0
+#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_LBN 0
+#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_WIDTH 128
+
+/* RX_RSS_IPV6_REG2: IPv6 RSS Toeplitz hash key middle bytes */
+#define FR_CZ_RX_RSS_IPV6_REG2 0x000008e0
+#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_LBN 0
+#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_WIDTH 128
+
+/* RX_RSS_IPV6_REG3: IPv6 RSS Toeplitz hash key upper bytes and IPv6 RSS settings */
+#define FR_CZ_RX_RSS_IPV6_REG3 0x000008f0
+#define FRF_CZ_RX_RSS_IPV6_THASH_ENABLE_LBN 66
+#define FRF_CZ_RX_RSS_IPV6_THASH_ENABLE_WIDTH 1
+#define FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE_LBN 65
+#define FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE_WIDTH 1
+#define FRF_CZ_RX_RSS_IPV6_TCP_SUPPRESS_LBN 64
+#define FRF_CZ_RX_RSS_IPV6_TCP_SUPPRESS_WIDTH 1
+#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN 0
+#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH 64
+
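Taken together, the three RX_RSS_IPV6 registers carry 128 + 128 + 64 = 320 bits (40 bytes) of Toeplitz key material, the usual RSS hash key length, plus the three IPv6 hash enable/suppress bits held in REG3.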
+/* TX_FLUSH_DESCQ_REG: Transmit flush descriptor queue register */
+#define FR_AZ_TX_FLUSH_DESCQ 0x00000a00
+#define FRF_AZ_TX_FLUSH_DESCQ_CMD_LBN 12
+#define FRF_AZ_TX_FLUSH_DESCQ_CMD_WIDTH 1
+#define FRF_AZ_TX_FLUSH_DESCQ_LBN 0
+#define FRF_AZ_TX_FLUSH_DESCQ_WIDTH 12
+
+/* TX_DESC_UPD_REGP0: Transmit descriptor update register. */
+#define FR_BZ_TX_DESC_UPD_P0 0x00000a10
+#define FR_BZ_TX_DESC_UPD_P0_STEP 8192
+#define FR_BZ_TX_DESC_UPD_P0_ROWS 1024
+/* TX_DESC_UPD_REG_KER: Transmit descriptor update register. */
+#define FR_AA_TX_DESC_UPD_KER 0x00000a10
+#define FR_AA_TX_DESC_UPD_KER_STEP 8192
+#define FR_AA_TX_DESC_UPD_KER_ROWS 8
+/* TX_DESC_UPD_REGP123: Transmit descriptor update register. */
+#define FR_BB_TX_DESC_UPD_P123 0x01000a10
+#define FR_BB_TX_DESC_UPD_P123_STEP 8192
+#define FR_BB_TX_DESC_UPD_P123_ROWS 3072
+#define FRF_AZ_TX_DESC_WPTR_LBN 96
+#define FRF_AZ_TX_DESC_WPTR_WIDTH 12
+#define FRF_AZ_TX_DESC_PUSH_CMD_LBN 95
+#define FRF_AZ_TX_DESC_PUSH_CMD_WIDTH 1
+#define FRF_AZ_TX_DESC_LBN 0
+#define FRF_AZ_TX_DESC_WIDTH 95
+
+/* TX_DC_CFG_REG: Transmit descriptor cache configuration register */
+#define FR_AZ_TX_DC_CFG 0x00000a20
+#define FRF_AZ_TX_DC_SIZE_LBN 0
+#define FRF_AZ_TX_DC_SIZE_WIDTH 2
+#define FFE_AZ_TX_DC_SIZE_32 2
+#define FFE_AZ_TX_DC_SIZE_16 1
+#define FFE_AZ_TX_DC_SIZE_8 0
+
+/* TX_CHKSM_CFG_REG: Transmit checksum configuration register */
+#define FR_AA_TX_CHKSM_CFG 0x00000a30
+#define FRF_AA_TX_Q_CHKSM_DIS_96_127_LBN 96
+#define FRF_AA_TX_Q_CHKSM_DIS_96_127_WIDTH 32
+#define FRF_AA_TX_Q_CHKSM_DIS_64_95_LBN 64
+#define FRF_AA_TX_Q_CHKSM_DIS_64_95_WIDTH 32
+#define FRF_AA_TX_Q_CHKSM_DIS_32_63_LBN 32
+#define FRF_AA_TX_Q_CHKSM_DIS_32_63_WIDTH 32
+#define FRF_AA_TX_Q_CHKSM_DIS_0_31_LBN 0
+#define FRF_AA_TX_Q_CHKSM_DIS_0_31_WIDTH 32
+
+/* TX_CFG_REG: Transmit configuration register */
+#define FR_AZ_TX_CFG 0x00000a50
+#define FRF_CZ_TX_CONT_LOOKUP_THRESH_RANGE_LBN 114
+#define FRF_CZ_TX_CONT_LOOKUP_THRESH_RANGE_WIDTH 8
+#define FRF_CZ_TX_FILTER_TEST_MODE_BIT_LBN 113
+#define FRF_CZ_TX_FILTER_TEST_MODE_BIT_WIDTH 1
+#define FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE_LBN 105
+#define FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE_WIDTH 8
+#define FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE_LBN 97
+#define FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE_WIDTH 8
+#define FRF_CZ_TX_UDPIP_FILTER_WILD_SEARCH_RANGE_LBN 89
+#define FRF_CZ_TX_UDPIP_FILTER_WILD_SEARCH_RANGE_WIDTH 8
+#define FRF_CZ_TX_UDPIP_FILTER_FULL_SEARCH_RANGE_LBN 81
+#define FRF_CZ_TX_UDPIP_FILTER_FULL_SEARCH_RANGE_WIDTH 8
+#define FRF_CZ_TX_TCPIP_FILTER_WILD_SEARCH_RANGE_LBN 73
+#define FRF_CZ_TX_TCPIP_FILTER_WILD_SEARCH_RANGE_WIDTH 8
+#define FRF_CZ_TX_TCPIP_FILTER_FULL_SEARCH_RANGE_LBN 65
+#define FRF_CZ_TX_TCPIP_FILTER_FULL_SEARCH_RANGE_WIDTH 8
+#define FRF_CZ_TX_FILTER_ALL_VLAN_ETHERTYPES_BIT_LBN 64
+#define FRF_CZ_TX_FILTER_ALL_VLAN_ETHERTYPES_BIT_WIDTH 1
+#define FRF_CZ_TX_VLAN_MATCH_ETHERTYPE_RANGE_LBN 48
+#define FRF_CZ_TX_VLAN_MATCH_ETHERTYPE_RANGE_WIDTH 16
+#define FRF_CZ_TX_FILTER_EN_BIT_LBN 47
+#define FRF_CZ_TX_FILTER_EN_BIT_WIDTH 1
+#define FRF_AZ_TX_IP_ID_P0_OFS_LBN 16
+#define FRF_AZ_TX_IP_ID_P0_OFS_WIDTH 15
+#define FRF_AZ_TX_NO_EOP_DISC_EN_LBN 5
+#define FRF_AZ_TX_NO_EOP_DISC_EN_WIDTH 1
+#define FRF_AZ_TX_P1_PRI_EN_LBN 4
+#define FRF_AZ_TX_P1_PRI_EN_WIDTH 1
+#define FRF_AZ_TX_OWNERR_CTL_LBN 2
+#define FRF_AZ_TX_OWNERR_CTL_WIDTH 1
+#define FRF_AA_TX_NON_IP_DROP_DIS_LBN 1
+#define FRF_AA_TX_NON_IP_DROP_DIS_WIDTH 1
+#define FRF_AZ_TX_IP_ID_REP_EN_LBN 0
+#define FRF_AZ_TX_IP_ID_REP_EN_WIDTH 1
+
+/* TX_PUSH_DROP_REG: Transmit push dropped register */
+#define FR_AZ_TX_PUSH_DROP 0x00000a60
+#define FRF_AZ_TX_PUSH_DROP_CNT_LBN 0
+#define FRF_AZ_TX_PUSH_DROP_CNT_WIDTH 32
+
+/* TX_RESERVED_REG: Additional transmit configuration register */
+#define FR_AZ_TX_RESERVED 0x00000a80
+#define FRF_AZ_TX_EVT_CNT_LBN 121
+#define FRF_AZ_TX_EVT_CNT_WIDTH 7
+#define FRF_AZ_TX_PREF_AGE_CNT_LBN 119
+#define FRF_AZ_TX_PREF_AGE_CNT_WIDTH 2
+#define FRF_AZ_TX_RD_COMP_TMR_LBN 96
+#define FRF_AZ_TX_RD_COMP_TMR_WIDTH 23
+#define FRF_AZ_TX_PUSH_EN_LBN 89
+#define FRF_AZ_TX_PUSH_EN_WIDTH 1
+#define FRF_AZ_TX_PUSH_CHK_DIS_LBN 88
+#define FRF_AZ_TX_PUSH_CHK_DIS_WIDTH 1
+#define FRF_AZ_TX_D_FF_FULL_P0_LBN 85
+#define FRF_AZ_TX_D_FF_FULL_P0_WIDTH 1
+#define FRF_AZ_TX_DMAR_ST_P0_LBN 81
+#define FRF_AZ_TX_DMAR_ST_P0_WIDTH 1
+#define FRF_AZ_TX_DMAQ_ST_LBN 78
+#define FRF_AZ_TX_DMAQ_ST_WIDTH 1
+#define FRF_AZ_TX_RX_SPACER_LBN 64
+#define FRF_AZ_TX_RX_SPACER_WIDTH 8
+#define FRF_AZ_TX_DROP_ABORT_EN_LBN 60
+#define FRF_AZ_TX_DROP_ABORT_EN_WIDTH 1
+#define FRF_AZ_TX_SOFT_EVT_EN_LBN 59
+#define FRF_AZ_TX_SOFT_EVT_EN_WIDTH 1
+#define FRF_AZ_TX_PS_EVT_DIS_LBN 58
+#define FRF_AZ_TX_PS_EVT_DIS_WIDTH 1
+#define FRF_AZ_TX_RX_SPACER_EN_LBN 57
+#define FRF_AZ_TX_RX_SPACER_EN_WIDTH 1
+#define FRF_AZ_TX_XP_TIMER_LBN 52
+#define FRF_AZ_TX_XP_TIMER_WIDTH 5
+#define FRF_AZ_TX_PREF_SPACER_LBN 44
+#define FRF_AZ_TX_PREF_SPACER_WIDTH 8
+#define FRF_AZ_TX_PREF_WD_TMR_LBN 22
+#define FRF_AZ_TX_PREF_WD_TMR_WIDTH 22
+#define FRF_AZ_TX_ONLY1TAG_LBN 21
+#define FRF_AZ_TX_ONLY1TAG_WIDTH 1
+#define FRF_AZ_TX_PREF_THRESHOLD_LBN 19
+#define FRF_AZ_TX_PREF_THRESHOLD_WIDTH 2
+#define FRF_AZ_TX_ONE_PKT_PER_Q_LBN 18
+#define FRF_AZ_TX_ONE_PKT_PER_Q_WIDTH 1
+#define FRF_AZ_TX_DIS_NON_IP_EV_LBN 17
+#define FRF_AZ_TX_DIS_NON_IP_EV_WIDTH 1
+#define FRF_AA_TX_DMA_FF_THR_LBN 16
+#define FRF_AA_TX_DMA_FF_THR_WIDTH 1
+#define FRF_AZ_TX_DMA_SPACER_LBN 8
+#define FRF_AZ_TX_DMA_SPACER_WIDTH 8
+#define FRF_AA_TX_TCP_DIS_LBN 7
+#define FRF_AA_TX_TCP_DIS_WIDTH 1
+#define FRF_BZ_TX_FLUSH_MIN_LEN_EN_LBN 7
+#define FRF_BZ_TX_FLUSH_MIN_LEN_EN_WIDTH 1
+#define FRF_AA_TX_IP_DIS_LBN 6
+#define FRF_AA_TX_IP_DIS_WIDTH 1
+#define FRF_AZ_TX_MAX_CPL_LBN 2
+#define FRF_AZ_TX_MAX_CPL_WIDTH 2
+#define FFE_AZ_TX_MAX_CPL_16 3
+#define FFE_AZ_TX_MAX_CPL_8 2
+#define FFE_AZ_TX_MAX_CPL_4 1
+#define FFE_AZ_TX_MAX_CPL_NOLIMIT 0
+#define FRF_AZ_TX_MAX_PREF_LBN 0
+#define FRF_AZ_TX_MAX_PREF_WIDTH 2
+#define FFE_AZ_TX_MAX_PREF_32 3
+#define FFE_AZ_TX_MAX_PREF_16 2
+#define FFE_AZ_TX_MAX_PREF_8 1
+#define FFE_AZ_TX_MAX_PREF_OFF 0
+
+/* TX_PACE_REG: Transmit pace control register */
+#define FR_BZ_TX_PACE 0x00000a90
+#define FRF_BZ_TX_PACE_SB_NOT_AF_LBN 19
+#define FRF_BZ_TX_PACE_SB_NOT_AF_WIDTH 10
+#define FRF_BZ_TX_PACE_SB_AF_LBN 9
+#define FRF_BZ_TX_PACE_SB_AF_WIDTH 10
+#define FRF_BZ_TX_PACE_FB_BASE_LBN 5
+#define FRF_BZ_TX_PACE_FB_BASE_WIDTH 4
+#define FRF_BZ_TX_PACE_BIN_TH_LBN 0
+#define FRF_BZ_TX_PACE_BIN_TH_WIDTH 5
+
+/* TX_PACE_DROP_QID_REG: PACE Drop QID Counter */
+#define FR_BZ_TX_PACE_DROP_QID 0x00000aa0
+#define FRF_BZ_TX_PACE_QID_DRP_CNT_LBN 0
+#define FRF_BZ_TX_PACE_QID_DRP_CNT_WIDTH 16
+
+/* TX_VLAN_REG: Transmit VLAN tag register */
+#define FR_BB_TX_VLAN 0x00000ae0
+#define FRF_BB_TX_VLAN_EN_LBN 127
+#define FRF_BB_TX_VLAN_EN_WIDTH 1
+#define FRF_BB_TX_VLAN7_PORT1_EN_LBN 125
+#define FRF_BB_TX_VLAN7_PORT1_EN_WIDTH 1
+#define FRF_BB_TX_VLAN7_PORT0_EN_LBN 124
+#define FRF_BB_TX_VLAN7_PORT0_EN_WIDTH 1
+#define FRF_BB_TX_VLAN7_LBN 112
+#define FRF_BB_TX_VLAN7_WIDTH 12
+#define FRF_BB_TX_VLAN6_PORT1_EN_LBN 109
+#define FRF_BB_TX_VLAN6_PORT1_EN_WIDTH 1
+#define FRF_BB_TX_VLAN6_PORT0_EN_LBN 108
+#define FRF_BB_TX_VLAN6_PORT0_EN_WIDTH 1
+#define FRF_BB_TX_VLAN6_LBN 96
+#define FRF_BB_TX_VLAN6_WIDTH 12
+#define FRF_BB_TX_VLAN5_PORT1_EN_LBN 93
+#define FRF_BB_TX_VLAN5_PORT1_EN_WIDTH 1
+#define FRF_BB_TX_VLAN5_PORT0_EN_LBN 92
+#define FRF_BB_TX_VLAN5_PORT0_EN_WIDTH 1
+#define FRF_BB_TX_VLAN5_LBN 80
+#define FRF_BB_TX_VLAN5_WIDTH 12
+#define FRF_BB_TX_VLAN4_PORT1_EN_LBN 77
+#define FRF_BB_TX_VLAN4_PORT1_EN_WIDTH 1
+#define FRF_BB_TX_VLAN4_PORT0_EN_LBN 76
+#define FRF_BB_TX_VLAN4_PORT0_EN_WIDTH 1
+#define FRF_BB_TX_VLAN4_LBN 64
+#define FRF_BB_TX_VLAN4_WIDTH 12
+#define FRF_BB_TX_VLAN3_PORT1_EN_LBN 61
+#define FRF_BB_TX_VLAN3_PORT1_EN_WIDTH 1
+#define FRF_BB_TX_VLAN3_PORT0_EN_LBN 60
+#define FRF_BB_TX_VLAN3_PORT0_EN_WIDTH 1
+#define FRF_BB_TX_VLAN3_LBN 48
+#define FRF_BB_TX_VLAN3_WIDTH 12
+#define FRF_BB_TX_VLAN2_PORT1_EN_LBN 45
+#define FRF_BB_TX_VLAN2_PORT1_EN_WIDTH 1
+#define FRF_BB_TX_VLAN2_PORT0_EN_LBN 44
+#define FRF_BB_TX_VLAN2_PORT0_EN_WIDTH 1
+#define FRF_BB_TX_VLAN2_LBN 32
+#define FRF_BB_TX_VLAN2_WIDTH 12
+#define FRF_BB_TX_VLAN1_PORT1_EN_LBN 29
+#define FRF_BB_TX_VLAN1_PORT1_EN_WIDTH 1
+#define FRF_BB_TX_VLAN1_PORT0_EN_LBN 28
+#define FRF_BB_TX_VLAN1_PORT0_EN_WIDTH 1
+#define FRF_BB_TX_VLAN1_LBN 16
+#define FRF_BB_TX_VLAN1_WIDTH 12
+#define FRF_BB_TX_VLAN0_PORT1_EN_LBN 13
+#define FRF_BB_TX_VLAN0_PORT1_EN_WIDTH 1
+#define FRF_BB_TX_VLAN0_PORT0_EN_LBN 12
+#define FRF_BB_TX_VLAN0_PORT0_EN_WIDTH 1
+#define FRF_BB_TX_VLAN0_LBN 0
+#define FRF_BB_TX_VLAN0_WIDTH 12
+
+/* TX_IPFIL_PORTEN_REG: Transmit filter control register */
+#define FR_BZ_TX_IPFIL_PORTEN 0x00000af0
+#define FRF_BZ_TX_MADR0_FIL_EN_LBN 64
+#define FRF_BZ_TX_MADR0_FIL_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL31_PORT_EN_LBN 62
+#define FRF_BB_TX_IPFIL31_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL30_PORT_EN_LBN 60
+#define FRF_BB_TX_IPFIL30_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL29_PORT_EN_LBN 58
+#define FRF_BB_TX_IPFIL29_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL28_PORT_EN_LBN 56
+#define FRF_BB_TX_IPFIL28_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL27_PORT_EN_LBN 54
+#define FRF_BB_TX_IPFIL27_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL26_PORT_EN_LBN 52
+#define FRF_BB_TX_IPFIL26_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL25_PORT_EN_LBN 50
+#define FRF_BB_TX_IPFIL25_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL24_PORT_EN_LBN 48
+#define FRF_BB_TX_IPFIL24_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL23_PORT_EN_LBN 46
+#define FRF_BB_TX_IPFIL23_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL22_PORT_EN_LBN 44
+#define FRF_BB_TX_IPFIL22_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL21_PORT_EN_LBN 42
+#define FRF_BB_TX_IPFIL21_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL20_PORT_EN_LBN 40
+#define FRF_BB_TX_IPFIL20_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL19_PORT_EN_LBN 38
+#define FRF_BB_TX_IPFIL19_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL18_PORT_EN_LBN 36
+#define FRF_BB_TX_IPFIL18_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL17_PORT_EN_LBN 34
+#define FRF_BB_TX_IPFIL17_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL16_PORT_EN_LBN 32
+#define FRF_BB_TX_IPFIL16_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL15_PORT_EN_LBN 30
+#define FRF_BB_TX_IPFIL15_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL14_PORT_EN_LBN 28
+#define FRF_BB_TX_IPFIL14_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL13_PORT_EN_LBN 26
+#define FRF_BB_TX_IPFIL13_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL12_PORT_EN_LBN 24
+#define FRF_BB_TX_IPFIL12_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL11_PORT_EN_LBN 22
+#define FRF_BB_TX_IPFIL11_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL10_PORT_EN_LBN 20
+#define FRF_BB_TX_IPFIL10_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL9_PORT_EN_LBN 18
+#define FRF_BB_TX_IPFIL9_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL8_PORT_EN_LBN 16
+#define FRF_BB_TX_IPFIL8_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL7_PORT_EN_LBN 14
+#define FRF_BB_TX_IPFIL7_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL6_PORT_EN_LBN 12
+#define FRF_BB_TX_IPFIL6_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL5_PORT_EN_LBN 10
+#define FRF_BB_TX_IPFIL5_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL4_PORT_EN_LBN 8
+#define FRF_BB_TX_IPFIL4_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL3_PORT_EN_LBN 6
+#define FRF_BB_TX_IPFIL3_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL2_PORT_EN_LBN 4
+#define FRF_BB_TX_IPFIL2_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL1_PORT_EN_LBN 2
+#define FRF_BB_TX_IPFIL1_PORT_EN_WIDTH 1
+#define FRF_BB_TX_IPFIL0_PORT_EN_LBN 0
+#define FRF_BB_TX_IPFIL0_PORT_EN_WIDTH 1
+
+/* TX_IPFIL_TBL: Transmit IP source address filter table */
+#define FR_BB_TX_IPFIL_TBL 0x00000b00
+#define FR_BB_TX_IPFIL_TBL_STEP 16
+#define FR_BB_TX_IPFIL_TBL_ROWS 16
+#define FRF_BB_TX_IPFIL_MASK_1_LBN 96
+#define FRF_BB_TX_IPFIL_MASK_1_WIDTH 32
+#define FRF_BB_TX_IP_SRC_ADR_1_LBN 64
+#define FRF_BB_TX_IP_SRC_ADR_1_WIDTH 32
+#define FRF_BB_TX_IPFIL_MASK_0_LBN 32
+#define FRF_BB_TX_IPFIL_MASK_0_WIDTH 32
+#define FRF_BB_TX_IP_SRC_ADR_0_LBN 0
+#define FRF_BB_TX_IP_SRC_ADR_0_WIDTH 32
+
+/* MD_TXD_REG: PHY management transmit data register */
+#define FR_AB_MD_TXD 0x00000c00
+#define FRF_AB_MD_TXD_LBN 0
+#define FRF_AB_MD_TXD_WIDTH 16
+
+/* MD_RXD_REG: PHY management receive data register */
+#define FR_AB_MD_RXD 0x00000c10
+#define FRF_AB_MD_RXD_LBN 0
+#define FRF_AB_MD_RXD_WIDTH 16
+
+/* MD_CS_REG: PHY management configuration & status register */
+#define FR_AB_MD_CS 0x00000c20
+#define FRF_AB_MD_RD_EN_CMD_LBN 15
+#define FRF_AB_MD_RD_EN_CMD_WIDTH 1
+#define FRF_AB_MD_WR_EN_CMD_LBN 14
+#define FRF_AB_MD_WR_EN_CMD_WIDTH 1
+#define FRF_AB_MD_ADDR_CMD_LBN 13
+#define FRF_AB_MD_ADDR_CMD_WIDTH 1
+#define FRF_AB_MD_PT_LBN 7
+#define FRF_AB_MD_PT_WIDTH 3
+#define FRF_AB_MD_PL_LBN 6
+#define FRF_AB_MD_PL_WIDTH 1
+#define FRF_AB_MD_INT_CLR_LBN 5
+#define FRF_AB_MD_INT_CLR_WIDTH 1
+#define FRF_AB_MD_GC_LBN 4
+#define FRF_AB_MD_GC_WIDTH 1
+#define FRF_AB_MD_PRSP_LBN 3
+#define FRF_AB_MD_PRSP_WIDTH 1
+#define FRF_AB_MD_RIC_LBN 2
+#define FRF_AB_MD_RIC_WIDTH 1
+#define FRF_AB_MD_RDC_LBN 1
+#define FRF_AB_MD_RDC_WIDTH 1
+#define FRF_AB_MD_WRC_LBN 0
+#define FRF_AB_MD_WRC_WIDTH 1
+
+/* MD_PHY_ADR_REG: PHY management PHY address register */
+#define FR_AB_MD_PHY_ADR 0x00000c30
+#define FRF_AB_MD_PHY_ADR_LBN 0
+#define FRF_AB_MD_PHY_ADR_WIDTH 16
+
+/* MD_ID_REG: PHY management ID register */
+#define FR_AB_MD_ID 0x00000c40
+#define FRF_AB_MD_PRT_ADR_LBN 11
+#define FRF_AB_MD_PRT_ADR_WIDTH 5
+#define FRF_AB_MD_DEV_ADR_LBN 6
+#define FRF_AB_MD_DEV_ADR_WIDTH 5
+
+/* MD_STAT_REG: PHY management status & mask register */
+#define FR_AB_MD_STAT 0x00000c50
+#define FRF_AB_MD_PINT_LBN 4
+#define FRF_AB_MD_PINT_WIDTH 1
+#define FRF_AB_MD_DONE_LBN 3
+#define FRF_AB_MD_DONE_WIDTH 1
+#define FRF_AB_MD_BSERR_LBN 2
+#define FRF_AB_MD_BSERR_WIDTH 1
+#define FRF_AB_MD_LNFL_LBN 1
+#define FRF_AB_MD_LNFL_WIDTH 1
+#define FRF_AB_MD_BSY_LBN 0
+#define FRF_AB_MD_BSY_WIDTH 1
+
+/* MAC_STAT_DMA_REG: Port MAC statistical counter DMA register */
+#define FR_AB_MAC_STAT_DMA 0x00000c60
+#define FRF_AB_MAC_STAT_DMA_CMD_LBN 48
+#define FRF_AB_MAC_STAT_DMA_CMD_WIDTH 1
+#define FRF_AB_MAC_STAT_DMA_ADR_LBN 0
+#define FRF_AB_MAC_STAT_DMA_ADR_WIDTH 48
+
+/* MAC_CTRL_REG: Port MAC control register */
+#define FR_AB_MAC_CTRL 0x00000c80
+#define FRF_AB_MAC_XOFF_VAL_LBN 16
+#define FRF_AB_MAC_XOFF_VAL_WIDTH 16
+#define FRF_BB_TXFIFO_DRAIN_EN_LBN 7
+#define FRF_BB_TXFIFO_DRAIN_EN_WIDTH 1
+#define FRF_AB_MAC_XG_DISTXCRC_LBN 5
+#define FRF_AB_MAC_XG_DISTXCRC_WIDTH 1
+#define FRF_AB_MAC_BCAD_ACPT_LBN 4
+#define FRF_AB_MAC_BCAD_ACPT_WIDTH 1
+#define FRF_AB_MAC_UC_PROM_LBN 3
+#define FRF_AB_MAC_UC_PROM_WIDTH 1
+#define FRF_AB_MAC_LINK_STATUS_LBN 2
+#define FRF_AB_MAC_LINK_STATUS_WIDTH 1
+#define FRF_AB_MAC_SPEED_LBN 0
+#define FRF_AB_MAC_SPEED_WIDTH 2
+#define FFE_AB_MAC_SPEED_10G 3
+#define FFE_AB_MAC_SPEED_1G 2
+#define FFE_AB_MAC_SPEED_100M 1
+#define FFE_AB_MAC_SPEED_10M 0
+
+/* GEN_MODE_REG: General Purpose mode register (external interrupt mask) */
+#define FR_BB_GEN_MODE 0x00000c90
+#define FRF_BB_XFP_PHY_INT_POL_SEL_LBN 3
+#define FRF_BB_XFP_PHY_INT_POL_SEL_WIDTH 1
+#define FRF_BB_XG_PHY_INT_POL_SEL_LBN 2
+#define FRF_BB_XG_PHY_INT_POL_SEL_WIDTH 1
+#define FRF_BB_XFP_PHY_INT_MASK_LBN 1
+#define FRF_BB_XFP_PHY_INT_MASK_WIDTH 1
+#define FRF_BB_XG_PHY_INT_MASK_LBN 0
+#define FRF_BB_XG_PHY_INT_MASK_WIDTH 1
+
+/* MAC_MC_HASH_REG0: Multicast address hash table */
+#define FR_AB_MAC_MC_HASH_REG0 0x00000ca0
+#define FRF_AB_MAC_MCAST_HASH0_LBN 0
+#define FRF_AB_MAC_MCAST_HASH0_WIDTH 128
+
+/* MAC_MC_HASH_REG1: Multicast address hash table */
+#define FR_AB_MAC_MC_HASH_REG1 0x00000cb0
+#define FRF_AB_MAC_MCAST_HASH1_LBN 0
+#define FRF_AB_MAC_MCAST_HASH1_WIDTH 128
+
+/* GM_CFG1_REG: GMAC configuration register 1 */
+#define FR_AB_GM_CFG1 0x00000e00
+#define FRF_AB_GM_SW_RST_LBN 31
+#define FRF_AB_GM_SW_RST_WIDTH 1
+#define FRF_AB_GM_SIM_RST_LBN 30
+#define FRF_AB_GM_SIM_RST_WIDTH 1
+#define FRF_AB_GM_RST_RX_MAC_CTL_LBN 19
+#define FRF_AB_GM_RST_RX_MAC_CTL_WIDTH 1
+#define FRF_AB_GM_RST_TX_MAC_CTL_LBN 18
+#define FRF_AB_GM_RST_TX_MAC_CTL_WIDTH 1
+#define FRF_AB_GM_RST_RX_FUNC_LBN 17
+#define FRF_AB_GM_RST_RX_FUNC_WIDTH 1
+#define FRF_AB_GM_RST_TX_FUNC_LBN 16
+#define FRF_AB_GM_RST_TX_FUNC_WIDTH 1
+#define FRF_AB_GM_LOOP_LBN 8
+#define FRF_AB_GM_LOOP_WIDTH 1
+#define FRF_AB_GM_RX_FC_EN_LBN 5
+#define FRF_AB_GM_RX_FC_EN_WIDTH 1
+#define FRF_AB_GM_TX_FC_EN_LBN 4
+#define FRF_AB_GM_TX_FC_EN_WIDTH 1
+#define FRF_AB_GM_SYNC_RXEN_LBN 3
+#define FRF_AB_GM_SYNC_RXEN_WIDTH 1
+#define FRF_AB_GM_RX_EN_LBN 2
+#define FRF_AB_GM_RX_EN_WIDTH 1
+#define FRF_AB_GM_SYNC_TXEN_LBN 1
+#define FRF_AB_GM_SYNC_TXEN_WIDTH 1
+#define FRF_AB_GM_TX_EN_LBN 0
+#define FRF_AB_GM_TX_EN_WIDTH 1
+
+/* GM_CFG2_REG: GMAC configuration register 2 */
+#define FR_AB_GM_CFG2 0x00000e10
+#define FRF_AB_GM_PAMBL_LEN_LBN 12
+#define FRF_AB_GM_PAMBL_LEN_WIDTH 4
+#define FRF_AB_GM_IF_MODE_LBN 8
+#define FRF_AB_GM_IF_MODE_WIDTH 2
+#define FFE_AB_IF_MODE_BYTE_MODE 2
+#define FFE_AB_IF_MODE_NIBBLE_MODE 1
+#define FRF_AB_GM_HUGE_FRM_EN_LBN 5
+#define FRF_AB_GM_HUGE_FRM_EN_WIDTH 1
+#define FRF_AB_GM_LEN_CHK_LBN 4
+#define FRF_AB_GM_LEN_CHK_WIDTH 1
+#define FRF_AB_GM_PAD_CRC_EN_LBN 2
+#define FRF_AB_GM_PAD_CRC_EN_WIDTH 1
+#define FRF_AB_GM_CRC_EN_LBN 1
+#define FRF_AB_GM_CRC_EN_WIDTH 1
+#define FRF_AB_GM_FD_LBN 0
+#define FRF_AB_GM_FD_WIDTH 1
+
+/* GM_IPG_REG: GMAC IPG register */
+#define FR_AB_GM_IPG 0x00000e20
+#define FRF_AB_GM_NONB2B_IPG1_LBN 24
+#define FRF_AB_GM_NONB2B_IPG1_WIDTH 7
+#define FRF_AB_GM_NONB2B_IPG2_LBN 16
+#define FRF_AB_GM_NONB2B_IPG2_WIDTH 7
+#define FRF_AB_GM_MIN_IPG_ENF_LBN 8
+#define FRF_AB_GM_MIN_IPG_ENF_WIDTH 8
+#define FRF_AB_GM_B2B_IPG_LBN 0
+#define FRF_AB_GM_B2B_IPG_WIDTH 7
+
+/* GM_HD_REG: GMAC half duplex register */
+#define FR_AB_GM_HD 0x00000e30
+#define FRF_AB_GM_ALT_BOFF_VAL_LBN 20
+#define FRF_AB_GM_ALT_BOFF_VAL_WIDTH 4
+#define FRF_AB_GM_ALT_BOFF_EN_LBN 19
+#define FRF_AB_GM_ALT_BOFF_EN_WIDTH 1
+#define FRF_AB_GM_BP_NO_BOFF_LBN 18
+#define FRF_AB_GM_BP_NO_BOFF_WIDTH 1
+#define FRF_AB_GM_DIS_BOFF_LBN 17
+#define FRF_AB_GM_DIS_BOFF_WIDTH 1
+#define FRF_AB_GM_EXDEF_TX_EN_LBN 16
+#define FRF_AB_GM_EXDEF_TX_EN_WIDTH 1
+#define FRF_AB_GM_RTRY_LIMIT_LBN 12
+#define FRF_AB_GM_RTRY_LIMIT_WIDTH 4
+#define FRF_AB_GM_COL_WIN_LBN 0
+#define FRF_AB_GM_COL_WIN_WIDTH 10
+
+/* GM_MAX_FLEN_REG: GMAC maximum frame length register */
+#define FR_AB_GM_MAX_FLEN 0x00000e40
+#define FRF_AB_GM_MAX_FLEN_LBN 0
+#define FRF_AB_GM_MAX_FLEN_WIDTH 16
+
+/* GM_TEST_REG: GMAC test register */
+#define FR_AB_GM_TEST 0x00000e70
+#define FRF_AB_GM_MAX_BOFF_LBN 3
+#define FRF_AB_GM_MAX_BOFF_WIDTH 1
+#define FRF_AB_GM_REG_TX_FLOW_EN_LBN 2
+#define FRF_AB_GM_REG_TX_FLOW_EN_WIDTH 1
+#define FRF_AB_GM_TEST_PAUSE_LBN 1
+#define FRF_AB_GM_TEST_PAUSE_WIDTH 1
+#define FRF_AB_GM_SHORT_SLOT_LBN 0
+#define FRF_AB_GM_SHORT_SLOT_WIDTH 1
+
+/* GM_ADR1_REG: GMAC station address register 1 */
+#define FR_AB_GM_ADR1 0x00000f00
+#define FRF_AB_GM_ADR_B0_LBN 24
+#define FRF_AB_GM_ADR_B0_WIDTH 8
+#define FRF_AB_GM_ADR_B1_LBN 16
+#define FRF_AB_GM_ADR_B1_WIDTH 8
+#define FRF_AB_GM_ADR_B2_LBN 8
+#define FRF_AB_GM_ADR_B2_WIDTH 8
+#define FRF_AB_GM_ADR_B3_LBN 0
+#define FRF_AB_GM_ADR_B3_WIDTH 8
+
+/* GM_ADR2_REG: GMAC station address register 2 */
+#define FR_AB_GM_ADR2 0x00000f10
+#define FRF_AB_GM_ADR_B4_LBN 24
+#define FRF_AB_GM_ADR_B4_WIDTH 8
+#define FRF_AB_GM_ADR_B5_LBN 16
+#define FRF_AB_GM_ADR_B5_WIDTH 8
+
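GM_ADR1 and GM_ADR2 together hold the six bytes of the GMAC station address: B0-B3 in GM_ADR1 and B4-B5 in GM_ADR2, each byte at the LBN given above. A rough sketch of packing an address into the two 32-bit values (both the helper name and the assumption that B0 is the first octet of the MAC address are illustrative, not taken from the driver):

	#include <stdint.h>

	/* Illustrative only: pack mac[0..5] into the GM_ADR1/GM_ADR2 layouts above,
	 * assuming mac[0] corresponds to station address byte B0. */
	static inline void gm_pack_station_addr(const uint8_t mac[6],
						uint32_t *adr1, uint32_t *adr2)
	{
		*adr1 = ((uint32_t)mac[0] << FRF_AB_GM_ADR_B0_LBN) |
			((uint32_t)mac[1] << FRF_AB_GM_ADR_B1_LBN) |
			((uint32_t)mac[2] << FRF_AB_GM_ADR_B2_LBN) |
			((uint32_t)mac[3] << FRF_AB_GM_ADR_B3_LBN);
		*adr2 = ((uint32_t)mac[4] << FRF_AB_GM_ADR_B4_LBN) |
			((uint32_t)mac[5] << FRF_AB_GM_ADR_B5_LBN);
	}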
+/* GMF_CFG0_REG: GMAC FIFO configuration register 0 */
+#define FR_AB_GMF_CFG0 0x00000f20
+#define FRF_AB_GMF_FTFENRPLY_LBN 20
+#define FRF_AB_GMF_FTFENRPLY_WIDTH 1
+#define FRF_AB_GMF_STFENRPLY_LBN 19
+#define FRF_AB_GMF_STFENRPLY_WIDTH 1
+#define FRF_AB_GMF_FRFENRPLY_LBN 18
+#define FRF_AB_GMF_FRFENRPLY_WIDTH 1
+#define FRF_AB_GMF_SRFENRPLY_LBN 17
+#define FRF_AB_GMF_SRFENRPLY_WIDTH 1
+#define FRF_AB_GMF_WTMENRPLY_LBN 16
+#define FRF_AB_GMF_WTMENRPLY_WIDTH 1
+#define FRF_AB_GMF_FTFENREQ_LBN 12
+#define FRF_AB_GMF_FTFENREQ_WIDTH 1
+#define FRF_AB_GMF_STFENREQ_LBN 11
+#define FRF_AB_GMF_STFENREQ_WIDTH 1
+#define FRF_AB_GMF_FRFENREQ_LBN 10
+#define FRF_AB_GMF_FRFENREQ_WIDTH 1
+#define FRF_AB_GMF_SRFENREQ_LBN 9
+#define FRF_AB_GMF_SRFENREQ_WIDTH 1
+#define FRF_AB_GMF_WTMENREQ_LBN 8
+#define FRF_AB_GMF_WTMENREQ_WIDTH 1
+#define FRF_AB_GMF_HSTRSTFT_LBN 4
+#define FRF_AB_GMF_HSTRSTFT_WIDTH 1
+#define FRF_AB_GMF_HSTRSTST_LBN 3
+#define FRF_AB_GMF_HSTRSTST_WIDTH 1
+#define FRF_AB_GMF_HSTRSTFR_LBN 2
+#define FRF_AB_GMF_HSTRSTFR_WIDTH 1
+#define FRF_AB_GMF_HSTRSTSR_LBN 1
+#define FRF_AB_GMF_HSTRSTSR_WIDTH 1
+#define FRF_AB_GMF_HSTRSTWT_LBN 0
+#define FRF_AB_GMF_HSTRSTWT_WIDTH 1
+
+/* GMF_CFG1_REG: GMAC FIFO configuration register 1 */
+#define FR_AB_GMF_CFG1 0x00000f30
+#define FRF_AB_GMF_CFGFRTH_LBN 16
+#define FRF_AB_GMF_CFGFRTH_WIDTH 5
+#define FRF_AB_GMF_CFGXOFFRTX_LBN 0
+#define FRF_AB_GMF_CFGXOFFRTX_WIDTH 16
+
+/* GMF_CFG2_REG: GMAC FIFO configuration register 2 */
+#define FR_AB_GMF_CFG2 0x00000f40
+#define FRF_AB_GMF_CFGHWM_LBN 16
+#define FRF_AB_GMF_CFGHWM_WIDTH 6
+#define FRF_AB_GMF_CFGLWM_LBN 0
+#define FRF_AB_GMF_CFGLWM_WIDTH 6
+
+/* GMF_CFG3_REG: GMAC FIFO configuration register 3 */
+#define FR_AB_GMF_CFG3 0x00000f50
+#define FRF_AB_GMF_CFGHWMFT_LBN 16
+#define FRF_AB_GMF_CFGHWMFT_WIDTH 6
+#define FRF_AB_GMF_CFGFTTH_LBN 0
+#define FRF_AB_GMF_CFGFTTH_WIDTH 6
+
+/* GMF_CFG4_REG: GMAC FIFO configuration register 4 */
+#define FR_AB_GMF_CFG4 0x00000f60
+#define FRF_AB_GMF_HSTFLTRFRM_LBN 0
+#define FRF_AB_GMF_HSTFLTRFRM_WIDTH 18
+
+/* GMF_CFG5_REG: GMAC FIFO configuration register 5 */
+#define FR_AB_GMF_CFG5 0x00000f70
+#define FRF_AB_GMF_CFGHDPLX_LBN 22
+#define FRF_AB_GMF_CFGHDPLX_WIDTH 1
+#define FRF_AB_GMF_SRFULL_LBN 21
+#define FRF_AB_GMF_SRFULL_WIDTH 1
+#define FRF_AB_GMF_HSTSRFULLCLR_LBN 20
+#define FRF_AB_GMF_HSTSRFULLCLR_WIDTH 1
+#define FRF_AB_GMF_CFGBYTMODE_LBN 19
+#define FRF_AB_GMF_CFGBYTMODE_WIDTH 1
+#define FRF_AB_GMF_HSTDRPLT64_LBN 18
+#define FRF_AB_GMF_HSTDRPLT64_WIDTH 1
+#define FRF_AB_GMF_HSTFLTRFRMDC_LBN 0
+#define FRF_AB_GMF_HSTFLTRFRMDC_WIDTH 18
+
+/* TX_SRC_MAC_TBL: Transmit MAC source address filter table */
+#define FR_BB_TX_SRC_MAC_TBL 0x00001000
+#define FR_BB_TX_SRC_MAC_TBL_STEP 16
+#define FR_BB_TX_SRC_MAC_TBL_ROWS 16
+#define FRF_BB_TX_SRC_MAC_ADR_1_LBN 64
+#define FRF_BB_TX_SRC_MAC_ADR_1_WIDTH 48
+#define FRF_BB_TX_SRC_MAC_ADR_0_LBN 0
+#define FRF_BB_TX_SRC_MAC_ADR_0_WIDTH 48
+
+/* TX_SRC_MAC_CTL_REG: Transmit MAC source address filter control */
+#define FR_BB_TX_SRC_MAC_CTL 0x00001100
+#define FRF_BB_TX_SRC_DROP_CTR_LBN 16
+#define FRF_BB_TX_SRC_DROP_CTR_WIDTH 16
+#define FRF_BB_TX_SRC_FLTR_EN_LBN 15
+#define FRF_BB_TX_SRC_FLTR_EN_WIDTH 1
+#define FRF_BB_TX_DROP_CTR_CLR_LBN 12
+#define FRF_BB_TX_DROP_CTR_CLR_WIDTH 1
+#define FRF_BB_TX_MAC_QID_SEL_LBN 0
+#define FRF_BB_TX_MAC_QID_SEL_WIDTH 3
+
+/* XM_ADR_LO_REG: XGMAC address register low */
+#define FR_AB_XM_ADR_LO 0x00001200
+#define FRF_AB_XM_ADR_LO_LBN 0
+#define FRF_AB_XM_ADR_LO_WIDTH 32
+
+/* XM_ADR_HI_REG: XGMAC address register high */
+#define FR_AB_XM_ADR_HI 0x00001210
+#define FRF_AB_XM_ADR_HI_LBN 0
+#define FRF_AB_XM_ADR_HI_WIDTH 16
+
+/* XM_GLB_CFG_REG: XGMAC global configuration */
+#define FR_AB_XM_GLB_CFG 0x00001220
+#define FRF_AB_XM_RMTFLT_GEN_LBN 17
+#define FRF_AB_XM_RMTFLT_GEN_WIDTH 1
+#define FRF_AB_XM_DEBUG_MODE_LBN 16
+#define FRF_AB_XM_DEBUG_MODE_WIDTH 1
+#define FRF_AB_XM_RX_STAT_EN_LBN 11
+#define FRF_AB_XM_RX_STAT_EN_WIDTH 1
+#define FRF_AB_XM_TX_STAT_EN_LBN 10
+#define FRF_AB_XM_TX_STAT_EN_WIDTH 1
+#define FRF_AB_XM_RX_JUMBO_MODE_LBN 6
+#define FRF_AB_XM_RX_JUMBO_MODE_WIDTH 1
+#define FRF_AB_XM_WAN_MODE_LBN 5
+#define FRF_AB_XM_WAN_MODE_WIDTH 1
+#define FRF_AB_XM_INTCLR_MODE_LBN 3
+#define FRF_AB_XM_INTCLR_MODE_WIDTH 1
+#define FRF_AB_XM_CORE_RST_LBN 0
+#define FRF_AB_XM_CORE_RST_WIDTH 1
+
+/* XM_TX_CFG_REG: XGMAC transmit configuration */
+#define FR_AB_XM_TX_CFG 0x00001230
+#define FRF_AB_XM_TX_PROG_LBN 24
+#define FRF_AB_XM_TX_PROG_WIDTH 1
+#define FRF_AB_XM_IPG_LBN 16
+#define FRF_AB_XM_IPG_WIDTH 4
+#define FRF_AB_XM_FCNTL_LBN 10
+#define FRF_AB_XM_FCNTL_WIDTH 1
+#define FRF_AB_XM_TXCRC_LBN 8
+#define FRF_AB_XM_TXCRC_WIDTH 1
+#define FRF_AB_XM_EDRC_LBN 6
+#define FRF_AB_XM_EDRC_WIDTH 1
+#define FRF_AB_XM_AUTO_PAD_LBN 5
+#define FRF_AB_XM_AUTO_PAD_WIDTH 1
+#define FRF_AB_XM_TX_PRMBL_LBN 2
+#define FRF_AB_XM_TX_PRMBL_WIDTH 1
+#define FRF_AB_XM_TXEN_LBN 1
+#define FRF_AB_XM_TXEN_WIDTH 1
+#define FRF_AB_XM_TX_RST_LBN 0
+#define FRF_AB_XM_TX_RST_WIDTH 1
+
+/* XM_RX_CFG_REG: XGMAC receive configuration */
+#define FR_AB_XM_RX_CFG 0x00001240
+#define FRF_AB_XM_PASS_LENERR_LBN 26
+#define FRF_AB_XM_PASS_LENERR_WIDTH 1
+#define FRF_AB_XM_PASS_CRC_ERR_LBN 25
+#define FRF_AB_XM_PASS_CRC_ERR_WIDTH 1
+#define FRF_AB_XM_PASS_PRMBLE_ERR_LBN 24
+#define FRF_AB_XM_PASS_PRMBLE_ERR_WIDTH 1
+#define FRF_AB_XM_REJ_BCAST_LBN 20
+#define FRF_AB_XM_REJ_BCAST_WIDTH 1
+#define FRF_AB_XM_ACPT_ALL_MCAST_LBN 11
+#define FRF_AB_XM_ACPT_ALL_MCAST_WIDTH 1
+#define FRF_AB_XM_ACPT_ALL_UCAST_LBN 9
+#define FRF_AB_XM_ACPT_ALL_UCAST_WIDTH 1
+#define FRF_AB_XM_AUTO_DEPAD_LBN 8
+#define FRF_AB_XM_AUTO_DEPAD_WIDTH 1
+#define FRF_AB_XM_RXCRC_LBN 3
+#define FRF_AB_XM_RXCRC_WIDTH 1
+#define FRF_AB_XM_RX_PRMBL_LBN 2
+#define FRF_AB_XM_RX_PRMBL_WIDTH 1
+#define FRF_AB_XM_RXEN_LBN 1
+#define FRF_AB_XM_RXEN_WIDTH 1
+#define FRF_AB_XM_RX_RST_LBN 0
+#define FRF_AB_XM_RX_RST_WIDTH 1
+
+/* XM_MGT_INT_MASK: XGMAC management interrupt mask (enable bits for the sources below) */
+#define FR_AB_XM_MGT_INT_MASK 0x00001250
+#define FRF_AB_XM_MSK_STA_INTR_LBN 16
+#define FRF_AB_XM_MSK_STA_INTR_WIDTH 1
+#define FRF_AB_XM_MSK_STAT_CNTR_HF_LBN 9
+#define FRF_AB_XM_MSK_STAT_CNTR_HF_WIDTH 1
+#define FRF_AB_XM_MSK_STAT_CNTR_OF_LBN 8
+#define FRF_AB_XM_MSK_STAT_CNTR_OF_WIDTH 1
+#define FRF_AB_XM_MSK_PRMBLE_ERR_LBN 2
+#define FRF_AB_XM_MSK_PRMBLE_ERR_WIDTH 1
+#define FRF_AB_XM_MSK_RMTFLT_LBN 1
+#define FRF_AB_XM_MSK_RMTFLT_WIDTH 1
+#define FRF_AB_XM_MSK_LCLFLT_LBN 0
+#define FRF_AB_XM_MSK_LCLFLT_WIDTH 1
+
+/* XM_FC_REG: XGMAC flow control register */
+#define FR_AB_XM_FC 0x00001270
+#define FRF_AB_XM_PAUSE_TIME_LBN 16
+#define FRF_AB_XM_PAUSE_TIME_WIDTH 16
+#define FRF_AB_XM_RX_MAC_STAT_LBN 11
+#define FRF_AB_XM_RX_MAC_STAT_WIDTH 1
+#define FRF_AB_XM_TX_MAC_STAT_LBN 10
+#define FRF_AB_XM_TX_MAC_STAT_WIDTH 1
+#define FRF_AB_XM_MCNTL_PASS_LBN 8
+#define FRF_AB_XM_MCNTL_PASS_WIDTH 2
+#define FRF_AB_XM_REJ_CNTL_UCAST_LBN 6
+#define FRF_AB_XM_REJ_CNTL_UCAST_WIDTH 1
+#define FRF_AB_XM_REJ_CNTL_MCAST_LBN 5
+#define FRF_AB_XM_REJ_CNTL_MCAST_WIDTH 1
+#define FRF_AB_XM_ZPAUSE_LBN 2
+#define FRF_AB_XM_ZPAUSE_WIDTH 1
+#define FRF_AB_XM_XMIT_PAUSE_LBN 1
+#define FRF_AB_XM_XMIT_PAUSE_WIDTH 1
+#define FRF_AB_XM_DIS_FCNTL_LBN 0
+#define FRF_AB_XM_DIS_FCNTL_WIDTH 1
+
+/* XM_PAUSE_TIME_REG: XGMAC pause time register */
+#define FR_AB_XM_PAUSE_TIME 0x00001290
+#define FRF_AB_XM_TX_PAUSE_CNT_LBN 16
+#define FRF_AB_XM_TX_PAUSE_CNT_WIDTH 16
+#define FRF_AB_XM_RX_PAUSE_CNT_LBN 0
+#define FRF_AB_XM_RX_PAUSE_CNT_WIDTH 16
+
+/* XM_TX_PARAM_REG: XGMAC transmit parameter register */
+#define FR_AB_XM_TX_PARAM 0x000012d0
+#define FRF_AB_XM_TX_JUMBO_MODE_LBN 31
+#define FRF_AB_XM_TX_JUMBO_MODE_WIDTH 1
+#define FRF_AB_XM_MAX_TX_FRM_SIZE_HI_LBN 19
+#define FRF_AB_XM_MAX_TX_FRM_SIZE_HI_WIDTH 11
+#define FRF_AB_XM_MAX_TX_FRM_SIZE_LO_LBN 16
+#define FRF_AB_XM_MAX_TX_FRM_SIZE_LO_WIDTH 3
+#define FRF_AB_XM_PAD_CHAR_LBN 0
+#define FRF_AB_XM_PAD_CHAR_WIDTH 8
+
+/* XM_RX_PARAM_REG: XGMAC receive parameter register */
+#define FR_AB_XM_RX_PARAM 0x000012e0
+#define FRF_AB_XM_MAX_RX_FRM_SIZE_HI_LBN 3
+#define FRF_AB_XM_MAX_RX_FRM_SIZE_HI_WIDTH 11
+#define FRF_AB_XM_MAX_RX_FRM_SIZE_LO_LBN 0
+#define FRF_AB_XM_MAX_RX_FRM_SIZE_LO_WIDTH 3
+
+/* XM_MGT_INT_MSK_REG: XGMAC management interrupt mask register */
+#define FR_AB_XM_MGT_INT_MSK 0x000012f0
+#define FRF_AB_XM_STAT_CNTR_OF_LBN 9
+#define FRF_AB_XM_STAT_CNTR_OF_WIDTH 1
+#define FRF_AB_XM_STAT_CNTR_HF_LBN 8
+#define FRF_AB_XM_STAT_CNTR_HF_WIDTH 1
+#define FRF_AB_XM_PRMBLE_ERR_LBN 2
+#define FRF_AB_XM_PRMBLE_ERR_WIDTH 1
+#define FRF_AB_XM_RMTFLT_LBN 1
+#define FRF_AB_XM_RMTFLT_WIDTH 1
+#define FRF_AB_XM_LCLFLT_LBN 0
+#define FRF_AB_XM_LCLFLT_WIDTH 1
+
+/* XX_PWR_RST_REG: XGXS/XAUI powerdown/reset register */
+#define FR_AB_XX_PWR_RST 0x00001300
+#define FRF_AB_XX_PWRDND_SIG_LBN 31
+#define FRF_AB_XX_PWRDND_SIG_WIDTH 1
+#define FRF_AB_XX_PWRDNC_SIG_LBN 30
+#define FRF_AB_XX_PWRDNC_SIG_WIDTH 1
+#define FRF_AB_XX_PWRDNB_SIG_LBN 29
+#define FRF_AB_XX_PWRDNB_SIG_WIDTH 1
+#define FRF_AB_XX_PWRDNA_SIG_LBN 28
+#define FRF_AB_XX_PWRDNA_SIG_WIDTH 1
+#define FRF_AB_XX_SIM_MODE_LBN 27
+#define FRF_AB_XX_SIM_MODE_WIDTH 1
+#define FRF_AB_XX_RSTPLLCD_SIG_LBN 25
+#define FRF_AB_XX_RSTPLLCD_SIG_WIDTH 1
+#define FRF_AB_XX_RSTPLLAB_SIG_LBN 24
+#define FRF_AB_XX_RSTPLLAB_SIG_WIDTH 1
+#define FRF_AB_XX_RESETD_SIG_LBN 23
+#define FRF_AB_XX_RESETD_SIG_WIDTH 1
+#define FRF_AB_XX_RESETC_SIG_LBN 22
+#define FRF_AB_XX_RESETC_SIG_WIDTH 1
+#define FRF_AB_XX_RESETB_SIG_LBN 21
+#define FRF_AB_XX_RESETB_SIG_WIDTH 1
+#define FRF_AB_XX_RESETA_SIG_LBN 20
+#define FRF_AB_XX_RESETA_SIG_WIDTH 1
+#define FRF_AB_XX_RSTXGXSRX_SIG_LBN 18
+#define FRF_AB_XX_RSTXGXSRX_SIG_WIDTH 1
+#define FRF_AB_XX_RSTXGXSTX_SIG_LBN 17
+#define FRF_AB_XX_RSTXGXSTX_SIG_WIDTH 1
+#define FRF_AB_XX_SD_RST_ACT_LBN 16
+#define FRF_AB_XX_SD_RST_ACT_WIDTH 1
+#define FRF_AB_XX_PWRDND_EN_LBN 15
+#define FRF_AB_XX_PWRDND_EN_WIDTH 1
+#define FRF_AB_XX_PWRDNC_EN_LBN 14
+#define FRF_AB_XX_PWRDNC_EN_WIDTH 1
+#define FRF_AB_XX_PWRDNB_EN_LBN 13
+#define FRF_AB_XX_PWRDNB_EN_WIDTH 1
+#define FRF_AB_XX_PWRDNA_EN_LBN 12
+#define FRF_AB_XX_PWRDNA_EN_WIDTH 1
+#define FRF_AB_XX_RSTPLLCD_EN_LBN 9
+#define FRF_AB_XX_RSTPLLCD_EN_WIDTH 1
+#define FRF_AB_XX_RSTPLLAB_EN_LBN 8
+#define FRF_AB_XX_RSTPLLAB_EN_WIDTH 1
+#define FRF_AB_XX_RESETD_EN_LBN 7
+#define FRF_AB_XX_RESETD_EN_WIDTH 1
+#define FRF_AB_XX_RESETC_EN_LBN 6
+#define FRF_AB_XX_RESETC_EN_WIDTH 1
+#define FRF_AB_XX_RESETB_EN_LBN 5
+#define FRF_AB_XX_RESETB_EN_WIDTH 1
+#define FRF_AB_XX_RESETA_EN_LBN 4
+#define FRF_AB_XX_RESETA_EN_WIDTH 1
+#define FRF_AB_XX_RSTXGXSRX_EN_LBN 2
+#define FRF_AB_XX_RSTXGXSRX_EN_WIDTH 1
+#define FRF_AB_XX_RSTXGXSTX_EN_LBN 1
+#define FRF_AB_XX_RSTXGXSTX_EN_WIDTH 1
+#define FRF_AB_XX_RST_XX_EN_LBN 0
+#define FRF_AB_XX_RST_XX_EN_WIDTH 1
+
+/* XX_SD_CTL_REG: XGXS/XAUI powerdown/reset control register */
+#define FR_AB_XX_SD_CTL 0x00001310
+#define FRF_AB_XX_TERMADJ1_LBN 17
+#define FRF_AB_XX_TERMADJ1_WIDTH 1
+#define FRF_AB_XX_TERMADJ0_LBN 16
+#define FRF_AB_XX_TERMADJ0_WIDTH 1
+#define FRF_AB_XX_HIDRVD_LBN 15
+#define FRF_AB_XX_HIDRVD_WIDTH 1
+#define FRF_AB_XX_LODRVD_LBN 14
+#define FRF_AB_XX_LODRVD_WIDTH 1
+#define FRF_AB_XX_HIDRVC_LBN 13
+#define FRF_AB_XX_HIDRVC_WIDTH 1
+#define FRF_AB_XX_LODRVC_LBN 12
+#define FRF_AB_XX_LODRVC_WIDTH 1
+#define FRF_AB_XX_HIDRVB_LBN 11
+#define FRF_AB_XX_HIDRVB_WIDTH 1
+#define FRF_AB_XX_LODRVB_LBN 10
+#define FRF_AB_XX_LODRVB_WIDTH 1
+#define FRF_AB_XX_HIDRVA_LBN 9
+#define FRF_AB_XX_HIDRVA_WIDTH 1
+#define FRF_AB_XX_LODRVA_LBN 8
+#define FRF_AB_XX_LODRVA_WIDTH 1
+#define FRF_AB_XX_LPBKD_LBN 3
+#define FRF_AB_XX_LPBKD_WIDTH 1
+#define FRF_AB_XX_LPBKC_LBN 2
+#define FRF_AB_XX_LPBKC_WIDTH 1
+#define FRF_AB_XX_LPBKB_LBN 1
+#define FRF_AB_XX_LPBKB_WIDTH 1
+#define FRF_AB_XX_LPBKA_LBN 0
+#define FRF_AB_XX_LPBKA_WIDTH 1
+
+/* XX_TXDRV_CTL_REG: XAUI SerDes transmit drive control register */
+#define FR_AB_XX_TXDRV_CTL 0x00001320
+#define FRF_AB_XX_DEQD_LBN 28
+#define FRF_AB_XX_DEQD_WIDTH 4
+#define FRF_AB_XX_DEQC_LBN 24
+#define FRF_AB_XX_DEQC_WIDTH 4
+#define FRF_AB_XX_DEQB_LBN 20
+#define FRF_AB_XX_DEQB_WIDTH 4
+#define FRF_AB_XX_DEQA_LBN 16
+#define FRF_AB_XX_DEQA_WIDTH 4
+#define FRF_AB_XX_DTXD_LBN 12
+#define FRF_AB_XX_DTXD_WIDTH 4
+#define FRF_AB_XX_DTXC_LBN 8
+#define FRF_AB_XX_DTXC_WIDTH 4
+#define FRF_AB_XX_DTXB_LBN 4
+#define FRF_AB_XX_DTXB_WIDTH 4
+#define FRF_AB_XX_DTXA_LBN 0
+#define FRF_AB_XX_DTXA_WIDTH 4
+
+/* XX_PRBS_CTL_REG: XAUI SerDes per-lane PRBS pattern control register */
+#define FR_AB_XX_PRBS_CTL 0x00001330
+#define FRF_AB_XX_CH3_RX_PRBS_SEL_LBN 30
+#define FRF_AB_XX_CH3_RX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH3_RX_PRBS_INV_LBN 29
+#define FRF_AB_XX_CH3_RX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH3_RX_PRBS_CHKEN_LBN 28
+#define FRF_AB_XX_CH3_RX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH2_RX_PRBS_SEL_LBN 26
+#define FRF_AB_XX_CH2_RX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH2_RX_PRBS_INV_LBN 25
+#define FRF_AB_XX_CH2_RX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH2_RX_PRBS_CHKEN_LBN 24
+#define FRF_AB_XX_CH2_RX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH1_RX_PRBS_SEL_LBN 22
+#define FRF_AB_XX_CH1_RX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH1_RX_PRBS_INV_LBN 21
+#define FRF_AB_XX_CH1_RX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH1_RX_PRBS_CHKEN_LBN 20
+#define FRF_AB_XX_CH1_RX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH0_RX_PRBS_SEL_LBN 18
+#define FRF_AB_XX_CH0_RX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH0_RX_PRBS_INV_LBN 17
+#define FRF_AB_XX_CH0_RX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH0_RX_PRBS_CHKEN_LBN 16
+#define FRF_AB_XX_CH0_RX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH3_TX_PRBS_SEL_LBN 14
+#define FRF_AB_XX_CH3_TX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH3_TX_PRBS_INV_LBN 13
+#define FRF_AB_XX_CH3_TX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH3_TX_PRBS_CHKEN_LBN 12
+#define FRF_AB_XX_CH3_TX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH2_TX_PRBS_SEL_LBN 10
+#define FRF_AB_XX_CH2_TX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH2_TX_PRBS_INV_LBN 9
+#define FRF_AB_XX_CH2_TX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH2_TX_PRBS_CHKEN_LBN 8
+#define FRF_AB_XX_CH2_TX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH1_TX_PRBS_SEL_LBN 6
+#define FRF_AB_XX_CH1_TX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH1_TX_PRBS_INV_LBN 5
+#define FRF_AB_XX_CH1_TX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH1_TX_PRBS_CHKEN_LBN 4
+#define FRF_AB_XX_CH1_TX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH0_TX_PRBS_SEL_LBN 2
+#define FRF_AB_XX_CH0_TX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH0_TX_PRBS_INV_LBN 1
+#define FRF_AB_XX_CH0_TX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH0_TX_PRBS_CHKEN_LBN 0
+#define FRF_AB_XX_CH0_TX_PRBS_CHKEN_WIDTH 1
+
+/* XX_PRBS_CHK_REG: XAUI SerDes per-lane PRBS checker control and status register */
+#define FR_AB_XX_PRBS_CHK 0x00001340
+#define FRF_AB_XX_REV_LB_EN_LBN 16
+#define FRF_AB_XX_REV_LB_EN_WIDTH 1
+#define FRF_AB_XX_CH3_DEG_DET_LBN 15
+#define FRF_AB_XX_CH3_DEG_DET_WIDTH 1
+#define FRF_AB_XX_CH3_LFSR_LOCK_IND_LBN 14
+#define FRF_AB_XX_CH3_LFSR_LOCK_IND_WIDTH 1
+#define FRF_AB_XX_CH3_PRBS_FRUN_LBN 13
+#define FRF_AB_XX_CH3_PRBS_FRUN_WIDTH 1
+#define FRF_AB_XX_CH3_ERR_CHK_LBN 12
+#define FRF_AB_XX_CH3_ERR_CHK_WIDTH 1
+#define FRF_AB_XX_CH2_DEG_DET_LBN 11
+#define FRF_AB_XX_CH2_DEG_DET_WIDTH 1
+#define FRF_AB_XX_CH2_LFSR_LOCK_IND_LBN 10
+#define FRF_AB_XX_CH2_LFSR_LOCK_IND_WIDTH 1
+#define FRF_AB_XX_CH2_PRBS_FRUN_LBN 9
+#define FRF_AB_XX_CH2_PRBS_FRUN_WIDTH 1
+#define FRF_AB_XX_CH2_ERR_CHK_LBN 8
+#define FRF_AB_XX_CH2_ERR_CHK_WIDTH 1
+#define FRF_AB_XX_CH1_DEG_DET_LBN 7
+#define FRF_AB_XX_CH1_DEG_DET_WIDTH 1
+#define FRF_AB_XX_CH1_LFSR_LOCK_IND_LBN 6
+#define FRF_AB_XX_CH1_LFSR_LOCK_IND_WIDTH 1
+#define FRF_AB_XX_CH1_PRBS_FRUN_LBN 5
+#define FRF_AB_XX_CH1_PRBS_FRUN_WIDTH 1
+#define FRF_AB_XX_CH1_ERR_CHK_LBN 4
+#define FRF_AB_XX_CH1_ERR_CHK_WIDTH 1
+#define FRF_AB_XX_CH0_DEG_DET_LBN 3
+#define FRF_AB_XX_CH0_DEG_DET_WIDTH 1
+#define FRF_AB_XX_CH0_LFSR_LOCK_IND_LBN 2
+#define FRF_AB_XX_CH0_LFSR_LOCK_IND_WIDTH 1
+#define FRF_AB_XX_CH0_PRBS_FRUN_LBN 1
+#define FRF_AB_XX_CH0_PRBS_FRUN_WIDTH 1
+#define FRF_AB_XX_CH0_ERR_CHK_LBN 0
+#define FRF_AB_XX_CH0_ERR_CHK_WIDTH 1
+
+/* XX_PRBS_ERR_REG: XAUI SerDes per-lane PRBS error count register */
+#define FR_AB_XX_PRBS_ERR 0x00001350
+#define FRF_AB_XX_CH3_PRBS_ERR_CNT_LBN 24
+#define FRF_AB_XX_CH3_PRBS_ERR_CNT_WIDTH 8
+#define FRF_AB_XX_CH2_PRBS_ERR_CNT_LBN 16
+#define FRF_AB_XX_CH2_PRBS_ERR_CNT_WIDTH 8
+#define FRF_AB_XX_CH1_PRBS_ERR_CNT_LBN 8
+#define FRF_AB_XX_CH1_PRBS_ERR_CNT_WIDTH 8
+#define FRF_AB_XX_CH0_PRBS_ERR_CNT_LBN 0
+#define FRF_AB_XX_CH0_PRBS_ERR_CNT_WIDTH 8
+
+/* XX_CORE_STAT_REG: XAUI XGXS core status register */
+#define FR_AB_XX_CORE_STAT 0x00001360
+#define FRF_AB_XX_FORCE_SIG3_LBN 31
+#define FRF_AB_XX_FORCE_SIG3_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG3_VAL_LBN 30
+#define FRF_AB_XX_FORCE_SIG3_VAL_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG2_LBN 29
+#define FRF_AB_XX_FORCE_SIG2_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG2_VAL_LBN 28
+#define FRF_AB_XX_FORCE_SIG2_VAL_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG1_LBN 27
+#define FRF_AB_XX_FORCE_SIG1_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG1_VAL_LBN 26
+#define FRF_AB_XX_FORCE_SIG1_VAL_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG0_LBN 25
+#define FRF_AB_XX_FORCE_SIG0_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG0_VAL_LBN 24
+#define FRF_AB_XX_FORCE_SIG0_VAL_WIDTH 1
+#define FRF_AB_XX_XGXS_LB_EN_LBN 23
+#define FRF_AB_XX_XGXS_LB_EN_WIDTH 1
+#define FRF_AB_XX_XGMII_LB_EN_LBN 22
+#define FRF_AB_XX_XGMII_LB_EN_WIDTH 1
+#define FRF_AB_XX_MATCH_FAULT_LBN 21
+#define FRF_AB_XX_MATCH_FAULT_WIDTH 1
+#define FRF_AB_XX_ALIGN_DONE_LBN 20
+#define FRF_AB_XX_ALIGN_DONE_WIDTH 1
+#define FRF_AB_XX_SYNC_STAT3_LBN 19
+#define FRF_AB_XX_SYNC_STAT3_WIDTH 1
+#define FRF_AB_XX_SYNC_STAT2_LBN 18
+#define FRF_AB_XX_SYNC_STAT2_WIDTH 1
+#define FRF_AB_XX_SYNC_STAT1_LBN 17
+#define FRF_AB_XX_SYNC_STAT1_WIDTH 1
+#define FRF_AB_XX_SYNC_STAT0_LBN 16
+#define FRF_AB_XX_SYNC_STAT0_WIDTH 1
+#define FRF_AB_XX_COMMA_DET_CH3_LBN 15
+#define FRF_AB_XX_COMMA_DET_CH3_WIDTH 1
+#define FRF_AB_XX_COMMA_DET_CH2_LBN 14
+#define FRF_AB_XX_COMMA_DET_CH2_WIDTH 1
+#define FRF_AB_XX_COMMA_DET_CH1_LBN 13
+#define FRF_AB_XX_COMMA_DET_CH1_WIDTH 1
+#define FRF_AB_XX_COMMA_DET_CH0_LBN 12
+#define FRF_AB_XX_COMMA_DET_CH0_WIDTH 1
+#define FRF_AB_XX_CGRP_ALIGN_CH3_LBN 11
+#define FRF_AB_XX_CGRP_ALIGN_CH3_WIDTH 1
+#define FRF_AB_XX_CGRP_ALIGN_CH2_LBN 10
+#define FRF_AB_XX_CGRP_ALIGN_CH2_WIDTH 1
+#define FRF_AB_XX_CGRP_ALIGN_CH1_LBN 9
+#define FRF_AB_XX_CGRP_ALIGN_CH1_WIDTH 1
+#define FRF_AB_XX_CGRP_ALIGN_CH0_LBN 8
+#define FRF_AB_XX_CGRP_ALIGN_CH0_WIDTH 1
+#define FRF_AB_XX_CHAR_ERR_CH3_LBN 7
+#define FRF_AB_XX_CHAR_ERR_CH3_WIDTH 1
+#define FRF_AB_XX_CHAR_ERR_CH2_LBN 6
+#define FRF_AB_XX_CHAR_ERR_CH2_WIDTH 1
+#define FRF_AB_XX_CHAR_ERR_CH1_LBN 5
+#define FRF_AB_XX_CHAR_ERR_CH1_WIDTH 1
+#define FRF_AB_XX_CHAR_ERR_CH0_LBN 4
+#define FRF_AB_XX_CHAR_ERR_CH0_WIDTH 1
+#define FRF_AB_XX_DISPERR_CH3_LBN 3
+#define FRF_AB_XX_DISPERR_CH3_WIDTH 1
+#define FRF_AB_XX_DISPERR_CH2_LBN 2
+#define FRF_AB_XX_DISPERR_CH2_WIDTH 1
+#define FRF_AB_XX_DISPERR_CH1_LBN 1
+#define FRF_AB_XX_DISPERR_CH1_WIDTH 1
+#define FRF_AB_XX_DISPERR_CH0_LBN 0
+#define FRF_AB_XX_DISPERR_CH0_WIDTH 1
+
+/* RX_DESC_PTR_TBL_KER: Receive descriptor pointer table */
+#define FR_AA_RX_DESC_PTR_TBL_KER 0x00011800
+#define FR_AA_RX_DESC_PTR_TBL_KER_STEP 16
+#define FR_AA_RX_DESC_PTR_TBL_KER_ROWS 4
+/* RX_DESC_PTR_TBL: Receive descriptor pointer table */
+#define FR_BZ_RX_DESC_PTR_TBL 0x00f40000
+#define FR_BZ_RX_DESC_PTR_TBL_STEP 16
+#define FR_BB_RX_DESC_PTR_TBL_ROWS 4096
+#define FR_CZ_RX_DESC_PTR_TBL_ROWS 1024
+#define FRF_CZ_RX_HDR_SPLIT_LBN 90
+#define FRF_CZ_RX_HDR_SPLIT_WIDTH 1
+#define FRF_AA_RX_RESET_LBN 89
+#define FRF_AA_RX_RESET_WIDTH 1
+#define FRF_AZ_RX_ISCSI_DDIG_EN_LBN 88
+#define FRF_AZ_RX_ISCSI_DDIG_EN_WIDTH 1
+#define FRF_AZ_RX_ISCSI_HDIG_EN_LBN 87
+#define FRF_AZ_RX_ISCSI_HDIG_EN_WIDTH 1
+#define FRF_AZ_RX_DESC_PREF_ACT_LBN 86
+#define FRF_AZ_RX_DESC_PREF_ACT_WIDTH 1
+#define FRF_AZ_RX_DC_HW_RPTR_LBN 80
+#define FRF_AZ_RX_DC_HW_RPTR_WIDTH 6
+#define FRF_AZ_RX_DESCQ_HW_RPTR_LBN 68
+#define FRF_AZ_RX_DESCQ_HW_RPTR_WIDTH 12
+#define FRF_AZ_RX_DESCQ_SW_WPTR_LBN 56
+#define FRF_AZ_RX_DESCQ_SW_WPTR_WIDTH 12
+#define FRF_AZ_RX_DESCQ_BUF_BASE_ID_LBN 36
+#define FRF_AZ_RX_DESCQ_BUF_BASE_ID_WIDTH 20
+#define FRF_AZ_RX_DESCQ_EVQ_ID_LBN 24
+#define FRF_AZ_RX_DESCQ_EVQ_ID_WIDTH 12
+#define FRF_AZ_RX_DESCQ_OWNER_ID_LBN 10
+#define FRF_AZ_RX_DESCQ_OWNER_ID_WIDTH 14
+#define FRF_AZ_RX_DESCQ_LABEL_LBN 5
+#define FRF_AZ_RX_DESCQ_LABEL_WIDTH 5
+#define FRF_AZ_RX_DESCQ_SIZE_LBN 3
+#define FRF_AZ_RX_DESCQ_SIZE_WIDTH 2
+#define FFE_AZ_RX_DESCQ_SIZE_4K 3
+#define FFE_AZ_RX_DESCQ_SIZE_2K 2
+#define FFE_AZ_RX_DESCQ_SIZE_1K 1
+#define FFE_AZ_RX_DESCQ_SIZE_512 0
+#define FRF_AZ_RX_DESCQ_TYPE_LBN 2
+#define FRF_AZ_RX_DESCQ_TYPE_WIDTH 1
+#define FRF_AZ_RX_DESCQ_JUMBO_LBN 1
+#define FRF_AZ_RX_DESCQ_JUMBO_WIDTH 1
+#define FRF_AZ_RX_DESCQ_EN_LBN 0
+#define FRF_AZ_RX_DESCQ_EN_WIDTH 1
+
+/* TX_DESC_PTR_TBL_KER: Transmit descriptor pointer table */
+#define FR_AA_TX_DESC_PTR_TBL_KER 0x00011900
+#define FR_AA_TX_DESC_PTR_TBL_KER_STEP 16
+#define FR_AA_TX_DESC_PTR_TBL_KER_ROWS 8
+/* TX_DESC_PTR_TBL: Transmit descriptor pointer table */
+#define FR_BZ_TX_DESC_PTR_TBL 0x00f50000
+#define FR_BZ_TX_DESC_PTR_TBL_STEP 16
+#define FR_BB_TX_DESC_PTR_TBL_ROWS 4096
+#define FR_CZ_TX_DESC_PTR_TBL_ROWS 1024
+#define FRF_CZ_TX_DPT_Q_MASK_WIDTH_LBN 94
+#define FRF_CZ_TX_DPT_Q_MASK_WIDTH_WIDTH 2
+#define FRF_CZ_TX_DPT_ETH_FILT_EN_LBN 93
+#define FRF_CZ_TX_DPT_ETH_FILT_EN_WIDTH 1
+#define FRF_CZ_TX_DPT_IP_FILT_EN_LBN 92
+#define FRF_CZ_TX_DPT_IP_FILT_EN_WIDTH 1
+#define FRF_BZ_TX_NON_IP_DROP_DIS_LBN 91
+#define FRF_BZ_TX_NON_IP_DROP_DIS_WIDTH 1
+#define FRF_BZ_TX_IP_CHKSM_DIS_LBN 90
+#define FRF_BZ_TX_IP_CHKSM_DIS_WIDTH 1
+#define FRF_BZ_TX_TCP_CHKSM_DIS_LBN 89
+#define FRF_BZ_TX_TCP_CHKSM_DIS_WIDTH 1
+#define FRF_AZ_TX_DESCQ_EN_LBN 88
+#define FRF_AZ_TX_DESCQ_EN_WIDTH 1
+#define FRF_AZ_TX_ISCSI_DDIG_EN_LBN 87
+#define FRF_AZ_TX_ISCSI_DDIG_EN_WIDTH 1
+#define FRF_AZ_TX_ISCSI_HDIG_EN_LBN 86
+#define FRF_AZ_TX_ISCSI_HDIG_EN_WIDTH 1
+#define FRF_AZ_TX_DC_HW_RPTR_LBN 80
+#define FRF_AZ_TX_DC_HW_RPTR_WIDTH 6
+#define FRF_AZ_TX_DESCQ_HW_RPTR_LBN 68
+#define FRF_AZ_TX_DESCQ_HW_RPTR_WIDTH 12
+#define FRF_AZ_TX_DESCQ_SW_WPTR_LBN 56
+#define FRF_AZ_TX_DESCQ_SW_WPTR_WIDTH 12
+#define FRF_AZ_TX_DESCQ_BUF_BASE_ID_LBN 36
+#define FRF_AZ_TX_DESCQ_BUF_BASE_ID_WIDTH 20
+#define FRF_AZ_TX_DESCQ_EVQ_ID_LBN 24
+#define FRF_AZ_TX_DESCQ_EVQ_ID_WIDTH 12
+#define FRF_AZ_TX_DESCQ_OWNER_ID_LBN 10
+#define FRF_AZ_TX_DESCQ_OWNER_ID_WIDTH 14
+#define FRF_AZ_TX_DESCQ_LABEL_LBN 5
+#define FRF_AZ_TX_DESCQ_LABEL_WIDTH 5
+#define FRF_AZ_TX_DESCQ_SIZE_LBN 3
+#define FRF_AZ_TX_DESCQ_SIZE_WIDTH 2
+#define FFE_AZ_TX_DESCQ_SIZE_4K 3
+#define FFE_AZ_TX_DESCQ_SIZE_2K 2
+#define FFE_AZ_TX_DESCQ_SIZE_1K 1
+#define FFE_AZ_TX_DESCQ_SIZE_512 0
+#define FRF_AZ_TX_DESCQ_TYPE_LBN 1
+#define FRF_AZ_TX_DESCQ_TYPE_WIDTH 2
+#define FRF_AZ_TX_DESCQ_FLUSH_LBN 0
+#define FRF_AZ_TX_DESCQ_FLUSH_WIDTH 1
+
+/* EVQ_PTR_TBL_KER: Event queue pointer table */
+#define FR_AA_EVQ_PTR_TBL_KER 0x00011a00
+#define FR_AA_EVQ_PTR_TBL_KER_STEP 16
+#define FR_AA_EVQ_PTR_TBL_KER_ROWS 4
+/* EVQ_PTR_TBL: Event queue pointer table */
+#define FR_BZ_EVQ_PTR_TBL 0x00f60000
+#define FR_BZ_EVQ_PTR_TBL_STEP 16
+#define FR_CZ_EVQ_PTR_TBL_ROWS 1024
+#define FR_BB_EVQ_PTR_TBL_ROWS 4096
+#define FRF_BZ_EVQ_RPTR_IGN_LBN 40
+#define FRF_BZ_EVQ_RPTR_IGN_WIDTH 1
+#define FRF_AB_EVQ_WKUP_OR_INT_EN_LBN 39
+#define FRF_AB_EVQ_WKUP_OR_INT_EN_WIDTH 1
+#define FRF_CZ_EVQ_DOS_PROTECT_EN_LBN 39
+#define FRF_CZ_EVQ_DOS_PROTECT_EN_WIDTH 1
+#define FRF_AZ_EVQ_NXT_WPTR_LBN 24
+#define FRF_AZ_EVQ_NXT_WPTR_WIDTH 15
+#define FRF_AZ_EVQ_EN_LBN 23
+#define FRF_AZ_EVQ_EN_WIDTH 1
+#define FRF_AZ_EVQ_SIZE_LBN 20
+#define FRF_AZ_EVQ_SIZE_WIDTH 3
+#define FFE_AZ_EVQ_SIZE_32K 6
+#define FFE_AZ_EVQ_SIZE_16K 5
+#define FFE_AZ_EVQ_SIZE_8K 4
+#define FFE_AZ_EVQ_SIZE_4K 3
+#define FFE_AZ_EVQ_SIZE_2K 2
+#define FFE_AZ_EVQ_SIZE_1K 1
+#define FFE_AZ_EVQ_SIZE_512 0
+#define FRF_AZ_EVQ_BUF_BASE_ID_LBN 0
+#define FRF_AZ_EVQ_BUF_BASE_ID_WIDTH 20
+
+/* BUF_HALF_TBL_KER: Buffer table in half-buffer-table mode, direct access by driver */
+#define FR_AA_BUF_HALF_TBL_KER 0x00018000
+#define FR_AA_BUF_HALF_TBL_KER_STEP 8
+#define FR_AA_BUF_HALF_TBL_KER_ROWS 4096
+/* BUF_HALF_TBL: Buffer table in half-buffer-table mode, direct access by driver */
+#define FR_BZ_BUF_HALF_TBL 0x00800000
+#define FR_BZ_BUF_HALF_TBL_STEP 8
+#define FR_CZ_BUF_HALF_TBL_ROWS 147456
+#define FR_BB_BUF_HALF_TBL_ROWS 524288
+#define FRF_AZ_BUF_ADR_HBUF_ODD_LBN 44
+#define FRF_AZ_BUF_ADR_HBUF_ODD_WIDTH 20
+#define FRF_AZ_BUF_OWNER_ID_HBUF_ODD_LBN 32
+#define FRF_AZ_BUF_OWNER_ID_HBUF_ODD_WIDTH 12
+#define FRF_AZ_BUF_ADR_HBUF_EVEN_LBN 12
+#define FRF_AZ_BUF_ADR_HBUF_EVEN_WIDTH 20
+#define FRF_AZ_BUF_OWNER_ID_HBUF_EVEN_LBN 0
+#define FRF_AZ_BUF_OWNER_ID_HBUF_EVEN_WIDTH 12
+
+/* BUF_FULL_TBL_KER: Buffer table in full-buffer-table mode, direct access by driver */
+#define FR_AA_BUF_FULL_TBL_KER 0x00018000
+#define FR_AA_BUF_FULL_TBL_KER_STEP 8
+#define FR_AA_BUF_FULL_TBL_KER_ROWS 4096
+/* BUF_FULL_TBL: Buffer table in full-buffer-table mode, direct access by driver */
+#define FR_BZ_BUF_FULL_TBL 0x00800000
+#define FR_BZ_BUF_FULL_TBL_STEP 8
+#define FR_CZ_BUF_FULL_TBL_ROWS 147456
+#define FR_BB_BUF_FULL_TBL_ROWS 917504
+#define FRF_AZ_BUF_FULL_UNUSED_LBN 51
+#define FRF_AZ_BUF_FULL_UNUSED_WIDTH 13
+#define FRF_AZ_IP_DAT_BUF_SIZE_LBN 50
+#define FRF_AZ_IP_DAT_BUF_SIZE_WIDTH 1
+#define FRF_AZ_BUF_ADR_REGION_LBN 48
+#define FRF_AZ_BUF_ADR_REGION_WIDTH 2
+#define FFE_AZ_BUF_ADR_REGN3 3
+#define FFE_AZ_BUF_ADR_REGN2 2
+#define FFE_AZ_BUF_ADR_REGN1 1
+#define FFE_AZ_BUF_ADR_REGN0 0
+#define FRF_AZ_BUF_ADR_FBUF_LBN 14
+#define FRF_AZ_BUF_ADR_FBUF_WIDTH 34
+#define FRF_AZ_BUF_OWNER_ID_FBUF_LBN 0
+#define FRF_AZ_BUF_OWNER_ID_FBUF_WIDTH 14
+
+/* RX_FILTER_TBL0: TCP/IPv4 Receive filter table */
+#define FR_BZ_RX_FILTER_TBL0 0x00f00000
+#define FR_BZ_RX_FILTER_TBL0_STEP 32
+#define FR_BZ_RX_FILTER_TBL0_ROWS 8192
+/* RX_FILTER_TBL1: TCP/IPv4 Receive filter table */
+#define FR_BB_RX_FILTER_TBL1 0x00f00010
+#define FR_BB_RX_FILTER_TBL1_STEP 32
+#define FR_BB_RX_FILTER_TBL1_ROWS 8192
+#define FRF_BZ_RSS_EN_LBN 110
+#define FRF_BZ_RSS_EN_WIDTH 1
+#define FRF_BZ_SCATTER_EN_LBN 109
+#define FRF_BZ_SCATTER_EN_WIDTH 1
+#define FRF_BZ_TCP_UDP_LBN 108
+#define FRF_BZ_TCP_UDP_WIDTH 1
+#define FRF_BZ_RXQ_ID_LBN 96
+#define FRF_BZ_RXQ_ID_WIDTH 12
+#define FRF_BZ_DEST_IP_LBN 64
+#define FRF_BZ_DEST_IP_WIDTH 32
+#define FRF_BZ_DEST_PORT_TCP_LBN 48
+#define FRF_BZ_DEST_PORT_TCP_WIDTH 16
+#define FRF_BZ_SRC_IP_LBN 16
+#define FRF_BZ_SRC_IP_WIDTH 32
+#define FRF_BZ_SRC_TCP_DEST_UDP_LBN 0
+#define FRF_BZ_SRC_TCP_DEST_UDP_WIDTH 16
+
+/* RX_MAC_FILTER_TBL0: Receive Ethernet filter table */
+#define FR_CZ_RX_MAC_FILTER_TBL0 0x00f00010
+#define FR_CZ_RX_MAC_FILTER_TBL0_STEP 32
+#define FR_CZ_RX_MAC_FILTER_TBL0_ROWS 512
+#define FRF_CZ_RMFT_RSS_EN_LBN 75
+#define FRF_CZ_RMFT_RSS_EN_WIDTH 1
+#define FRF_CZ_RMFT_SCATTER_EN_LBN 74
+#define FRF_CZ_RMFT_SCATTER_EN_WIDTH 1
+#define FRF_CZ_RMFT_IP_OVERRIDE_LBN 73
+#define FRF_CZ_RMFT_IP_OVERRIDE_WIDTH 1
+#define FRF_CZ_RMFT_RXQ_ID_LBN 61
+#define FRF_CZ_RMFT_RXQ_ID_WIDTH 12
+#define FRF_CZ_RMFT_WILDCARD_MATCH_LBN 60
+#define FRF_CZ_RMFT_WILDCARD_MATCH_WIDTH 1
+#define FRF_CZ_RMFT_DEST_MAC_LBN 12
+#define FRF_CZ_RMFT_DEST_MAC_WIDTH 48
+#define FRF_CZ_RMFT_VLAN_ID_LBN 0
+#define FRF_CZ_RMFT_VLAN_ID_WIDTH 12
+
+/* TIMER_TBL: Timer table */
+#define FR_BZ_TIMER_TBL 0x00f70000
+#define FR_BZ_TIMER_TBL_STEP 16
+#define FR_CZ_TIMER_TBL_ROWS 1024
+#define FR_BB_TIMER_TBL_ROWS 4096
+#define FRF_CZ_TIMER_Q_EN_LBN 33
+#define FRF_CZ_TIMER_Q_EN_WIDTH 1
+#define FRF_CZ_INT_ARMD_LBN 32
+#define FRF_CZ_INT_ARMD_WIDTH 1
+#define FRF_CZ_INT_PEND_LBN 31
+#define FRF_CZ_INT_PEND_WIDTH 1
+#define FRF_CZ_HOST_NOTIFY_MODE_LBN 30
+#define FRF_CZ_HOST_NOTIFY_MODE_WIDTH 1
+#define FRF_CZ_RELOAD_TIMER_VAL_LBN 16
+#define FRF_CZ_RELOAD_TIMER_VAL_WIDTH 14
+#define FRF_CZ_TIMER_MODE_LBN 14
+#define FRF_CZ_TIMER_MODE_WIDTH 2
+#define FFE_CZ_TIMER_MODE_INT_HLDOFF 3
+#define FFE_CZ_TIMER_MODE_TRIG_START 2
+#define FFE_CZ_TIMER_MODE_IMMED_START 1
+#define FFE_CZ_TIMER_MODE_DIS 0
+#define FRF_BB_TIMER_MODE_LBN 12
+#define FRF_BB_TIMER_MODE_WIDTH 2
+#define FFE_BB_TIMER_MODE_INT_HLDOFF 2
+#define FFE_BB_TIMER_MODE_TRIG_START 2
+#define FFE_BB_TIMER_MODE_IMMED_START 1
+#define FFE_BB_TIMER_MODE_DIS 0
+#define FRF_CZ_TIMER_VAL_LBN 0
+#define FRF_CZ_TIMER_VAL_WIDTH 14
+#define FRF_BB_TIMER_VAL_LBN 0
+#define FRF_BB_TIMER_VAL_WIDTH 12
+
+/* TX_PACE_TBL: Transmit pacing table */
+#define FR_BZ_TX_PACE_TBL 0x00f80000
+#define FR_BZ_TX_PACE_TBL_STEP 16
+#define FR_CZ_TX_PACE_TBL_ROWS 1024
+#define FR_BB_TX_PACE_TBL_ROWS 4096
+#define FRF_BZ_TX_PACE_LBN 0
+#define FRF_BZ_TX_PACE_WIDTH 5
+
+/* RX_INDIRECTION_TBL: RX Indirection Table */
+#define FR_BZ_RX_INDIRECTION_TBL 0x00fb0000
+#define FR_BZ_RX_INDIRECTION_TBL_STEP 16
+#define FR_BZ_RX_INDIRECTION_TBL_ROWS 128
+#define FRF_BZ_IT_QUEUE_LBN 0
+#define FRF_BZ_IT_QUEUE_WIDTH 6
+
+/* TX_FILTER_TBL0: TCP/IPv4 Transmit filter table */
+#define FR_CZ_TX_FILTER_TBL0 0x00fc0000
+#define FR_CZ_TX_FILTER_TBL0_STEP 16
+#define FR_CZ_TX_FILTER_TBL0_ROWS 8192
+#define FRF_CZ_TIFT_TCP_UDP_LBN 108
+#define FRF_CZ_TIFT_TCP_UDP_WIDTH 1
+#define FRF_CZ_TIFT_TXQ_ID_LBN 96
+#define FRF_CZ_TIFT_TXQ_ID_WIDTH 12
+#define FRF_CZ_TIFT_DEST_IP_LBN 64
+#define FRF_CZ_TIFT_DEST_IP_WIDTH 32
+#define FRF_CZ_TIFT_DEST_PORT_TCP_LBN 48
+#define FRF_CZ_TIFT_DEST_PORT_TCP_WIDTH 16
+#define FRF_CZ_TIFT_SRC_IP_LBN 16
+#define FRF_CZ_TIFT_SRC_IP_WIDTH 32
+#define FRF_CZ_TIFT_SRC_TCP_DEST_UDP_LBN 0
+#define FRF_CZ_TIFT_SRC_TCP_DEST_UDP_WIDTH 16
+
+/* TX_MAC_FILTER_TBL0: Transmit Ethernet filter table */
+#define FR_CZ_TX_MAC_FILTER_TBL0 0x00fe0000
+#define FR_CZ_TX_MAC_FILTER_TBL0_STEP 16
+#define FR_CZ_TX_MAC_FILTER_TBL0_ROWS 512
+#define FRF_CZ_TMFT_TXQ_ID_LBN 61
+#define FRF_CZ_TMFT_TXQ_ID_WIDTH 12
+#define FRF_CZ_TMFT_WILDCARD_MATCH_LBN 60
+#define FRF_CZ_TMFT_WILDCARD_MATCH_WIDTH 1
+#define FRF_CZ_TMFT_SRC_MAC_LBN 12
+#define FRF_CZ_TMFT_SRC_MAC_WIDTH 48
+#define FRF_CZ_TMFT_VLAN_ID_LBN 0
+#define FRF_CZ_TMFT_VLAN_ID_WIDTH 12
+
+/* MC_TREG_SMEM: MC Shared Memory */
+#define FR_CZ_MC_TREG_SMEM 0x00ff0000
+#define FR_CZ_MC_TREG_SMEM_STEP 4
+#define FR_CZ_MC_TREG_SMEM_ROWS 512
+#define FRF_CZ_MC_TREG_SMEM_ROW_LBN 0
+#define FRF_CZ_MC_TREG_SMEM_ROW_WIDTH 32
+
+/* MSIX_VECTOR_TABLE: MSIX Vector Table */
+#define FR_BB_MSIX_VECTOR_TABLE 0x00ff0000
+#define FR_BZ_MSIX_VECTOR_TABLE_STEP 16
+#define FR_BB_MSIX_VECTOR_TABLE_ROWS 64
+/* MSIX_VECTOR_TABLE: MSIX Vector Table */
+#define FR_CZ_MSIX_VECTOR_TABLE 0x00000000
+/* FR_BZ_MSIX_VECTOR_TABLE_STEP 16 */
+#define FR_CZ_MSIX_VECTOR_TABLE_ROWS 1024
+#define FRF_BZ_MSIX_VECTOR_RESERVED_LBN 97
+#define FRF_BZ_MSIX_VECTOR_RESERVED_WIDTH 31
+#define FRF_BZ_MSIX_VECTOR_MASK_LBN 96
+#define FRF_BZ_MSIX_VECTOR_MASK_WIDTH 1
+#define FRF_BZ_MSIX_MESSAGE_DATA_LBN 64
+#define FRF_BZ_MSIX_MESSAGE_DATA_WIDTH 32
+#define FRF_BZ_MSIX_MESSAGE_ADDRESS_HI_LBN 32
+#define FRF_BZ_MSIX_MESSAGE_ADDRESS_HI_WIDTH 32
+#define FRF_BZ_MSIX_MESSAGE_ADDRESS_LO_LBN 0
+#define FRF_BZ_MSIX_MESSAGE_ADDRESS_LO_WIDTH 32
+
+/* MSIX_PBA_TABLE: MSIX Pending Bit Array */
+#define FR_BB_MSIX_PBA_TABLE 0x00ff2000
+#define FR_BZ_MSIX_PBA_TABLE_STEP 4
+#define FR_BB_MSIX_PBA_TABLE_ROWS 2
+/* MSIX_PBA_TABLE: MSIX Pending Bit Array */
+#define FR_CZ_MSIX_PBA_TABLE 0x00008000
+/* FR_BZ_MSIX_PBA_TABLE_STEP 4 */
+#define FR_CZ_MSIX_PBA_TABLE_ROWS 32
+#define FRF_BZ_MSIX_PBA_PEND_DWORD_LBN 0
+#define FRF_BZ_MSIX_PBA_PEND_DWORD_WIDTH 32
+
+/* SRM_DBG_REG: SRAM debug access */
+#define FR_BZ_SRM_DBG 0x03000000
+#define FR_BZ_SRM_DBG_STEP 8
+#define FR_CZ_SRM_DBG_ROWS 262144
+#define FR_BB_SRM_DBG_ROWS 2097152
+#define FRF_BZ_SRM_DBG_LBN 0
+#define FRF_BZ_SRM_DBG_WIDTH 64
+
+/* TB_MSIX_PBA_TABLE: MSIX Pending Bit Array */
+#define FR_CZ_TB_MSIX_PBA_TABLE 0x00008000
+#define FR_CZ_TB_MSIX_PBA_TABLE_STEP 4
+#define FR_CZ_TB_MSIX_PBA_TABLE_ROWS 1024
+#define FRF_CZ_TB_MSIX_PBA_PEND_DWORD_LBN 0
+#define FRF_CZ_TB_MSIX_PBA_PEND_DWORD_WIDTH 32
+
+/* DRIVER_EV */
+#define FSF_AZ_DRIVER_EV_SUBCODE_LBN 56
+#define FSF_AZ_DRIVER_EV_SUBCODE_WIDTH 4
+#define FSE_BZ_TX_DSC_ERROR_EV 15
+#define FSE_BZ_RX_DSC_ERROR_EV 14
+#define FSE_AA_RX_RECOVER_EV 11
+#define FSE_AZ_TIMER_EV 10
+#define FSE_AZ_TX_PKT_NON_TCP_UDP 9
+#define FSE_AZ_WAKE_UP_EV 6
+#define FSE_AZ_SRM_UPD_DONE_EV 5
+#define FSE_AB_EVQ_NOT_EN_EV 3
+#define FSE_AZ_EVQ_INIT_DONE_EV 2
+#define FSE_AZ_RX_DESCQ_FLS_DONE_EV 1
+#define FSE_AZ_TX_DESCQ_FLS_DONE_EV 0
+#define FSF_AZ_DRIVER_EV_SUBDATA_LBN 0
+#define FSF_AZ_DRIVER_EV_SUBDATA_WIDTH 14
+
+/* EVENT_ENTRY */
+#define FSF_AZ_EV_CODE_LBN 60
+#define FSF_AZ_EV_CODE_WIDTH 4
+#define FSE_CZ_EV_CODE_MCDI_EV 12
+#define FSE_CZ_EV_CODE_USER_EV 8
+#define FSE_AZ_EV_CODE_DRV_GEN_EV 7
+#define FSE_AZ_EV_CODE_GLOBAL_EV 6
+#define FSE_AZ_EV_CODE_DRIVER_EV 5
+#define FSE_AZ_EV_CODE_TX_EV 2
+#define FSE_AZ_EV_CODE_RX_EV 0
+#define FSF_AZ_EV_DATA_LBN 0
+#define FSF_AZ_EV_DATA_WIDTH 60
+
+/* GLOBAL_EV */
+#define FSF_BB_GLB_EV_RX_RECOVERY_LBN 12
+#define FSF_BB_GLB_EV_RX_RECOVERY_WIDTH 1
+#define FSF_AA_GLB_EV_RX_RECOVERY_LBN 11
+#define FSF_AA_GLB_EV_RX_RECOVERY_WIDTH 1
+#define FSF_BB_GLB_EV_XG_MGT_INTR_LBN 11
+#define FSF_BB_GLB_EV_XG_MGT_INTR_WIDTH 1
+#define FSF_AB_GLB_EV_XFP_PHY0_INTR_LBN 10
+#define FSF_AB_GLB_EV_XFP_PHY0_INTR_WIDTH 1
+#define FSF_AB_GLB_EV_XG_PHY0_INTR_LBN 9
+#define FSF_AB_GLB_EV_XG_PHY0_INTR_WIDTH 1
+#define FSF_AB_GLB_EV_G_PHY0_INTR_LBN 7
+#define FSF_AB_GLB_EV_G_PHY0_INTR_WIDTH 1
+
+/* LEGACY_INT_VEC */
+#define FSF_AZ_NET_IVEC_FATAL_INT_LBN 64
+#define FSF_AZ_NET_IVEC_FATAL_INT_WIDTH 1
+#define FSF_AZ_NET_IVEC_INT_Q_LBN 40
+#define FSF_AZ_NET_IVEC_INT_Q_WIDTH 4
+#define FSF_AZ_NET_IVEC_INT_FLAG_LBN 32
+#define FSF_AZ_NET_IVEC_INT_FLAG_WIDTH 1
+#define FSF_AZ_NET_IVEC_EVQ_FIFO_HF_LBN 1
+#define FSF_AZ_NET_IVEC_EVQ_FIFO_HF_WIDTH 1
+#define FSF_AZ_NET_IVEC_EVQ_FIFO_AF_LBN 0
+#define FSF_AZ_NET_IVEC_EVQ_FIFO_AF_WIDTH 1
+
+/* MC_XGMAC_FLTR_RULE_DEF */
+#define FSF_CZ_MC_XFRC_MODE_LBN 416
+#define FSF_CZ_MC_XFRC_MODE_WIDTH 1
+#define FSE_CZ_MC_XFRC_MODE_LAYERED 1
+#define FSE_CZ_MC_XFRC_MODE_SIMPLE 0
+#define FSF_CZ_MC_XFRC_HASH_LBN 384
+#define FSF_CZ_MC_XFRC_HASH_WIDTH 32
+#define FSF_CZ_MC_XFRC_LAYER4_BYTE_MASK_LBN 256
+#define FSF_CZ_MC_XFRC_LAYER4_BYTE_MASK_WIDTH 128
+#define FSF_CZ_MC_XFRC_LAYER3_BYTE_MASK_LBN 128
+#define FSF_CZ_MC_XFRC_LAYER3_BYTE_MASK_WIDTH 128
+#define FSF_CZ_MC_XFRC_LAYER2_OR_SIMPLE_BYTE_MASK_LBN 0
+#define FSF_CZ_MC_XFRC_LAYER2_OR_SIMPLE_BYTE_MASK_WIDTH 128
+
+/* RX_EV */
+#define FSF_CZ_RX_EV_PKT_NOT_PARSED_LBN 58
+#define FSF_CZ_RX_EV_PKT_NOT_PARSED_WIDTH 1
+#define FSF_CZ_RX_EV_IPV6_PKT_LBN 57
+#define FSF_CZ_RX_EV_IPV6_PKT_WIDTH 1
+#define FSF_AZ_RX_EV_PKT_OK_LBN 56
+#define FSF_AZ_RX_EV_PKT_OK_WIDTH 1
+#define FSF_AZ_RX_EV_PAUSE_FRM_ERR_LBN 55
+#define FSF_AZ_RX_EV_PAUSE_FRM_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_BUF_OWNER_ID_ERR_LBN 54
+#define FSF_AZ_RX_EV_BUF_OWNER_ID_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_IP_FRAG_ERR_LBN 53
+#define FSF_AZ_RX_EV_IP_FRAG_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR_LBN 52
+#define FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR_LBN 51
+#define FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_ETH_CRC_ERR_LBN 50
+#define FSF_AZ_RX_EV_ETH_CRC_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_FRM_TRUNC_LBN 49
+#define FSF_AZ_RX_EV_FRM_TRUNC_WIDTH 1
+#define FSF_AA_RX_EV_DRIB_NIB_LBN 49
+#define FSF_AA_RX_EV_DRIB_NIB_WIDTH 1
+#define FSF_AZ_RX_EV_TOBE_DISC_LBN 47
+#define FSF_AZ_RX_EV_TOBE_DISC_WIDTH 1
+#define FSF_AZ_RX_EV_PKT_TYPE_LBN 44
+#define FSF_AZ_RX_EV_PKT_TYPE_WIDTH 3
+#define FSE_AZ_RX_EV_PKT_TYPE_VLAN_JUMBO 5
+#define FSE_AZ_RX_EV_PKT_TYPE_VLAN_LLC 4
+#define FSE_AZ_RX_EV_PKT_TYPE_VLAN 3
+#define FSE_AZ_RX_EV_PKT_TYPE_JUMBO 2
+#define FSE_AZ_RX_EV_PKT_TYPE_LLC 1
+#define FSE_AZ_RX_EV_PKT_TYPE_ETH 0
+#define FSF_AZ_RX_EV_HDR_TYPE_LBN 42
+#define FSF_AZ_RX_EV_HDR_TYPE_WIDTH 2
+#define FSE_AZ_RX_EV_HDR_TYPE_OTHER 3
+#define FSE_AB_RX_EV_HDR_TYPE_IPV4_OTHER 2
+#define FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER 2
+#define FSE_AB_RX_EV_HDR_TYPE_IPV4_UDP 1
+#define FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP 1
+#define FSE_AB_RX_EV_HDR_TYPE_IPV4_TCP 0
+#define FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP 0
+#define FSF_AZ_RX_EV_DESC_Q_EMPTY_LBN 41
+#define FSF_AZ_RX_EV_DESC_Q_EMPTY_WIDTH 1
+#define FSF_AZ_RX_EV_MCAST_HASH_MATCH_LBN 40
+#define FSF_AZ_RX_EV_MCAST_HASH_MATCH_WIDTH 1
+#define FSF_AZ_RX_EV_MCAST_PKT_LBN 39
+#define FSF_AZ_RX_EV_MCAST_PKT_WIDTH 1
+#define FSF_AA_RX_EV_RECOVERY_FLAG_LBN 37
+#define FSF_AA_RX_EV_RECOVERY_FLAG_WIDTH 1
+#define FSF_AZ_RX_EV_Q_LABEL_LBN 32
+#define FSF_AZ_RX_EV_Q_LABEL_WIDTH 5
+#define FSF_AZ_RX_EV_JUMBO_CONT_LBN 31
+#define FSF_AZ_RX_EV_JUMBO_CONT_WIDTH 1
+#define FSF_AZ_RX_EV_PORT_LBN 30
+#define FSF_AZ_RX_EV_PORT_WIDTH 1
+#define FSF_AZ_RX_EV_BYTE_CNT_LBN 16
+#define FSF_AZ_RX_EV_BYTE_CNT_WIDTH 14
+#define FSF_AZ_RX_EV_SOP_LBN 15
+#define FSF_AZ_RX_EV_SOP_WIDTH 1
+#define FSF_AZ_RX_EV_ISCSI_PKT_OK_LBN 14
+#define FSF_AZ_RX_EV_ISCSI_PKT_OK_WIDTH 1
+#define FSF_AZ_RX_EV_ISCSI_DDIG_ERR_LBN 13
+#define FSF_AZ_RX_EV_ISCSI_DDIG_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_ISCSI_HDIG_ERR_LBN 12
+#define FSF_AZ_RX_EV_ISCSI_HDIG_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_DESC_PTR_LBN 0
+#define FSF_AZ_RX_EV_DESC_PTR_WIDTH 12
+
+/* RX_KER_DESC */
+#define FSF_AZ_RX_KER_BUF_SIZE_LBN 48
+#define FSF_AZ_RX_KER_BUF_SIZE_WIDTH 14
+#define FSF_AZ_RX_KER_BUF_REGION_LBN 46
+#define FSF_AZ_RX_KER_BUF_REGION_WIDTH 2
+#define FSF_AZ_RX_KER_BUF_ADDR_LBN 0
+#define FSF_AZ_RX_KER_BUF_ADDR_WIDTH 46
+
+/* RX_USER_DESC */
+#define FSF_AZ_RX_USER_2BYTE_OFFSET_LBN 20
+#define FSF_AZ_RX_USER_2BYTE_OFFSET_WIDTH 12
+#define FSF_AZ_RX_USER_BUF_ID_LBN 0
+#define FSF_AZ_RX_USER_BUF_ID_WIDTH 20
+
+/* TX_EV */
+#define FSF_AZ_TX_EV_PKT_ERR_LBN 38
+#define FSF_AZ_TX_EV_PKT_ERR_WIDTH 1
+#define FSF_AZ_TX_EV_PKT_TOO_BIG_LBN 37
+#define FSF_AZ_TX_EV_PKT_TOO_BIG_WIDTH 1
+#define FSF_AZ_TX_EV_Q_LABEL_LBN 32
+#define FSF_AZ_TX_EV_Q_LABEL_WIDTH 5
+#define FSF_AZ_TX_EV_PORT_LBN 16
+#define FSF_AZ_TX_EV_PORT_WIDTH 1
+#define FSF_AZ_TX_EV_WQ_FF_FULL_LBN 15
+#define FSF_AZ_TX_EV_WQ_FF_FULL_WIDTH 1
+#define FSF_AZ_TX_EV_BUF_OWNER_ID_ERR_LBN 14
+#define FSF_AZ_TX_EV_BUF_OWNER_ID_ERR_WIDTH 1
+#define FSF_AZ_TX_EV_COMP_LBN 12
+#define FSF_AZ_TX_EV_COMP_WIDTH 1
+#define FSF_AZ_TX_EV_DESC_PTR_LBN 0
+#define FSF_AZ_TX_EV_DESC_PTR_WIDTH 12
+
+/* TX_KER_DESC */
+#define FSF_AZ_TX_KER_CONT_LBN 62
+#define FSF_AZ_TX_KER_CONT_WIDTH 1
+#define FSF_AZ_TX_KER_BYTE_COUNT_LBN 48
+#define FSF_AZ_TX_KER_BYTE_COUNT_WIDTH 14
+#define FSF_AZ_TX_KER_BUF_REGION_LBN 46
+#define FSF_AZ_TX_KER_BUF_REGION_WIDTH 2
+#define FSF_AZ_TX_KER_BUF_ADDR_LBN 0
+#define FSF_AZ_TX_KER_BUF_ADDR_WIDTH 46
+
+/* TX_USER_DESC */
+#define FSF_AZ_TX_USER_SW_EV_EN_LBN 48
+#define FSF_AZ_TX_USER_SW_EV_EN_WIDTH 1
+#define FSF_AZ_TX_USER_CONT_LBN 46
+#define FSF_AZ_TX_USER_CONT_WIDTH 1
+#define FSF_AZ_TX_USER_BYTE_CNT_LBN 33
+#define FSF_AZ_TX_USER_BYTE_CNT_WIDTH 13
+#define FSF_AZ_TX_USER_BUF_ID_LBN 13
+#define FSF_AZ_TX_USER_BUF_ID_WIDTH 20
+#define FSF_AZ_TX_USER_BYTE_OFS_LBN 0
+#define FSF_AZ_TX_USER_BYTE_OFS_WIDTH 13
+
+/* USER_EV */
+#define FSF_CZ_USER_QID_LBN 32
+#define FSF_CZ_USER_QID_WIDTH 10
+#define FSF_CZ_USER_EV_REG_VALUE_LBN 0
+#define FSF_CZ_USER_EV_REG_VALUE_WIDTH 32
+
+/**************************************************************************
+ *
+ * Falcon B0 PCIe core indirect registers
+ *
+ **************************************************************************
+ */
+
+#define FPCR_BB_PCIE_DEVICE_CTRL_STAT 0x68
+
+#define FPCR_BB_PCIE_LINK_CTRL_STAT 0x70
+
+#define FPCR_BB_ACK_RPL_TIMER 0x700
+#define FPCRF_BB_ACK_TL_LBN 0
+#define FPCRF_BB_ACK_TL_WIDTH 16
+#define FPCRF_BB_RPL_TL_LBN 16
+#define FPCRF_BB_RPL_TL_WIDTH 16
+
+#define FPCR_BB_ACK_FREQ 0x70C
+#define FPCRF_BB_ACK_FREQ_LBN 0
+#define FPCRF_BB_ACK_FREQ_WIDTH 7
+
+/**************************************************************************
+ *
+ * Pseudo-registers and fields
+ *
+ **************************************************************************
+ */
+
+/* Interrupt acknowledge work-around register (A0/A1 only) */
+#define FR_AA_WORK_AROUND_BROKEN_PCI_READS 0x0070
+
+/* EE_SPI_HCMD_REG: SPI host command register */
+/* Values for the EE_SPI_HCMD_SF_SEL register field */
+#define FFE_AB_SPI_DEVICE_EEPROM 0
+#define FFE_AB_SPI_DEVICE_FLASH 1
+
+/* NIC_STAT_REG: NIC status register */
+#define FRF_AB_STRAP_10G_LBN 2
+#define FRF_AB_STRAP_10G_WIDTH 1
+#define FRF_AA_STRAP_PCIE_LBN 0
+#define FRF_AA_STRAP_PCIE_WIDTH 1
+
+/* FATAL_INTR_REG_KER: Fatal interrupt register for Kernel */
+#define FRF_AZ_FATAL_INTR_LBN 0
+#define FRF_AZ_FATAL_INTR_WIDTH 12
+
+/* SRM_CFG_REG: SRAM configuration register */
+/* We treat the number of SRAM banks and bank size as a single field */
+#define FRF_AZ_SRM_NB_SZ_LBN FRF_AZ_SRM_BANK_SIZE_LBN
+#define FRF_AZ_SRM_NB_SZ_WIDTH \
+ (FRF_AZ_SRM_BANK_SIZE_WIDTH + FRF_AZ_SRM_NUM_BANK_WIDTH)
+#define FFE_AB_SRM_NB1_SZ2M 0
+#define FFE_AB_SRM_NB1_SZ4M 1
+#define FFE_AB_SRM_NB1_SZ8M 2
+#define FFE_AB_SRM_NB_SZ_DEF 3
+#define FFE_AB_SRM_NB2_SZ4M 4
+#define FFE_AB_SRM_NB2_SZ8M 5
+#define FFE_AB_SRM_NB2_SZ16M 6
+#define FFE_AB_SRM_NB_SZ_RES 7
+
+/* RX_DESC_UPD_REGP0: Receive descriptor update register. */
+/* We write just the last dword of these registers */
+#define FR_AZ_RX_DESC_UPD_DWORD_P0 \
+ (BUILD_BUG_ON_ZERO(FR_AA_RX_DESC_UPD_KER != FR_BZ_RX_DESC_UPD_P0) + \
+ FR_BZ_RX_DESC_UPD_P0 + 3 * 4)
+#define FRF_AZ_RX_DESC_WPTR_DWORD_LBN (FRF_AZ_RX_DESC_WPTR_LBN - 3 * 32)
+#define FRF_AZ_RX_DESC_WPTR_DWORD_WIDTH FRF_AZ_RX_DESC_WPTR_WIDTH
+
+/* TX_DESC_UPD_REGP0: Transmit descriptor update register. */
+#define FR_AZ_TX_DESC_UPD_DWORD_P0 \
+ (BUILD_BUG_ON_ZERO(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0) + \
+ FR_BZ_TX_DESC_UPD_P0 + 3 * 4)
+#define FRF_AZ_TX_DESC_WPTR_DWORD_LBN (FRF_AZ_TX_DESC_WPTR_LBN - 3 * 32)
+#define FRF_AZ_TX_DESC_WPTR_DWORD_WIDTH FRF_AZ_TX_DESC_WPTR_WIDTH
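These dword pseudo-registers let the driver ring a descriptor-queue doorbell by writing only the high 32 bits of the 128-bit RX/TX_DESC_UPD registers. The following is a minimal sketch of pushing a transmit write pointer this way, modelled on the driver's farch code; efx_writed_page() is defined in io.h and EFX_POPULATE_DWORD_1() in bitfield.h, and the function name here is illustrative only.

	static inline void example_notify_tx_desc(struct efx_tx_queue *tx_queue)
	{
		unsigned int write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
		efx_dword_t reg;

		/* Write just the WPTR dword of this queue's TX_DESC_UPD page.
		 * Because only the last dword of the 128-bit register is
		 * written, the BIU fills the low 96 bits with zero, which is
		 * harmless for this register (see the notes in io.h).
		 */
		EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
		efx_writed_page(tx_queue->efx, &reg,
				FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
	}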
+
+/* GMF_CFG4_REG: GMAC FIFO configuration register 4 */
+#define FRF_AB_GMF_HSTFLTRFRM_PAUSE_LBN 12
+#define FRF_AB_GMF_HSTFLTRFRM_PAUSE_WIDTH 1
+
+/* GMF_CFG5_REG: GMAC FIFO configuration register 5 */
+#define FRF_AB_GMF_HSTFLTRFRMDC_PAUSE_LBN 12
+#define FRF_AB_GMF_HSTFLTRFRMDC_PAUSE_WIDTH 1
+
+/* XM_TX_PARAM_REG: XGMAC transmit parameter register */
+#define FRF_AB_XM_MAX_TX_FRM_SIZE_LBN FRF_AB_XM_MAX_TX_FRM_SIZE_LO_LBN
+#define FRF_AB_XM_MAX_TX_FRM_SIZE_WIDTH (FRF_AB_XM_MAX_TX_FRM_SIZE_HI_WIDTH + \
+ FRF_AB_XM_MAX_TX_FRM_SIZE_LO_WIDTH)
+
+/* XM_RX_PARAM_REG: XGMAC receive parameter register */
+#define FRF_AB_XM_MAX_RX_FRM_SIZE_LBN FRF_AB_XM_MAX_RX_FRM_SIZE_LO_LBN
+#define FRF_AB_XM_MAX_RX_FRM_SIZE_WIDTH (FRF_AB_XM_MAX_RX_FRM_SIZE_HI_WIDTH + \
+ FRF_AB_XM_MAX_RX_FRM_SIZE_LO_WIDTH)
+
+/* XX_TXDRV_CTL_REG: XAUI SerDes transmit drive control register */
+/* Default values */
+#define FFE_AB_XX_TXDRV_DEQ_DEF 0xe /* deq=.6 */
+#define FFE_AB_XX_TXDRV_DTX_DEF 0x5 /* 1.25 */
+#define FFE_AB_XX_SD_CTL_DRV_DEF 0 /* 20mA */
+
+/* XX_CORE_STAT_REG: XAUI XGXS core status register */
+/* XGXS all-lanes status fields */
+#define FRF_AB_XX_SYNC_STAT_LBN FRF_AB_XX_SYNC_STAT0_LBN
+#define FRF_AB_XX_SYNC_STAT_WIDTH 4
+#define FRF_AB_XX_COMMA_DET_LBN FRF_AB_XX_COMMA_DET_CH0_LBN
+#define FRF_AB_XX_COMMA_DET_WIDTH 4
+#define FRF_AB_XX_CHAR_ERR_LBN FRF_AB_XX_CHAR_ERR_CH0_LBN
+#define FRF_AB_XX_CHAR_ERR_WIDTH 4
+#define FRF_AB_XX_DISPERR_LBN FRF_AB_XX_DISPERR_CH0_LBN
+#define FRF_AB_XX_DISPERR_WIDTH 4
+#define FFE_AB_XX_STAT_ALL_LANES 0xf
+#define FRF_AB_XX_FORCE_SIG_LBN FRF_AB_XX_FORCE_SIG0_VAL_LBN
+#define FRF_AB_XX_FORCE_SIG_WIDTH 8
+#define FFE_AB_XX_FORCE_SIG_ALL_LANES 0xff
+
+/* RX_MAC_FILTER_TBL0 */
+/* RMFT_DEST_MAC is wider than 32 bits */
+#define FRF_CZ_RMFT_DEST_MAC_LO_LBN FRF_CZ_RMFT_DEST_MAC_LBN
+#define FRF_CZ_RMFT_DEST_MAC_LO_WIDTH 32
+#define FRF_CZ_RMFT_DEST_MAC_HI_LBN (FRF_CZ_RMFT_DEST_MAC_LBN + 32)
+#define FRF_CZ_RMFT_DEST_MAC_HI_WIDTH (FRF_CZ_RMFT_DEST_MAC_WIDTH - 32)
+
+/* TX_MAC_FILTER_TBL0 */
+/* TMFT_SRC_MAC is wider than 32 bits */
+#define FRF_CZ_TMFT_SRC_MAC_LO_LBN FRF_CZ_TMFT_SRC_MAC_LBN
+#define FRF_CZ_TMFT_SRC_MAC_LO_WIDTH 32
+#define FRF_CZ_TMFT_SRC_MAC_HI_LBN (FRF_CZ_TMFT_SRC_MAC_LBN + 32)
+#define FRF_CZ_TMFT_SRC_MAC_HI_WIDTH (FRF_CZ_TMFT_SRC_MAC_WIDTH - 32)
+
+/* TX_PACE_TBL */
+/* Values >20 are documented as reserved, but will result in a queue going
+ * into the fast bin with a pace value of zero. */
+#define FFE_BZ_TX_PACE_OFF 0
+#define FFE_BZ_TX_PACE_RESERVED 21
+
+/* DRIVER_EV */
+/* Sub-fields of an RX flush completion event */
+#define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_LBN 12
+#define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_WIDTH 1
+#define FSF_AZ_DRIVER_EV_RX_DESCQ_ID_LBN 0
+#define FSF_AZ_DRIVER_EV_RX_DESCQ_ID_WIDTH 12
+
+/* EVENT_ENTRY */
+/* Magic number field for event test */
+#define FSF_AZ_DRV_GEN_EV_MAGIC_LBN 0
+#define FSF_AZ_DRV_GEN_EV_MAGIC_WIDTH 32
+
+/* RX packet prefix */
+#define FS_BZ_RX_PREFIX_HASH_OFST 12
+#define FS_BZ_RX_PREFIX_SIZE 16
+
+#endif /* EFX_FARCH_REGS_H */
diff --git a/kernel/drivers/net/ethernet/sfc/filter.h b/kernel/drivers/net/ethernet/sfc/filter.h
new file mode 100644
index 000000000..d0ed7f71e
--- /dev/null
+++ b/kernel/drivers/net/ethernet/sfc/filter.h
@@ -0,0 +1,272 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2013 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_FILTER_H
+#define EFX_FILTER_H
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+#include <asm/byteorder.h>
+
+/**
+ * enum efx_filter_match_flags - Flags for hardware filter match type
+ * @EFX_FILTER_MATCH_REM_HOST: Match by remote IP host address
+ * @EFX_FILTER_MATCH_LOC_HOST: Match by local IP host address
+ * @EFX_FILTER_MATCH_REM_MAC: Match by remote MAC address
+ * @EFX_FILTER_MATCH_REM_PORT: Match by remote TCP/UDP port
+ * @EFX_FILTER_MATCH_LOC_MAC: Match by local MAC address
+ * @EFX_FILTER_MATCH_LOC_PORT: Match by local TCP/UDP port
+ * @EFX_FILTER_MATCH_ETHER_TYPE: Match by Ether-type
+ * @EFX_FILTER_MATCH_INNER_VID: Match by inner VLAN ID
+ * @EFX_FILTER_MATCH_OUTER_VID: Match by outer VLAN ID
+ * @EFX_FILTER_MATCH_IP_PROTO: Match by IP transport protocol
+ * @EFX_FILTER_MATCH_LOC_MAC_IG: Match by local MAC address I/G bit.
+ * Used for RX default unicast and multicast/broadcast filters.
+ *
+ * Only some combinations are supported, depending on NIC type:
+ *
+ * - Falcon supports RX filters matching by {TCP,UDP}/IPv4 4-tuple or
+ * local 2-tuple (only implemented for Falcon B0)
+ *
+ * - Siena supports RX and TX filters matching by {TCP,UDP}/IPv4 4-tuple
+ * or local 2-tuple, or local MAC with or without outer VID, and RX
+ * default filters
+ *
+ * - Huntington supports filter matching controlled by firmware, potentially
+ * using {TCP,UDP}/IPv{4,6} 4-tuple or local 2-tuple, local MAC or I/G bit,
+ * with or without outer and inner VID
+ */
+enum efx_filter_match_flags {
+ EFX_FILTER_MATCH_REM_HOST = 0x0001,
+ EFX_FILTER_MATCH_LOC_HOST = 0x0002,
+ EFX_FILTER_MATCH_REM_MAC = 0x0004,
+ EFX_FILTER_MATCH_REM_PORT = 0x0008,
+ EFX_FILTER_MATCH_LOC_MAC = 0x0010,
+ EFX_FILTER_MATCH_LOC_PORT = 0x0020,
+ EFX_FILTER_MATCH_ETHER_TYPE = 0x0040,
+ EFX_FILTER_MATCH_INNER_VID = 0x0080,
+ EFX_FILTER_MATCH_OUTER_VID = 0x0100,
+ EFX_FILTER_MATCH_IP_PROTO = 0x0200,
+ EFX_FILTER_MATCH_LOC_MAC_IG = 0x0400,
+};
+
+/**
+ * enum efx_filter_priority - priority of a hardware filter specification
+ * @EFX_FILTER_PRI_HINT: Performance hint
+ * @EFX_FILTER_PRI_AUTO: Automatic filter based on device address list
+ * or hardware requirements. This may only be used by the filter
+ * implementation for each NIC type.
+ * @EFX_FILTER_PRI_MANUAL: Manually configured filter
+ * @EFX_FILTER_PRI_REQUIRED: Required for correct behaviour (user-level
+ * networking and SR-IOV)
+ */
+enum efx_filter_priority {
+ EFX_FILTER_PRI_HINT = 0,
+ EFX_FILTER_PRI_AUTO,
+ EFX_FILTER_PRI_MANUAL,
+ EFX_FILTER_PRI_REQUIRED,
+};
+
+/**
+ * enum efx_filter_flags - flags for hardware filter specifications
+ * @EFX_FILTER_FLAG_RX_RSS: Use RSS to spread across multiple queues.
+ * By default, matching packets will be delivered only to the
+ * specified queue. If this flag is set, they will be delivered
+ * to a range of queues offset from the specified queue number
+ * according to the indirection table.
+ * @EFX_FILTER_FLAG_RX_SCATTER: Enable DMA scatter on the receiving
+ * queue.
+ * @EFX_FILTER_FLAG_RX_OVER_AUTO: Indicates a filter that is
+ * overriding an automatic filter (priority
+ * %EFX_FILTER_PRI_AUTO). This may only be set by the filter
+ * implementation for each type. A removal request will restore
+ * the automatic filter in its place.
+ * @EFX_FILTER_FLAG_RX: Filter is for RX
+ * @EFX_FILTER_FLAG_TX: Filter is for TX
+ */
+enum efx_filter_flags {
+ EFX_FILTER_FLAG_RX_RSS = 0x01,
+ EFX_FILTER_FLAG_RX_SCATTER = 0x02,
+ EFX_FILTER_FLAG_RX_OVER_AUTO = 0x04,
+ EFX_FILTER_FLAG_RX = 0x08,
+ EFX_FILTER_FLAG_TX = 0x10,
+};
+
+/**
+ * struct efx_filter_spec - specification for a hardware filter
+ * @match_flags: Match type flags, from &enum efx_filter_match_flags
+ * @priority: Priority of the filter, from &enum efx_filter_priority
+ * @flags: Miscellaneous flags, from &enum efx_filter_flags
+ * @rss_context: RSS context to use, if %EFX_FILTER_FLAG_RX_RSS is set
+ * @dmaq_id: Source/target queue index, or %EFX_FILTER_RX_DMAQ_ID_DROP for
+ * an RX drop filter
+ * @outer_vid: Outer VLAN ID to match, if %EFX_FILTER_MATCH_OUTER_VID is set
+ * @inner_vid: Inner VLAN ID to match, if %EFX_FILTER_MATCH_INNER_VID is set
+ * @loc_mac: Local MAC address to match, if %EFX_FILTER_MATCH_LOC_MAC or
+ * %EFX_FILTER_MATCH_LOC_MAC_IG is set
+ * @rem_mac: Remote MAC address to match, if %EFX_FILTER_MATCH_REM_MAC is set
+ * @ether_type: Ether-type to match, if %EFX_FILTER_MATCH_ETHER_TYPE is set
+ * @ip_proto: IP transport protocol to match, if %EFX_FILTER_MATCH_IP_PROTO
+ * is set
+ * @loc_host: Local IP host to match, if %EFX_FILTER_MATCH_LOC_HOST is set
+ * @rem_host: Remote IP host to match, if %EFX_FILTER_MATCH_REM_HOST is set
+ * @loc_port: Local TCP/UDP port to match, if %EFX_FILTER_MATCH_LOC_PORT is set
+ * @rem_port: Remote TCP/UDP port to match, if %EFX_FILTER_MATCH_REM_PORT is set
+ *
+ * The efx_filter_init_rx() or efx_filter_init_tx() function *must* be
+ * used to initialise the structure. The efx_filter_set_*() functions
+ * may then be used to set @rss_context, @match_flags and related
+ * fields.
+ *
+ * The @priority field is used by software to determine whether a new
+ * filter may replace an old one. The hardware priority of a filter
+ * depends on which fields are matched.
+ */
+struct efx_filter_spec {
+ u32 match_flags:12;
+ u32 priority:2;
+ u32 flags:6;
+ u32 dmaq_id:12;
+ u32 rss_context;
+ __be16 outer_vid __aligned(4); /* allow jhash2() of match values */
+ __be16 inner_vid;
+ u8 loc_mac[ETH_ALEN];
+ u8 rem_mac[ETH_ALEN];
+ __be16 ether_type;
+ u8 ip_proto;
+ __be32 loc_host[4];
+ __be32 rem_host[4];
+ __be16 loc_port;
+ __be16 rem_port;
+ /* total 64 bytes */
+};
+
+enum {
+ EFX_FILTER_RSS_CONTEXT_DEFAULT = 0xffffffff,
+ EFX_FILTER_RX_DMAQ_ID_DROP = 0xfff
+};
+
+static inline void efx_filter_init_rx(struct efx_filter_spec *spec,
+ enum efx_filter_priority priority,
+ enum efx_filter_flags flags,
+ unsigned rxq_id)
+{
+ memset(spec, 0, sizeof(*spec));
+ spec->priority = priority;
+ spec->flags = EFX_FILTER_FLAG_RX | flags;
+ spec->rss_context = EFX_FILTER_RSS_CONTEXT_DEFAULT;
+ spec->dmaq_id = rxq_id;
+}
+
+static inline void efx_filter_init_tx(struct efx_filter_spec *spec,
+ unsigned txq_id)
+{
+ memset(spec, 0, sizeof(*spec));
+ spec->priority = EFX_FILTER_PRI_REQUIRED;
+ spec->flags = EFX_FILTER_FLAG_TX;
+ spec->dmaq_id = txq_id;
+}
+
+/**
+ * efx_filter_set_ipv4_local - specify IPv4 host, transport protocol and port
+ * @spec: Specification to initialise
+ * @proto: Transport layer protocol number
+ * @host: Local host address (network byte order)
+ * @port: Local port (network byte order)
+ */
+static inline int
+efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto,
+ __be32 host, __be16 port)
+{
+ spec->match_flags |=
+ EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT;
+ spec->ether_type = htons(ETH_P_IP);
+ spec->ip_proto = proto;
+ spec->loc_host[0] = host;
+ spec->loc_port = port;
+ return 0;
+}
+
+/**
+ * efx_filter_set_ipv4_full - specify IPv4 hosts, transport protocol and ports
+ * @spec: Specification to initialise
+ * @proto: Transport layer protocol number
+ * @lhost: Local host address (network byte order)
+ * @lport: Local port (network byte order)
+ * @rhost: Remote host address (network byte order)
+ * @rport: Remote port (network byte order)
+ */
+static inline int
+efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto,
+ __be32 lhost, __be16 lport,
+ __be32 rhost, __be16 rport)
+{
+ spec->match_flags |=
+ EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
+ EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
+ spec->ether_type = htons(ETH_P_IP);
+ spec->ip_proto = proto;
+ spec->loc_host[0] = lhost;
+ spec->loc_port = lport;
+ spec->rem_host[0] = rhost;
+ spec->rem_port = rport;
+ return 0;
+}
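Together, the init and set helpers above make up the spec-building API: initialise the spec with efx_filter_init_rx() or efx_filter_init_tx(), add match criteria with the efx_filter_set_*() helpers, then hand the spec to the filter-table code. A minimal usage sketch, steering local TCP port 80 to RX queue 0, is shown below; efx_filter_insert_filter() is declared in efx.h and returns a filter ID or a negative error, and the wrapper name here is illustrative only.

	static int example_steer_http_to_queue0(struct efx_nic *efx, __be32 local_ip)
	{
		struct efx_filter_spec spec;
		int rc;

		/* RX filter, manually configured, delivered to queue 0 */
		efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 0, 0);
		rc = efx_filter_set_ipv4_local(&spec, IPPROTO_TCP,
					       local_ip, htons(80));
		if (rc)
			return rc;

		rc = efx_filter_insert_filter(efx, &spec, true);
		return rc < 0 ? rc : 0;
	}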
+
+enum {
+ EFX_FILTER_VID_UNSPEC = 0xffff,
+};
+
+/**
+ * efx_filter_set_eth_local - specify local Ethernet address and/or VID
+ * @spec: Specification to initialise
+ * @vid: Outer VLAN ID to match, or %EFX_FILTER_VID_UNSPEC
+ * @addr: Local Ethernet MAC address, or %NULL
+ */
+static inline int efx_filter_set_eth_local(struct efx_filter_spec *spec,
+ u16 vid, const u8 *addr)
+{
+ if (vid == EFX_FILTER_VID_UNSPEC && addr == NULL)
+ return -EINVAL;
+
+ if (vid != EFX_FILTER_VID_UNSPEC) {
+ spec->match_flags |= EFX_FILTER_MATCH_OUTER_VID;
+ spec->outer_vid = htons(vid);
+ }
+ if (addr != NULL) {
+ spec->match_flags |= EFX_FILTER_MATCH_LOC_MAC;
+ ether_addr_copy(spec->loc_mac, addr);
+ }
+ return 0;
+}
+
+/**
+ * efx_filter_set_uc_def - specify matching otherwise-unmatched unicast
+ * @spec: Specification to initialise
+ */
+static inline int efx_filter_set_uc_def(struct efx_filter_spec *spec)
+{
+ spec->match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG;
+ return 0;
+}
+
+/**
+ * efx_filter_set_mc_def - specify matching otherwise-unmatched multicast
+ * @spec: Specification to initialise
+ */
+static inline int efx_filter_set_mc_def(struct efx_filter_spec *spec)
+{
+ spec->match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG;
+ spec->loc_mac[0] = 1;
+ return 0;
+}
+
+#endif /* EFX_FILTER_H */
diff --git a/kernel/drivers/net/ethernet/sfc/io.h b/kernel/drivers/net/ethernet/sfc/io.h
new file mode 100644
index 000000000..afb94aa2c
--- /dev/null
+++ b/kernel/drivers/net/ethernet/sfc/io.h
@@ -0,0 +1,302 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2013 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_IO_H
+#define EFX_IO_H
+
+#include <linux/io.h>
+#include <linux/spinlock.h>
+
+/**************************************************************************
+ *
+ * NIC register I/O
+ *
+ **************************************************************************
+ *
+ * Notes on locking strategy for the Falcon architecture:
+ *
+ * Many CSRs are very wide and cannot be read or written atomically.
+ * Writes from the host are buffered by the Bus Interface Unit (BIU)
+ * up to 128 bits. Whenever the host writes part of such a register,
+ * the BIU collects the written value and does not write to the
+ * underlying register until all 4 dwords have been written. A
+ * similar buffering scheme applies to host access to the NIC's 64-bit
+ * SRAM.
+ *
+ * Writes to different CSRs and 64-bit SRAM words must be serialised,
+ * since interleaved access can result in lost writes. We use
+ * efx_nic::biu_lock for this.
+ *
+ * We also serialise reads from 128-bit CSRs and SRAM with the same
+ * spinlock. This may not be necessary, but it doesn't really matter
+ * as there are no such reads on the fast path.
+ *
+ * The DMA descriptor pointers (RX_DESC_UPD and TX_DESC_UPD) are
+ * 128-bit but are special-cased in the BIU to avoid the need for
+ * locking in the host:
+ *
+ * - They are write-only.
+ * - The semantics of writing to these registers are such that
+ * replacing the low 96 bits with zero does not affect functionality.
+ * - If the host writes to the last dword address of such a register
+ * (i.e. the high 32 bits) the underlying register will always be
+ * written. If the collector and the current write together do not
+ * provide values for all 128 bits of the register, the low 96 bits
+ * will be written as zero.
+ * - If the host writes to the address of any other part of such a
+ * register while the collector already holds values for some other
+ * register, the write is discarded and the collector maintains its
+ * current state.
+ *
+ * The EF10 architecture exposes very few registers to the host and
+ * most of them are only 32 bits wide. The only exceptions are the MC
+ * doorbell register pair, which has its own latching, and
+ * TX_DESC_UPD, which works in a similar way to the Falcon
+ * architecture.
+ */
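As a concrete illustration of the rules above, any read-modify-write of a wide CSR must go through the locked accessors so that the four partial BIU transfers of each access cannot interleave with other register traffic. The sketch below assumes the table accessors defined later in this file, EFX_SET_OWORD_FIELD() from bitfield.h, and the RX descriptor pointer table fields from farch_regs.h; the function name is illustrative only.

	static void example_disable_rx_queue(struct efx_nic *efx, unsigned int queue)
	{
		efx_oword_t reg;

		/* Both accessors take efx->biu_lock internally, so the
		 * 4-dword read and the 4-dword write each reach the BIU
		 * without interleaving with other CSR accesses.
		 */
		efx_reado_table(efx, &reg, FR_BZ_RX_DESC_PTR_TBL, queue);
		EFX_SET_OWORD_FIELD(reg, FRF_AZ_RX_DESCQ_EN, 0);
		efx_writeo_table(efx, &reg, FR_BZ_RX_DESC_PTR_TBL, queue);
	}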
+
+#if BITS_PER_LONG == 64
+#define EFX_USE_QWORD_IO 1
+#endif
+
+/* Hardware issue requires that only 64-bit naturally aligned writes
+ * are seen by hardware. It's not strictly necessary to restrict this to the
+ * x86_64 arch, but it is done for safety since unusual write-combining behaviour
+ * can break PIO.
+ */
+#ifdef CONFIG_X86_64
+/* PIO is a win only if write-combining is possible */
+#ifdef ARCH_HAS_IOREMAP_WC
+#define EFX_USE_PIO 1
+#endif
+#endif
+
+#ifdef EFX_USE_QWORD_IO
+static inline void _efx_writeq(struct efx_nic *efx, __le64 value,
+ unsigned int reg)
+{
+ __raw_writeq((__force u64)value, efx->membase + reg);
+}
+static inline __le64 _efx_readq(struct efx_nic *efx, unsigned int reg)
+{
+ return (__force __le64)__raw_readq(efx->membase + reg);
+}
+#endif
+
+static inline void _efx_writed(struct efx_nic *efx, __le32 value,
+ unsigned int reg)
+{
+ __raw_writel((__force u32)value, efx->membase + reg);
+}
+static inline __le32 _efx_readd(struct efx_nic *efx, unsigned int reg)
+{
+ return (__force __le32)__raw_readl(efx->membase + reg);
+}
+
+/* Write a normal 128-bit CSR, locking as appropriate. */
+static inline void efx_writeo(struct efx_nic *efx, const efx_oword_t *value,
+ unsigned int reg)
+{
+ unsigned long flags __attribute__ ((unused));
+
+ netif_vdbg(efx, hw, efx->net_dev,
+ "writing register %x with " EFX_OWORD_FMT "\n", reg,
+ EFX_OWORD_VAL(*value));
+
+ spin_lock_irqsave(&efx->biu_lock, flags);
+#ifdef EFX_USE_QWORD_IO
+ _efx_writeq(efx, value->u64[0], reg + 0);
+ _efx_writeq(efx, value->u64[1], reg + 8);
+#else
+ _efx_writed(efx, value->u32[0], reg + 0);
+ _efx_writed(efx, value->u32[1], reg + 4);
+ _efx_writed(efx, value->u32[2], reg + 8);
+ _efx_writed(efx, value->u32[3], reg + 12);
+#endif
+ mmiowb();
+ spin_unlock_irqrestore(&efx->biu_lock, flags);
+}
+
+/* Write 64-bit SRAM through the supplied mapping, locking as appropriate. */
+static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,
+ const efx_qword_t *value, unsigned int index)
+{
+ unsigned int addr = index * sizeof(*value);
+ unsigned long flags __attribute__ ((unused));
+
+ netif_vdbg(efx, hw, efx->net_dev,
+ "writing SRAM address %x with " EFX_QWORD_FMT "\n",
+ addr, EFX_QWORD_VAL(*value));
+
+ spin_lock_irqsave(&efx->biu_lock, flags);
+#ifdef EFX_USE_QWORD_IO
+ __raw_writeq((__force u64)value->u64[0], membase + addr);
+#else
+ __raw_writel((__force u32)value->u32[0], membase + addr);
+ __raw_writel((__force u32)value->u32[1], membase + addr + 4);
+#endif
+ mmiowb();
+ spin_unlock_irqrestore(&efx->biu_lock, flags);
+}
+
+/* Write a 32-bit CSR or the last dword of a special 128-bit CSR */
+static inline void efx_writed(struct efx_nic *efx, const efx_dword_t *value,
+ unsigned int reg)
+{
+ netif_vdbg(efx, hw, efx->net_dev,
+ "writing register %x with "EFX_DWORD_FMT"\n",
+ reg, EFX_DWORD_VAL(*value));
+
+ /* No lock required */
+ _efx_writed(efx, value->u32[0], reg);
+}
+
+/* Read a 128-bit CSR, locking as appropriate. */
+static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value,
+ unsigned int reg)
+{
+ unsigned long flags __attribute__ ((unused));
+
+ spin_lock_irqsave(&efx->biu_lock, flags);
+ value->u32[0] = _efx_readd(efx, reg + 0);
+ value->u32[1] = _efx_readd(efx, reg + 4);
+ value->u32[2] = _efx_readd(efx, reg + 8);
+ value->u32[3] = _efx_readd(efx, reg + 12);
+ spin_unlock_irqrestore(&efx->biu_lock, flags);
+
+ netif_vdbg(efx, hw, efx->net_dev,
+ "read from register %x, got " EFX_OWORD_FMT "\n", reg,
+ EFX_OWORD_VAL(*value));
+}
+
+/* Read 64-bit SRAM through the supplied mapping, locking as appropriate. */
+static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase,
+ efx_qword_t *value, unsigned int index)
+{
+ unsigned int addr = index * sizeof(*value);
+ unsigned long flags __attribute__ ((unused));
+
+ spin_lock_irqsave(&efx->biu_lock, flags);
+#ifdef EFX_USE_QWORD_IO
+ value->u64[0] = (__force __le64)__raw_readq(membase + addr);
+#else
+ value->u32[0] = (__force __le32)__raw_readl(membase + addr);
+ value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4);
+#endif
+ spin_unlock_irqrestore(&efx->biu_lock, flags);
+
+ netif_vdbg(efx, hw, efx->net_dev,
+ "read from SRAM address %x, got "EFX_QWORD_FMT"\n",
+ addr, EFX_QWORD_VAL(*value));
+}
+
+/* Read a 32-bit CSR or SRAM */
+static inline void efx_readd(struct efx_nic *efx, efx_dword_t *value,
+ unsigned int reg)
+{
+ value->u32[0] = _efx_readd(efx, reg);
+ netif_vdbg(efx, hw, efx->net_dev,
+ "read from register %x, got "EFX_DWORD_FMT"\n",
+ reg, EFX_DWORD_VAL(*value));
+}
+
+/* Write a 128-bit CSR forming part of a table */
+static inline void
+efx_writeo_table(struct efx_nic *efx, const efx_oword_t *value,
+ unsigned int reg, unsigned int index)
+{
+ efx_writeo(efx, value, reg + index * sizeof(efx_oword_t));
+}
+
+/* Read a 128-bit CSR forming part of a table */
+static inline void efx_reado_table(struct efx_nic *efx, efx_oword_t *value,
+ unsigned int reg, unsigned int index)
+{
+ efx_reado(efx, value, reg + index * sizeof(efx_oword_t));
+}
+
+/* Page size used as step between per-VI registers */
+#define EFX_VI_PAGE_SIZE 0x2000
+
+/* Calculate offset to page-mapped register */
+#define EFX_PAGED_REG(page, reg) \
+ ((page) * EFX_VI_PAGE_SIZE + (reg))
+
+/* Write the whole of RX_DESC_UPD or TX_DESC_UPD */
+static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value,
+ unsigned int reg, unsigned int page)
+{
+ reg = EFX_PAGED_REG(page, reg);
+
+ netif_vdbg(efx, hw, efx->net_dev,
+ "writing register %x with " EFX_OWORD_FMT "\n", reg,
+ EFX_OWORD_VAL(*value));
+
+#ifdef EFX_USE_QWORD_IO
+ _efx_writeq(efx, value->u64[0], reg + 0);
+ _efx_writeq(efx, value->u64[1], reg + 8);
+#else
+ _efx_writed(efx, value->u32[0], reg + 0);
+ _efx_writed(efx, value->u32[1], reg + 4);
+ _efx_writed(efx, value->u32[2], reg + 8);
+ _efx_writed(efx, value->u32[3], reg + 12);
+#endif
+}
+#define efx_writeo_page(efx, value, reg, page) \
+ _efx_writeo_page(efx, value, \
+ reg + \
+ BUILD_BUG_ON_ZERO((reg) != 0x830 && (reg) != 0xa10), \
+ page)
+
+/* Write a page-mapped 32-bit CSR (EVQ_RPTR, EVQ_TMR (EF10), or the
+ * high bits of RX_DESC_UPD or TX_DESC_UPD)
+ */
+static inline void
+_efx_writed_page(struct efx_nic *efx, const efx_dword_t *value,
+ unsigned int reg, unsigned int page)
+{
+ efx_writed(efx, value, EFX_PAGED_REG(page, reg));
+}
+#define efx_writed_page(efx, value, reg, page) \
+ _efx_writed_page(efx, value, \
+ reg + \
+ BUILD_BUG_ON_ZERO((reg) != 0x400 && \
+ (reg) != 0x420 && \
+ (reg) != 0x830 && \
+ (reg) != 0x83c && \
+ (reg) != 0xa18 && \
+ (reg) != 0xa1c), \
+ page)
+
+/* Write TIMER_COMMAND. This is a page-mapped 32-bit CSR, but a bug
+ * in the BIU means that writes to TIMER_COMMAND[0] invalidate the
+ * collector register.
+ */
+static inline void _efx_writed_page_locked(struct efx_nic *efx,
+ const efx_dword_t *value,
+ unsigned int reg,
+ unsigned int page)
+{
+ unsigned long flags __attribute__ ((unused));
+
+ if (page == 0) {
+ spin_lock_irqsave(&efx->biu_lock, flags);
+ efx_writed(efx, value, EFX_PAGED_REG(page, reg));
+ spin_unlock_irqrestore(&efx->biu_lock, flags);
+ } else {
+ efx_writed(efx, value, EFX_PAGED_REG(page, reg));
+ }
+}
+#define efx_writed_page_locked(efx, value, reg, page) \
+ _efx_writed_page_locked(efx, value, \
+ reg + BUILD_BUG_ON_ZERO((reg) != 0x420), \
+ page)
+
+#endif /* EFX_IO_H */
diff --git a/kernel/drivers/net/ethernet/sfc/mcdi.c b/kernel/drivers/net/ethernet/sfc/mcdi.c
new file mode 100644
index 000000000..d37928f01
--- /dev/null
+++ b/kernel/drivers/net/ethernet/sfc/mcdi.c
@@ -0,0 +1,1891 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2008-2013 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/delay.h>
+#include <asm/cmpxchg.h>
+#include "net_driver.h"
+#include "nic.h"
+#include "io.h"
+#include "farch_regs.h"
+#include "mcdi_pcol.h"
+#include "phy.h"
+
+/**************************************************************************
+ *
+ * Management-Controller-to-Driver Interface
+ *
+ **************************************************************************
+ */
+
+#define MCDI_RPC_TIMEOUT (10 * HZ)
+
+/* A reboot/assertion causes the MCDI status word to be set after the
+ * command word is set or a REBOOT event is sent. If we notice a reboot
+ * via either of these mechanisms, wait 250ms for the status word to be set.
+ */
+#define MCDI_STATUS_DELAY_US 100
+#define MCDI_STATUS_DELAY_COUNT 2500
+#define MCDI_STATUS_SLEEP_MS \
+ (MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT / 1000)
+
+#define SEQ_MASK \
+ EFX_MASK32(EFX_WIDTH(MCDI_HEADER_SEQ))
+
+struct efx_mcdi_async_param {
+ struct list_head list;
+ unsigned int cmd;
+ size_t inlen;
+ size_t outlen;
+ bool quiet;
+ efx_mcdi_async_completer *complete;
+ unsigned long cookie;
+ /* followed by request/response buffer */
+};
+
+static void efx_mcdi_timeout_async(unsigned long context);
+static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
+ bool *was_attached_out);
+static bool efx_mcdi_poll_once(struct efx_nic *efx);
+static void efx_mcdi_abandon(struct efx_nic *efx);
+
+int efx_mcdi_init(struct efx_nic *efx)
+{
+ struct efx_mcdi_iface *mcdi;
+ bool already_attached;
+ int rc;
+
+ efx->mcdi = kzalloc(sizeof(*efx->mcdi), GFP_KERNEL);
+ if (!efx->mcdi)
+ return -ENOMEM;
+
+ mcdi = efx_mcdi(efx);
+ mcdi->efx = efx;
+ init_waitqueue_head(&mcdi->wq);
+ spin_lock_init(&mcdi->iface_lock);
+ mcdi->state = MCDI_STATE_QUIESCENT;
+ mcdi->mode = MCDI_MODE_POLL;
+ spin_lock_init(&mcdi->async_lock);
+ INIT_LIST_HEAD(&mcdi->async_list);
+ setup_timer(&mcdi->async_timer, efx_mcdi_timeout_async,
+ (unsigned long)mcdi);
+
+ (void) efx_mcdi_poll_reboot(efx);
+ mcdi->new_epoch = true;
+
+ /* Recover from a failed assertion before probing */
+ rc = efx_mcdi_handle_assertion(efx);
+ if (rc)
+ return rc;
+
+ /* Let the MC (and BMC, if this is a LOM) know that the driver
+ * is loaded. We should do this before we reset the NIC.
+ */
+ rc = efx_mcdi_drv_attach(efx, true, &already_attached);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "Unable to register driver with MCPU\n");
+ return rc;
+ }
+ if (already_attached)
+ /* Not a fatal error */
+ netif_err(efx, probe, efx->net_dev,
+ "Host already registered with MCPU\n");
+
+ if (efx->mcdi->fn_flags &
+ (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY))
+ efx->primary = efx;
+
+ return 0;
+}
+
+void efx_mcdi_fini(struct efx_nic *efx)
+{
+ if (!efx->mcdi)
+ return;
+
+ BUG_ON(efx->mcdi->iface.state != MCDI_STATE_QUIESCENT);
+
+ /* Relinquish the device (back to the BMC, if this is a LOM) */
+ efx_mcdi_drv_attach(efx, false, NULL);
+
+ kfree(efx->mcdi);
+}
+
+static void efx_mcdi_send_request(struct efx_nic *efx, unsigned cmd,
+ const efx_dword_t *inbuf, size_t inlen)
+{
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+ efx_dword_t hdr[2];
+ size_t hdr_len;
+ u32 xflags, seqno;
+
+ BUG_ON(mcdi->state == MCDI_STATE_QUIESCENT);
+
+ /* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */
+ spin_lock_bh(&mcdi->iface_lock);
+ ++mcdi->seqno;
+ spin_unlock_bh(&mcdi->iface_lock);
+
+ seqno = mcdi->seqno & SEQ_MASK;
+ xflags = 0;
+ if (mcdi->mode == MCDI_MODE_EVENTS)
+ xflags |= MCDI_HEADER_XFLAGS_EVREQ;
+
+ if (efx->type->mcdi_max_ver == 1) {
+ /* MCDI v1 */
+ EFX_POPULATE_DWORD_7(hdr[0],
+ MCDI_HEADER_RESPONSE, 0,
+ MCDI_HEADER_RESYNC, 1,
+ MCDI_HEADER_CODE, cmd,
+ MCDI_HEADER_DATALEN, inlen,
+ MCDI_HEADER_SEQ, seqno,
+ MCDI_HEADER_XFLAGS, xflags,
+ MCDI_HEADER_NOT_EPOCH, !mcdi->new_epoch);
+ hdr_len = 4;
+ } else {
+ /* MCDI v2 */
+ BUG_ON(inlen > MCDI_CTL_SDU_LEN_MAX_V2);
+ EFX_POPULATE_DWORD_7(hdr[0],
+ MCDI_HEADER_RESPONSE, 0,
+ MCDI_HEADER_RESYNC, 1,
+ MCDI_HEADER_CODE, MC_CMD_V2_EXTN,
+ MCDI_HEADER_DATALEN, 0,
+ MCDI_HEADER_SEQ, seqno,
+ MCDI_HEADER_XFLAGS, xflags,
+ MCDI_HEADER_NOT_EPOCH, !mcdi->new_epoch);
+ EFX_POPULATE_DWORD_2(hdr[1],
+ MC_CMD_V2_EXTN_IN_EXTENDED_CMD, cmd,
+ MC_CMD_V2_EXTN_IN_ACTUAL_LEN, inlen);
+ hdr_len = 8;
+ }
+
+ efx->type->mcdi_request(efx, hdr, hdr_len, inbuf, inlen);
+
+ mcdi->new_epoch = false;
+}
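Code elsewhere in the driver does not call efx_mcdi_send_request() directly; requests are issued through efx_mcdi_rpc() (declared in mcdi.h), which serialises access to the interface, waits for completion by polling or events, and translates MC error codes. A minimal sketch of a synchronous query is shown below, assuming MC_CMD_GET_VERSION and its output layout from mcdi_pcol.h and the MCDI_DECLARE_BUF()/MCDI_PTR() helpers from mcdi.h; the function name is illustrative only.

	static int example_query_fw_version(struct efx_nic *efx, __le16 version[4])
	{
		MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_VERSION_OUT_LEN);
		size_t outlen;
		int rc;

		rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0,
				  outbuf, sizeof(outbuf), &outlen);
		if (rc)
			return rc;
		if (outlen < MC_CMD_GET_VERSION_OUT_LEN)
			return -EIO;

		/* The VERSION field holds four 16-bit version components */
		memcpy(version, MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION),
		       4 * sizeof(__le16));
		return 0;
	}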
+
+static int efx_mcdi_errno(unsigned int mcdi_err)
+{
+ switch (mcdi_err) {
+ case 0:
+ return 0;
+#define TRANSLATE_ERROR(name) \
+ case MC_CMD_ERR_ ## name: \
+ return -name;
+ TRANSLATE_ERROR(EPERM);
+ TRANSLATE_ERROR(ENOENT);
+ TRANSLATE_ERROR(EINTR);
+ TRANSLATE_ERROR(EAGAIN);
+ TRANSLATE_ERROR(EACCES);
+ TRANSLATE_ERROR(EBUSY);
+ TRANSLATE_ERROR(EINVAL);
+ TRANSLATE_ERROR(EDEADLK);
+ TRANSLATE_ERROR(ENOSYS);
+ TRANSLATE_ERROR(ETIME);
+ TRANSLATE_ERROR(EALREADY);
+ TRANSLATE_ERROR(ENOSPC);
+#undef TRANSLATE_ERROR
+ case MC_CMD_ERR_ENOTSUP:
+ return -EOPNOTSUPP;
+ case MC_CMD_ERR_ALLOC_FAIL:
+ return -ENOBUFS;
+ case MC_CMD_ERR_MAC_EXIST:
+ return -EADDRINUSE;
+ default:
+ return -EPROTO;
+ }
+}
+
+static void efx_mcdi_read_response_header(struct efx_nic *efx)
+{
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+ unsigned int respseq, respcmd, error;
+ efx_dword_t hdr;
+
+ efx->type->mcdi_read_response(efx, &hdr, 0, 4);
+ respseq = EFX_DWORD_FIELD(hdr, MCDI_HEADER_SEQ);
+ respcmd = EFX_DWORD_FIELD(hdr, MCDI_HEADER_CODE);
+ error = EFX_DWORD_FIELD(hdr, MCDI_HEADER_ERROR);
+
+ if (respcmd != MC_CMD_V2_EXTN) {
+ mcdi->resp_hdr_len = 4;
+ mcdi->resp_data_len = EFX_DWORD_FIELD(hdr, MCDI_HEADER_DATALEN);
+ } else {
+ efx->type->mcdi_read_response(efx, &hdr, 4, 4);
+ mcdi->resp_hdr_len = 8;
+ mcdi->resp_data_len =
+ EFX_DWORD_FIELD(hdr, MC_CMD_V2_EXTN_IN_ACTUAL_LEN);
+ }
+
+ if (error && mcdi->resp_data_len == 0) {
+ netif_err(efx, hw, efx->net_dev, "MC rebooted\n");
+ mcdi->resprc = -EIO;
+ } else if ((respseq ^ mcdi->seqno) & SEQ_MASK) {
+ netif_err(efx, hw, efx->net_dev,
+ "MC response mismatch tx seq 0x%x rx seq 0x%x\n",
+ respseq, mcdi->seqno);
+ mcdi->resprc = -EIO;
+ } else if (error) {
+ efx->type->mcdi_read_response(efx, &hdr, mcdi->resp_hdr_len, 4);
+ mcdi->resprc =
+ efx_mcdi_errno(EFX_DWORD_FIELD(hdr, EFX_DWORD_0));
+ } else {
+ mcdi->resprc = 0;
+ }
+}
+
+static bool efx_mcdi_poll_once(struct efx_nic *efx)
+{
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+
+ rmb();
+ if (!efx->type->mcdi_poll_response(efx))
+ return false;
+
+ spin_lock_bh(&mcdi->iface_lock);
+ efx_mcdi_read_response_header(efx);
+ spin_unlock_bh(&mcdi->iface_lock);
+
+ return true;
+}
+
+static int efx_mcdi_poll(struct efx_nic *efx)
+{
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+ unsigned long time, finish;
+ unsigned int spins;
+ int rc;
+
+ /* Check for a reboot atomically with respect to efx_mcdi_copyout() */
+ rc = efx_mcdi_poll_reboot(efx);
+ if (rc) {
+ spin_lock_bh(&mcdi->iface_lock);
+ mcdi->resprc = rc;
+ mcdi->resp_hdr_len = 0;
+ mcdi->resp_data_len = 0;
+ spin_unlock_bh(&mcdi->iface_lock);
+ return 0;
+ }
+
+	/* Poll for completion. Poll quickly (once per microsecond) for the
+	 * first jiffy, because MCDI responses are generally fast. After that,
+	 * back off and poll approximately once per jiffy.
+	 */
+ spins = TICK_USEC;
+ finish = jiffies + MCDI_RPC_TIMEOUT;
+
+ while (1) {
+ if (spins != 0) {
+ --spins;
+ udelay(1);
+ } else {
+ schedule_timeout_uninterruptible(1);
+ }
+
+ time = jiffies;
+
+ if (efx_mcdi_poll_once(efx))
+ break;
+
+ if (time_after(time, finish))
+ return -ETIMEDOUT;
+ }
+
+ /* Return rc=0 like wait_event_timeout() */
+ return 0;
+}
+
+/* Test and clear MC-rebooted flag for this port/function; reset
+ * software state as necessary.
+ */
+int efx_mcdi_poll_reboot(struct efx_nic *efx)
+{
+ if (!efx->mcdi)
+ return 0;
+
+ return efx->type->mcdi_poll_reboot(efx);
+}
+
+static bool efx_mcdi_acquire_async(struct efx_mcdi_iface *mcdi)
+{
+ return cmpxchg(&mcdi->state,
+ MCDI_STATE_QUIESCENT, MCDI_STATE_RUNNING_ASYNC) ==
+ MCDI_STATE_QUIESCENT;
+}
+
+static void efx_mcdi_acquire_sync(struct efx_mcdi_iface *mcdi)
+{
+ /* Wait until the interface becomes QUIESCENT and we win the race
+ * to mark it RUNNING_SYNC.
+ */
+ wait_event(mcdi->wq,
+ cmpxchg(&mcdi->state,
+ MCDI_STATE_QUIESCENT, MCDI_STATE_RUNNING_SYNC) ==
+ MCDI_STATE_QUIESCENT);
+}
+
+static int efx_mcdi_await_completion(struct efx_nic *efx)
+{
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+
+ if (wait_event_timeout(mcdi->wq, mcdi->state == MCDI_STATE_COMPLETED,
+ MCDI_RPC_TIMEOUT) == 0)
+ return -ETIMEDOUT;
+
+	/* Check if efx_mcdi_set_mode() switched us back to polled completions,
+	 * in which case we poll for completions directly. If efx_mcdi_ev_cpl()
+ * completed the request first, then we'll just end up completing the
+ * request again, which is safe.
+ *
+ * We need an smp_rmb() to synchronise with efx_mcdi_mode_poll(), which
+ * wait_event_timeout() implicitly provides.
+ */
+ if (mcdi->mode == MCDI_MODE_POLL)
+ return efx_mcdi_poll(efx);
+
+ return 0;
+}
+
+/* If the interface is RUNNING_SYNC, switch to COMPLETED and wake the
+ * requester. Return whether this was done. Does not take any locks.
+ */
+static bool efx_mcdi_complete_sync(struct efx_mcdi_iface *mcdi)
+{
+ if (cmpxchg(&mcdi->state,
+ MCDI_STATE_RUNNING_SYNC, MCDI_STATE_COMPLETED) ==
+ MCDI_STATE_RUNNING_SYNC) {
+ wake_up(&mcdi->wq);
+ return true;
+ }
+
+ return false;
+}
+
+static void efx_mcdi_release(struct efx_mcdi_iface *mcdi)
+{
+ if (mcdi->mode == MCDI_MODE_EVENTS) {
+ struct efx_mcdi_async_param *async;
+ struct efx_nic *efx = mcdi->efx;
+
+ /* Process the asynchronous request queue */
+ spin_lock_bh(&mcdi->async_lock);
+ async = list_first_entry_or_null(
+ &mcdi->async_list, struct efx_mcdi_async_param, list);
+ if (async) {
+ mcdi->state = MCDI_STATE_RUNNING_ASYNC;
+ efx_mcdi_send_request(efx, async->cmd,
+ (const efx_dword_t *)(async + 1),
+ async->inlen);
+ mod_timer(&mcdi->async_timer,
+ jiffies + MCDI_RPC_TIMEOUT);
+ }
+ spin_unlock_bh(&mcdi->async_lock);
+
+ if (async)
+ return;
+ }
+
+ mcdi->state = MCDI_STATE_QUIESCENT;
+ wake_up(&mcdi->wq);
+}
+
+/* If the interface is RUNNING_ASYNC, switch to COMPLETED, call the
+ * asynchronous completion function, and release the interface.
+ * Return whether this was done. Must be called in bh-disabled
+ * context. Will take iface_lock and async_lock.
+ */
+static bool efx_mcdi_complete_async(struct efx_mcdi_iface *mcdi, bool timeout)
+{
+ struct efx_nic *efx = mcdi->efx;
+ struct efx_mcdi_async_param *async;
+ size_t hdr_len, data_len, err_len;
+ efx_dword_t *outbuf;
+ MCDI_DECLARE_BUF_OUT_OR_ERR(errbuf, 0);
+ int rc;
+
+ if (cmpxchg(&mcdi->state,
+ MCDI_STATE_RUNNING_ASYNC, MCDI_STATE_COMPLETED) !=
+ MCDI_STATE_RUNNING_ASYNC)
+ return false;
+
+ spin_lock(&mcdi->iface_lock);
+ if (timeout) {
+ /* Ensure that if the completion event arrives later,
+ * the seqno check in efx_mcdi_ev_cpl() will fail
+ */
+ ++mcdi->seqno;
+ ++mcdi->credits;
+ rc = -ETIMEDOUT;
+ hdr_len = 0;
+ data_len = 0;
+ } else {
+ rc = mcdi->resprc;
+ hdr_len = mcdi->resp_hdr_len;
+ data_len = mcdi->resp_data_len;
+ }
+ spin_unlock(&mcdi->iface_lock);
+
+ /* Stop the timer. In case the timer function is running, we
+ * must wait for it to return so that there is no possibility
+ * of it aborting the next request.
+ */
+ if (!timeout)
+ del_timer_sync(&mcdi->async_timer);
+
+ spin_lock(&mcdi->async_lock);
+ async = list_first_entry(&mcdi->async_list,
+ struct efx_mcdi_async_param, list);
+ list_del(&async->list);
+ spin_unlock(&mcdi->async_lock);
+
+ outbuf = (efx_dword_t *)(async + 1);
+ efx->type->mcdi_read_response(efx, outbuf, hdr_len,
+ min(async->outlen, data_len));
+ if (!timeout && rc && !async->quiet) {
+ err_len = min(sizeof(errbuf), data_len);
+ efx->type->mcdi_read_response(efx, errbuf, hdr_len,
+ sizeof(errbuf));
+ efx_mcdi_display_error(efx, async->cmd, async->inlen, errbuf,
+ err_len, rc);
+ }
+ async->complete(efx, async->cookie, rc, outbuf, data_len);
+ kfree(async);
+
+ efx_mcdi_release(mcdi);
+
+ return true;
+}
+
+static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
+ unsigned int datalen, unsigned int mcdi_err)
+{
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+ bool wake = false;
+
+ spin_lock(&mcdi->iface_lock);
+
+ if ((seqno ^ mcdi->seqno) & SEQ_MASK) {
+ if (mcdi->credits)
+ /* The request has been cancelled */
+ --mcdi->credits;
+ else
+ netif_err(efx, hw, efx->net_dev,
+ "MC response mismatch tx seq 0x%x rx "
+ "seq 0x%x\n", seqno, mcdi->seqno);
+ } else {
+ if (efx->type->mcdi_max_ver >= 2) {
+ /* MCDI v2 responses don't fit in an event */
+ efx_mcdi_read_response_header(efx);
+ } else {
+ mcdi->resprc = efx_mcdi_errno(mcdi_err);
+ mcdi->resp_hdr_len = 4;
+ mcdi->resp_data_len = datalen;
+ }
+
+ wake = true;
+ }
+
+ spin_unlock(&mcdi->iface_lock);
+
+ if (wake) {
+ if (!efx_mcdi_complete_async(mcdi, false))
+ (void) efx_mcdi_complete_sync(mcdi);
+
+ /* If the interface isn't RUNNING_ASYNC or
+ * RUNNING_SYNC then we've received a duplicate
+ * completion after we've already transitioned back to
+ * QUIESCENT. [A subsequent invocation would increment
+ * seqno, so would have failed the seqno check].
+ */
+ }
+}
+
+static void efx_mcdi_timeout_async(unsigned long context)
+{
+ struct efx_mcdi_iface *mcdi = (struct efx_mcdi_iface *)context;
+
+ efx_mcdi_complete_async(mcdi, true);
+}
+
+static int
+efx_mcdi_check_supported(struct efx_nic *efx, unsigned int cmd, size_t inlen)
+{
+ if (efx->type->mcdi_max_ver < 0 ||
+ (efx->type->mcdi_max_ver < 2 &&
+ cmd > MC_CMD_CMD_SPACE_ESCAPE_7))
+ return -EINVAL;
+
+ if (inlen > MCDI_CTL_SDU_LEN_MAX_V2 ||
+ (efx->type->mcdi_max_ver < 2 &&
+ inlen > MCDI_CTL_SDU_LEN_MAX_V1))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int _efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
+ efx_dword_t *outbuf, size_t outlen,
+ size_t *outlen_actual, bool quiet)
+{
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+ MCDI_DECLARE_BUF_OUT_OR_ERR(errbuf, 0);
+ int rc;
+
+ if (mcdi->mode == MCDI_MODE_POLL)
+ rc = efx_mcdi_poll(efx);
+ else
+ rc = efx_mcdi_await_completion(efx);
+
+ if (rc != 0) {
+ netif_err(efx, hw, efx->net_dev,
+ "MC command 0x%x inlen %d mode %d timed out\n",
+ cmd, (int)inlen, mcdi->mode);
+
+ if (mcdi->mode == MCDI_MODE_EVENTS && efx_mcdi_poll_once(efx)) {
+ netif_err(efx, hw, efx->net_dev,
+ "MCDI request was completed without an event\n");
+ rc = 0;
+ }
+
+ efx_mcdi_abandon(efx);
+
+ /* Close the race with efx_mcdi_ev_cpl() executing just too late
+ * and completing a request we've just cancelled, by ensuring
+ * that the seqno check therein fails.
+ */
+ spin_lock_bh(&mcdi->iface_lock);
+ ++mcdi->seqno;
+ ++mcdi->credits;
+ spin_unlock_bh(&mcdi->iface_lock);
+ }
+
+ if (rc != 0) {
+ if (outlen_actual)
+ *outlen_actual = 0;
+ } else {
+ size_t hdr_len, data_len, err_len;
+
+ /* At the very least we need a memory barrier here to ensure
+ * we pick up changes from efx_mcdi_ev_cpl(). Protect against
+ * a spurious efx_mcdi_ev_cpl() running concurrently by
+ * acquiring the iface_lock. */
+ spin_lock_bh(&mcdi->iface_lock);
+ rc = mcdi->resprc;
+ hdr_len = mcdi->resp_hdr_len;
+ data_len = mcdi->resp_data_len;
+ err_len = min(sizeof(errbuf), data_len);
+ spin_unlock_bh(&mcdi->iface_lock);
+
+ BUG_ON(rc > 0);
+
+ efx->type->mcdi_read_response(efx, outbuf, hdr_len,
+ min(outlen, data_len));
+ if (outlen_actual)
+ *outlen_actual = data_len;
+
+ efx->type->mcdi_read_response(efx, errbuf, hdr_len, err_len);
+
+ if (cmd == MC_CMD_REBOOT && rc == -EIO) {
+ /* Don't reset if MC_CMD_REBOOT returns EIO */
+ } else if (rc == -EIO || rc == -EINTR) {
+ netif_err(efx, hw, efx->net_dev, "MC fatal error %d\n",
+ -rc);
+ efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
+ } else if (rc && !quiet) {
+ efx_mcdi_display_error(efx, cmd, inlen, errbuf, err_len,
+ rc);
+ }
+
+ if (rc == -EIO || rc == -EINTR) {
+ msleep(MCDI_STATUS_SLEEP_MS);
+ efx_mcdi_poll_reboot(efx);
+ mcdi->new_epoch = true;
+ }
+ }
+
+ efx_mcdi_release(mcdi);
+ return rc;
+}
+
+static int _efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
+ const efx_dword_t *inbuf, size_t inlen,
+ efx_dword_t *outbuf, size_t outlen,
+ size_t *outlen_actual, bool quiet)
+{
+ int rc;
+
+ rc = efx_mcdi_rpc_start(efx, cmd, inbuf, inlen);
+ if (rc) {
+ if (outlen_actual)
+ *outlen_actual = 0;
+ return rc;
+ }
+ return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen,
+ outlen_actual, quiet);
+}
+
+int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
+ const efx_dword_t *inbuf, size_t inlen,
+ efx_dword_t *outbuf, size_t outlen,
+ size_t *outlen_actual)
+{
+ return _efx_mcdi_rpc(efx, cmd, inbuf, inlen, outbuf, outlen,
+ outlen_actual, false);
+}
+
+/* Normally, on receiving an error code in the MCDI response,
+ * efx_mcdi_rpc will log an error message containing (among other
+ * things) the raw error code, by means of efx_mcdi_display_error.
+ * This _quiet version suppresses that; if the caller wishes to log
+ * the error conditionally on the return code, it should call this
+ * function and is then responsible for calling efx_mcdi_display_error
+ * as needed.
+ */
+int efx_mcdi_rpc_quiet(struct efx_nic *efx, unsigned cmd,
+ const efx_dword_t *inbuf, size_t inlen,
+ efx_dword_t *outbuf, size_t outlen,
+ size_t *outlen_actual)
+{
+ return _efx_mcdi_rpc(efx, cmd, inbuf, inlen, outbuf, outlen,
+ outlen_actual, true);
+}
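+
+/* Illustrative sketch only, not part of the driver: a caller that treats a
+ * particular error as expected might use the _quiet variant and log other
+ * failures itself. The command and the "expected" error chosen below are
+ * hypothetical; efx_mcdi_read_assertion() later in this file is a real
+ * instance of the same pattern.
+ *
+ *    MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TYPES_OUT_LEN);
+ *    size_t outlen;
+ *    int rc;
+ *
+ *    rc = efx_mcdi_rpc_quiet(efx, MC_CMD_NVRAM_TYPES, NULL, 0,
+ *                            outbuf, sizeof(outbuf), &outlen);
+ *    if (rc && rc != -EPERM)
+ *            efx_mcdi_display_error(efx, MC_CMD_NVRAM_TYPES, 0,
+ *                                   outbuf, outlen, rc);
+ */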
+
+int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
+ const efx_dword_t *inbuf, size_t inlen)
+{
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+ int rc;
+
+ rc = efx_mcdi_check_supported(efx, cmd, inlen);
+ if (rc)
+ return rc;
+
+ if (efx->mc_bist_for_other_fn)
+ return -ENETDOWN;
+
+ if (mcdi->mode == MCDI_MODE_FAIL)
+ return -ENETDOWN;
+
+ efx_mcdi_acquire_sync(mcdi);
+ efx_mcdi_send_request(efx, cmd, inbuf, inlen);
+ return 0;
+}
+
+static int _efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
+ const efx_dword_t *inbuf, size_t inlen,
+ size_t outlen,
+ efx_mcdi_async_completer *complete,
+ unsigned long cookie, bool quiet)
+{
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+ struct efx_mcdi_async_param *async;
+ int rc;
+
+ rc = efx_mcdi_check_supported(efx, cmd, inlen);
+ if (rc)
+ return rc;
+
+ if (efx->mc_bist_for_other_fn)
+ return -ENETDOWN;
+
+ async = kmalloc(sizeof(*async) + ALIGN(max(inlen, outlen), 4),
+ GFP_ATOMIC);
+ if (!async)
+ return -ENOMEM;
+
+ async->cmd = cmd;
+ async->inlen = inlen;
+ async->outlen = outlen;
+ async->quiet = quiet;
+ async->complete = complete;
+ async->cookie = cookie;
+ memcpy(async + 1, inbuf, inlen);
+
+ spin_lock_bh(&mcdi->async_lock);
+
+ if (mcdi->mode == MCDI_MODE_EVENTS) {
+ list_add_tail(&async->list, &mcdi->async_list);
+
+ /* If this is at the front of the queue, try to start it
+ * immediately
+ */
+ if (mcdi->async_list.next == &async->list &&
+ efx_mcdi_acquire_async(mcdi)) {
+ efx_mcdi_send_request(efx, cmd, inbuf, inlen);
+ mod_timer(&mcdi->async_timer,
+ jiffies + MCDI_RPC_TIMEOUT);
+ }
+ } else {
+ kfree(async);
+ rc = -ENETDOWN;
+ }
+
+ spin_unlock_bh(&mcdi->async_lock);
+
+ return rc;
+}
+
+/**
+ * efx_mcdi_rpc_async - Schedule an MCDI command to run asynchronously
+ * @efx: NIC through which to issue the command
+ * @cmd: Command type number
+ * @inbuf: Command parameters
+ * @inlen: Length of command parameters, in bytes
+ * @outlen: Length to allocate for response buffer, in bytes
+ * @complete: Function to be called on completion or cancellation.
+ * @cookie: Arbitrary value to be passed to @complete.
+ *
+ * This function does not sleep and therefore may be called in atomic
+ * context. It will fail if event queues are disabled or if MCDI
+ * event completions have been disabled due to an error.
+ *
+ * If it succeeds, the @complete function will be called exactly once
+ * in atomic context, when one of the following occurs:
+ * (a) the completion event is received (in NAPI context)
+ * (b) event queues are disabled (in the process that disables them)
+ * (c) the request times out (in timer context)
+ */
+int
+efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
+ const efx_dword_t *inbuf, size_t inlen, size_t outlen,
+ efx_mcdi_async_completer *complete, unsigned long cookie)
+{
+ return _efx_mcdi_rpc_async(efx, cmd, inbuf, inlen, outlen, complete,
+ cookie, false);
+}
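+
+/* Illustrative sketch only, not part of the driver: a hypothetical caller
+ * issuing MC_CMD_GET_VERSION asynchronously. The completer name and the
+ * cookie value are invented for this example.
+ *
+ *    static void example_complete(struct efx_nic *efx, unsigned long cookie,
+ *                                 int rc, efx_dword_t *outbuf,
+ *                                 size_t outlen_actual)
+ *    {
+ *            if (rc == 0 && outlen_actual >= MC_CMD_GET_VERSION_OUT_LEN)
+ *                    netif_dbg(efx, drv, efx->net_dev,
+ *                              "firmware version read\n");
+ *    }
+ *
+ *    rc = efx_mcdi_rpc_async(efx, MC_CMD_GET_VERSION, NULL, 0,
+ *                            MC_CMD_GET_VERSION_OUT_LEN,
+ *                            example_complete, 0);
+ */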
+
+int efx_mcdi_rpc_async_quiet(struct efx_nic *efx, unsigned int cmd,
+ const efx_dword_t *inbuf, size_t inlen,
+ size_t outlen, efx_mcdi_async_completer *complete,
+ unsigned long cookie)
+{
+ return _efx_mcdi_rpc_async(efx, cmd, inbuf, inlen, outlen, complete,
+ cookie, true);
+}
+
+int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
+ efx_dword_t *outbuf, size_t outlen,
+ size_t *outlen_actual)
+{
+ return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen,
+ outlen_actual, false);
+}
+
+int efx_mcdi_rpc_finish_quiet(struct efx_nic *efx, unsigned cmd, size_t inlen,
+ efx_dword_t *outbuf, size_t outlen,
+ size_t *outlen_actual)
+{
+ return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen,
+ outlen_actual, true);
+}
+
+void efx_mcdi_display_error(struct efx_nic *efx, unsigned cmd,
+ size_t inlen, efx_dword_t *outbuf,
+ size_t outlen, int rc)
+{
+ int code = 0, err_arg = 0;
+
+ if (outlen >= MC_CMD_ERR_CODE_OFST + 4)
+ code = MCDI_DWORD(outbuf, ERR_CODE);
+ if (outlen >= MC_CMD_ERR_ARG_OFST + 4)
+ err_arg = MCDI_DWORD(outbuf, ERR_ARG);
+ netif_err(efx, hw, efx->net_dev,
+ "MC command 0x%x inlen %d failed rc=%d (raw=%d) arg=%d\n",
+ cmd, (int)inlen, rc, code, err_arg);
+}
+
+/* Switch to polled MCDI completions. This can be called in various
+ * error conditions with various locks held, so it must be lockless.
+ * Caller is responsible for flushing asynchronous requests later.
+ */
+void efx_mcdi_mode_poll(struct efx_nic *efx)
+{
+ struct efx_mcdi_iface *mcdi;
+
+ if (!efx->mcdi)
+ return;
+
+ mcdi = efx_mcdi(efx);
+ /* If already in polling mode, nothing to do.
+ * If in fail-fast state, don't switch to polled completion.
+ * FLR recovery will do that later.
+ */
+ if (mcdi->mode == MCDI_MODE_POLL || mcdi->mode == MCDI_MODE_FAIL)
+ return;
+
+ /* We can switch from event completion to polled completion, because
+ * MCDI requests are always completed in shared memory. We do this by
+ * switching the mode to MCDI_MODE_POLL and then completing the request.
+ * efx_mcdi_await_completion() will then call efx_mcdi_poll().
+ *
+ * We need an smp_wmb() to synchronise with efx_mcdi_await_completion(),
+ * which efx_mcdi_complete_sync() provides for us.
+ */
+ mcdi->mode = MCDI_MODE_POLL;
+
+ efx_mcdi_complete_sync(mcdi);
+}
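+
+/* Illustrative sketch only, not part of the driver: the ordering a caller is
+ * expected to follow when tearing down event-driven MCDI is roughly
+ *
+ *    efx_mcdi_mode_poll(efx);        stop using event completions
+ *    (disable event queues)          driver-specific, not shown here
+ *    efx_mcdi_flush_async(efx);      cancel queued asynchronous requests
+ *
+ * efx_mcdi_flush_async() below must only be called once event processing
+ * has stopped, as its own comment states.
+ */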
+
+/* Flush any running or queued asynchronous requests, after event processing
+ * is stopped
+ */
+void efx_mcdi_flush_async(struct efx_nic *efx)
+{
+ struct efx_mcdi_async_param *async, *next;
+ struct efx_mcdi_iface *mcdi;
+
+ if (!efx->mcdi)
+ return;
+
+ mcdi = efx_mcdi(efx);
+
+ /* We must be in poll or fail mode so no more requests can be queued */
+ BUG_ON(mcdi->mode == MCDI_MODE_EVENTS);
+
+ del_timer_sync(&mcdi->async_timer);
+
+ /* If a request is still running, make sure we give the MC
+ * time to complete it so that the response won't overwrite our
+ * next request.
+ */
+ if (mcdi->state == MCDI_STATE_RUNNING_ASYNC) {
+ efx_mcdi_poll(efx);
+ mcdi->state = MCDI_STATE_QUIESCENT;
+ }
+
+ /* Nothing else will access the async list now, so it is safe
+ * to walk it without holding async_lock. If we hold it while
+ * calling a completer then lockdep may warn that we have
+ * acquired locks in the wrong order.
+ */
+ list_for_each_entry_safe(async, next, &mcdi->async_list, list) {
+ async->complete(efx, async->cookie, -ENETDOWN, NULL, 0);
+ list_del(&async->list);
+ kfree(async);
+ }
+}
+
+void efx_mcdi_mode_event(struct efx_nic *efx)
+{
+ struct efx_mcdi_iface *mcdi;
+
+ if (!efx->mcdi)
+ return;
+
+ mcdi = efx_mcdi(efx);
+ /* If already in event completion mode, nothing to do.
+ * If in fail-fast state, don't switch to event completion. FLR
+ * recovery will do that later.
+ */
+ if (mcdi->mode == MCDI_MODE_EVENTS || mcdi->mode == MCDI_MODE_FAIL)
+ return;
+
+ /* We can't switch from polled to event completion in the middle of a
+ * request, because the completion method is specified in the request.
+ * So acquire the interface to serialise the requestors. We don't need
+ * to acquire the iface_lock to change the mode here, but we do need a
+ * write memory barrier to ensure that efx_mcdi_rpc() sees it, which
+ * efx_mcdi_acquire_sync() provides.
+ */
+ efx_mcdi_acquire_sync(mcdi);
+ mcdi->mode = MCDI_MODE_EVENTS;
+ efx_mcdi_release(mcdi);
+}
+
+static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
+{
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+
+ /* If there is an outstanding MCDI request, it has been terminated
+ * either by a BADASSERT or REBOOT event. If the mcdi interface is
+ * in polled mode, then do nothing because the MC reboot handler will
+ * set the header correctly. However, if the mcdi interface is waiting
+ * for a CMDDONE event it won't receive it [and since all MCDI events
+ * are sent to the same queue, we can't be racing with
+ * efx_mcdi_ev_cpl()]
+ *
+ * If there is an outstanding asynchronous request, we can't
+ * complete it now (efx_mcdi_complete() would deadlock). The
+ * reset process will take care of this.
+ *
+ * There's a race here with efx_mcdi_send_request(), because
+ * we might receive a REBOOT event *before* the request has
+ * been copied out. In polled mode (during startup) this is
+ * irrelevant, because efx_mcdi_complete_sync() is ignored. In
+ * event mode, this condition is just an edge-case of
+ * receiving a REBOOT event after posting the MCDI
+ * request. Did the MC reboot before or after the copyout? The
+ * best we can do in either case is simply to return failure.
+ */
+ spin_lock(&mcdi->iface_lock);
+ if (efx_mcdi_complete_sync(mcdi)) {
+ if (mcdi->mode == MCDI_MODE_EVENTS) {
+ mcdi->resprc = rc;
+ mcdi->resp_hdr_len = 0;
+ mcdi->resp_data_len = 0;
+ ++mcdi->credits;
+ }
+ } else {
+ int count;
+
+ /* Consume the status word since efx_mcdi_rpc_finish() won't */
+ for (count = 0; count < MCDI_STATUS_DELAY_COUNT; ++count) {
+ if (efx_mcdi_poll_reboot(efx))
+ break;
+ udelay(MCDI_STATUS_DELAY_US);
+ }
+ mcdi->new_epoch = true;
+
+ /* Nobody was waiting for an MCDI request, so trigger a reset */
+ efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
+ }
+
+ spin_unlock(&mcdi->iface_lock);
+}
+
+/* The MC is going down into BIST mode. Set the BIST flag to block
+ * new MCDI, cancel any outstanding MCDI and schedule a BIST-type reset
+ * (which doesn't actually execute a reset, it waits for the controlling
+ * function to reset it).
+ */
+static void efx_mcdi_ev_bist(struct efx_nic *efx)
+{
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+
+ spin_lock(&mcdi->iface_lock);
+ efx->mc_bist_for_other_fn = true;
+ if (efx_mcdi_complete_sync(mcdi)) {
+ if (mcdi->mode == MCDI_MODE_EVENTS) {
+ mcdi->resprc = -EIO;
+ mcdi->resp_hdr_len = 0;
+ mcdi->resp_data_len = 0;
+ ++mcdi->credits;
+ }
+ }
+ mcdi->new_epoch = true;
+ efx_schedule_reset(efx, RESET_TYPE_MC_BIST);
+ spin_unlock(&mcdi->iface_lock);
+}
+
+/* MCDI timeouts seen, so make all MCDI calls fail-fast and issue an FLR to try
+ * to recover.
+ */
+static void efx_mcdi_abandon(struct efx_nic *efx)
+{
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+
+ if (xchg(&mcdi->mode, MCDI_MODE_FAIL) == MCDI_MODE_FAIL)
+ return; /* it had already been done */
+ netif_dbg(efx, hw, efx->net_dev, "MCDI is timing out; trying to recover\n");
+ efx_schedule_reset(efx, RESET_TYPE_MCDI_TIMEOUT);
+}
+
+/* Called from falcon_process_eventq for MCDI events */
+void efx_mcdi_process_event(struct efx_channel *channel,
+ efx_qword_t *event)
+{
+ struct efx_nic *efx = channel->efx;
+ int code = EFX_QWORD_FIELD(*event, MCDI_EVENT_CODE);
+ u32 data = EFX_QWORD_FIELD(*event, MCDI_EVENT_DATA);
+
+ switch (code) {
+ case MCDI_EVENT_CODE_BADSSERT:
+ netif_err(efx, hw, efx->net_dev,
+ "MC watchdog or assertion failure at 0x%x\n", data);
+ efx_mcdi_ev_death(efx, -EINTR);
+ break;
+
+ case MCDI_EVENT_CODE_PMNOTICE:
+ netif_info(efx, wol, efx->net_dev, "MCDI PM event.\n");
+ break;
+
+ case MCDI_EVENT_CODE_CMDDONE:
+ efx_mcdi_ev_cpl(efx,
+ MCDI_EVENT_FIELD(*event, CMDDONE_SEQ),
+ MCDI_EVENT_FIELD(*event, CMDDONE_DATALEN),
+ MCDI_EVENT_FIELD(*event, CMDDONE_ERRNO));
+ break;
+
+ case MCDI_EVENT_CODE_LINKCHANGE:
+ efx_mcdi_process_link_change(efx, event);
+ break;
+ case MCDI_EVENT_CODE_SENSOREVT:
+ efx_mcdi_sensor_event(efx, event);
+ break;
+ case MCDI_EVENT_CODE_SCHEDERR:
+ netif_dbg(efx, hw, efx->net_dev,
+ "MC Scheduler alert (0x%x)\n", data);
+ break;
+ case MCDI_EVENT_CODE_REBOOT:
+ case MCDI_EVENT_CODE_MC_REBOOT:
+ netif_info(efx, hw, efx->net_dev, "MC Reboot\n");
+ efx_mcdi_ev_death(efx, -EIO);
+ break;
+ case MCDI_EVENT_CODE_MC_BIST:
+ netif_info(efx, hw, efx->net_dev, "MC entered BIST mode\n");
+ efx_mcdi_ev_bist(efx);
+ break;
+ case MCDI_EVENT_CODE_MAC_STATS_DMA:
+ /* MAC stats are gathered lazily. We can ignore this. */
+ break;
+ case MCDI_EVENT_CODE_FLR:
+ efx_siena_sriov_flr(efx, MCDI_EVENT_FIELD(*event, FLR_VF));
+ break;
+ case MCDI_EVENT_CODE_PTP_RX:
+ case MCDI_EVENT_CODE_PTP_FAULT:
+ case MCDI_EVENT_CODE_PTP_PPS:
+ efx_ptp_event(efx, event);
+ break;
+ case MCDI_EVENT_CODE_PTP_TIME:
+ efx_time_sync_event(channel, event);
+ break;
+ case MCDI_EVENT_CODE_TX_FLUSH:
+ case MCDI_EVENT_CODE_RX_FLUSH:
+ /* Two flush events will be sent: one to the same event
+ * queue as completions, and one to event queue 0.
+ * In the latter case the {RX,TX}_FLUSH_TO_DRIVER
+ * flag will be set, and we should ignore the event
+ * because we want to wait for all completions.
+ */
+ BUILD_BUG_ON(MCDI_EVENT_TX_FLUSH_TO_DRIVER_LBN !=
+ MCDI_EVENT_RX_FLUSH_TO_DRIVER_LBN);
+ if (!MCDI_EVENT_FIELD(*event, TX_FLUSH_TO_DRIVER))
+ efx_ef10_handle_drain_event(efx);
+ break;
+ case MCDI_EVENT_CODE_TX_ERR:
+ case MCDI_EVENT_CODE_RX_ERR:
+ netif_err(efx, hw, efx->net_dev,
+ "%s DMA error (event: "EFX_QWORD_FMT")\n",
+ code == MCDI_EVENT_CODE_TX_ERR ? "TX" : "RX",
+ EFX_QWORD_VAL(*event));
+ efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
+ break;
+ default:
+ netif_err(efx, hw, efx->net_dev, "Unknown MCDI event 0x%x\n",
+ code);
+ }
+}
+
+/**************************************************************************
+ *
+ * Specific request functions
+ *
+ **************************************************************************
+ */
+
+void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
+{
+ MCDI_DECLARE_BUF(outbuf,
+ max(MC_CMD_GET_VERSION_OUT_LEN,
+ MC_CMD_GET_CAPABILITIES_OUT_LEN));
+ size_t outlength;
+ const __le16 *ver_words;
+ size_t offset;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_GET_VERSION_IN_LEN != 0);
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0,
+ outbuf, sizeof(outbuf), &outlength);
+ if (rc)
+ goto fail;
+ if (outlength < MC_CMD_GET_VERSION_OUT_LEN) {
+ rc = -EIO;
+ goto fail;
+ }
+
+ ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION);
+ offset = snprintf(buf, len, "%u.%u.%u.%u",
+ le16_to_cpu(ver_words[0]), le16_to_cpu(ver_words[1]),
+ le16_to_cpu(ver_words[2]), le16_to_cpu(ver_words[3]));
+
+ /* EF10 may have multiple datapath firmware variants within a
+ * single version. Report which variants are running.
+ */
+ if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) {
+ BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0);
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0,
+ outbuf, sizeof(outbuf), &outlength);
+ if (rc || outlength < MC_CMD_GET_CAPABILITIES_OUT_LEN)
+ offset += snprintf(
+ buf + offset, len - offset, " rx? tx?");
+ else
+ offset += snprintf(
+ buf + offset, len - offset, " rx%x tx%x",
+ MCDI_WORD(outbuf,
+ GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID),
+ MCDI_WORD(outbuf,
+ GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID));
+
+ /* It's theoretically possible for the string to exceed 31
+ * characters, though in practice the first three version
+ * components are short enough that this doesn't happen.
+ */
+ if (WARN_ON(offset >= len))
+ buf[0] = 0;
+ }
+
+ return;
+
+fail:
+ netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ buf[0] = 0;
+}
+
+static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
+ bool *was_attached)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_DRV_ATTACH_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_EXT_OUT_LEN);
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_NEW_STATE,
+ driver_operating ? 1 : 0);
+ MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1);
+ MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID, MC_CMD_FW_LOW_LATENCY);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ goto fail;
+ if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN) {
+ rc = -EIO;
+ goto fail;
+ }
+
+ if (driver_operating) {
+ if (outlen >= MC_CMD_DRV_ATTACH_EXT_OUT_LEN) {
+ efx->mcdi->fn_flags =
+ MCDI_DWORD(outbuf,
+ DRV_ATTACH_EXT_OUT_FUNC_FLAGS);
+ } else {
+ /* Synthesise flags for Siena */
+ efx->mcdi->fn_flags =
+ 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
+ 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED |
+ (efx_port_num(efx) == 0) <<
+ MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY;
+ }
+ }
+
+ /* We currently assume we have control of the external link
+ * and are completely trusted by firmware. Abort probing
+ * if that's not true for this function.
+ */
+ if (driver_operating &&
+ (efx->mcdi->fn_flags &
+ (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
+ 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED)) !=
+ (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
+ 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED)) {
+ netif_err(efx, probe, efx->net_dev,
+ "This driver version only supports one function per port\n");
+ return -ENODEV;
+ }
+
+ if (was_attached != NULL)
+ *was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE);
+ return 0;
+
+fail:
+ netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
+ u16 *fw_subtype_list, u32 *capabilities)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_BOARD_CFG_OUT_LENMAX);
+ size_t outlen, i;
+ int port_num = efx_port_num(efx);
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0);
+ /* we need __aligned(2) for ether_addr_copy */
+ BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST & 1);
+ BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST & 1);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_BOARD_CFG, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ goto fail;
+
+ if (outlen < MC_CMD_GET_BOARD_CFG_OUT_LENMIN) {
+ rc = -EIO;
+ goto fail;
+ }
+
+ if (mac_address)
+ ether_addr_copy(mac_address,
+ port_num ?
+ MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1) :
+ MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0));
+ if (fw_subtype_list) {
+ for (i = 0;
+ i < MCDI_VAR_ARRAY_LEN(outlen,
+ GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST);
+ i++)
+ fw_subtype_list[i] = MCDI_ARRAY_WORD(
+ outbuf, GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST, i);
+ for (; i < MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM; i++)
+ fw_subtype_list[i] = 0;
+ }
+ if (capabilities) {
+ if (port_num)
+ *capabilities = MCDI_DWORD(outbuf,
+ GET_BOARD_CFG_OUT_CAPABILITIES_PORT1);
+ else
+ *capabilities = MCDI_DWORD(outbuf,
+ GET_BOARD_CFG_OUT_CAPABILITIES_PORT0);
+ }
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d len=%d\n",
+ __func__, rc, (int)outlen);
+
+ return rc;
+}
+
+int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_LOG_CTRL_IN_LEN);
+ u32 dest = 0;
+ int rc;
+
+ if (uart)
+ dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_UART;
+ if (evq)
+ dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ;
+
+ MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST, dest);
+ MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST_EVQ, dest_evq);
+
+ BUILD_BUG_ON(MC_CMD_LOG_CTRL_OUT_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_LOG_CTRL, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ return rc;
+}
+
+int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TYPES_OUT_LEN);
+ size_t outlen;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_NVRAM_TYPES_IN_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TYPES, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ goto fail;
+ if (outlen < MC_CMD_NVRAM_TYPES_OUT_LEN) {
+ rc = -EIO;
+ goto fail;
+ }
+
+ *nvram_types_out = MCDI_DWORD(outbuf, NVRAM_TYPES_OUT_TYPES);
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
+ __func__, rc);
+ return rc;
+}
+
+int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
+ size_t *size_out, size_t *erase_size_out,
+ bool *protected_out)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_INFO_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_INFO_OUT_LEN);
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, NVRAM_INFO_IN_TYPE, type);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_INFO, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ goto fail;
+ if (outlen < MC_CMD_NVRAM_INFO_OUT_LEN) {
+ rc = -EIO;
+ goto fail;
+ }
+
+ *size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE);
+ *erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE);
+ *protected_out = !!(MCDI_DWORD(outbuf, NVRAM_INFO_OUT_FLAGS) &
+ (1 << MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN));
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+static int efx_mcdi_nvram_test(struct efx_nic *efx, unsigned int type)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_TEST_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TEST_OUT_LEN);
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, NVRAM_TEST_IN_TYPE, type);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TEST, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), NULL);
+ if (rc)
+ return rc;
+
+ switch (MCDI_DWORD(outbuf, NVRAM_TEST_OUT_RESULT)) {
+ case MC_CMD_NVRAM_TEST_PASS:
+ case MC_CMD_NVRAM_TEST_NOTSUPP:
+ return 0;
+ default:
+ return -EIO;
+ }
+}
+
+int efx_mcdi_nvram_test_all(struct efx_nic *efx)
+{
+ u32 nvram_types;
+ unsigned int type;
+ int rc;
+
+ rc = efx_mcdi_nvram_types(efx, &nvram_types);
+ if (rc)
+ goto fail1;
+
+ type = 0;
+ while (nvram_types != 0) {
+ if (nvram_types & 1) {
+ rc = efx_mcdi_nvram_test(efx, type);
+ if (rc)
+ goto fail2;
+ }
+ type++;
+ nvram_types >>= 1;
+ }
+
+ return 0;
+
+fail2:
+ netif_err(efx, hw, efx->net_dev, "%s: failed type=%u\n",
+ __func__, type);
+fail1:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+static int efx_mcdi_read_assertion(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_ASSERTS_IN_LEN);
+ MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN);
+ unsigned int flags, index;
+ const char *reason;
+ size_t outlen;
+ int retry;
+ int rc;
+
+ /* Attempt to read any stored assertion state before we reboot
+ * the mcfw out of the assertion handler. Retry twice: once
+ * because a boot-time assertion might cause this command to fail
+ * with EINTR, and again because GET_ASSERTS can race with
+ * MC_CMD_REBOOT running on the other port. */
+ retry = 2;
+ do {
+ MCDI_SET_DWORD(inbuf, GET_ASSERTS_IN_CLEAR, 1);
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_ASSERTS,
+ inbuf, MC_CMD_GET_ASSERTS_IN_LEN,
+ outbuf, sizeof(outbuf), &outlen);
+ } while ((rc == -EINTR || rc == -EIO) && retry-- > 0);
+
+ if (rc) {
+ efx_mcdi_display_error(efx, MC_CMD_GET_ASSERTS,
+ MC_CMD_GET_ASSERTS_IN_LEN, outbuf,
+ outlen, rc);
+ return rc;
+ }
+ if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN)
+ return -EIO;
+
+ /* Print out any recorded assertion state */
+ flags = MCDI_DWORD(outbuf, GET_ASSERTS_OUT_GLOBAL_FLAGS);
+ if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS)
+ return 0;
+
+ reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL)
+ ? "system-level assertion"
+ : (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL)
+ ? "thread-level assertion"
+ : (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED)
+ ? "watchdog reset"
+ : "unknown assertion";
+ netif_err(efx, hw, efx->net_dev,
+ "MCPU %s at PC = 0x%.8x in thread 0x%.8x\n", reason,
+ MCDI_DWORD(outbuf, GET_ASSERTS_OUT_SAVED_PC_OFFS),
+ MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS));
+
+ /* Print out the registers */
+ for (index = 0;
+ index < MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM;
+ index++)
+ netif_err(efx, hw, efx->net_dev, "R%.2d (?): 0x%.8x\n",
+ 1 + index,
+ MCDI_ARRAY_DWORD(outbuf, GET_ASSERTS_OUT_GP_REGS_OFFS,
+ index));
+
+ return 0;
+}
+
+static void efx_mcdi_exit_assertion(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);
+
+ /* If the MC is running debug firmware, it might now be
+ * waiting for a debugger to attach, but we just want it to
+ * reboot. We set a flag that makes the command a no-op if it
+ * has already done so. We don't know what return code to
+ * expect (0 or -EIO), so ignore it.
+ */
+ BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
+ MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS,
+ MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION);
+ (void) efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN,
+ NULL, 0, NULL);
+}
+
+int efx_mcdi_handle_assertion(struct efx_nic *efx)
+{
+ int rc;
+
+ rc = efx_mcdi_read_assertion(efx);
+ if (rc)
+ return rc;
+
+ efx_mcdi_exit_assertion(efx);
+
+ return 0;
+}
+
+void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_ID_LED_IN_LEN);
+ int rc;
+
+ BUILD_BUG_ON(EFX_LED_OFF != MC_CMD_LED_OFF);
+ BUILD_BUG_ON(EFX_LED_ON != MC_CMD_LED_ON);
+ BUILD_BUG_ON(EFX_LED_DEFAULT != MC_CMD_LED_DEFAULT);
+
+ BUILD_BUG_ON(MC_CMD_SET_ID_LED_OUT_LEN != 0);
+
+ MCDI_SET_DWORD(inbuf, SET_ID_LED_IN_STATE, mode);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+}
+
+static int efx_mcdi_reset_func(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_ENTITY_RESET_IN_LEN);
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_ENTITY_RESET_OUT_LEN != 0);
+ MCDI_POPULATE_DWORD_1(inbuf, ENTITY_RESET_IN_FLAG,
+ ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET, 1);
+ rc = efx_mcdi_rpc(efx, MC_CMD_ENTITY_RESET, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ return rc;
+}
+
+static int efx_mcdi_reset_mc(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
+ MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, 0);
+ rc = efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ /* White is black, and up is down: MC_CMD_REBOOT normally takes the MC
+ * down before it can respond, so -EIO indicates the reboot happened
+ * while a successful response indicates it did not.
+ */
+ if (rc == -EIO)
+ return 0;
+ if (rc == 0)
+ rc = -EIO;
+ return rc;
+}
+
+enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason)
+{
+ return RESET_TYPE_RECOVER_OR_ALL;
+}
+
+int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method)
+{
+ int rc;
+
+ /* If MCDI is down, we can't handle_assertion */
+ if (method == RESET_TYPE_MCDI_TIMEOUT) {
+ rc = pci_reset_function(efx->pci_dev);
+ if (rc)
+ return rc;
+ /* Re-enable polled MCDI completion */
+ if (efx->mcdi) {
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+ mcdi->mode = MCDI_MODE_POLL;
+ }
+ return 0;
+ }
+
+ /* Recover from a failed assertion pre-reset */
+ rc = efx_mcdi_handle_assertion(efx);
+ if (rc)
+ return rc;
+
+ if (method == RESET_TYPE_WORLD)
+ return efx_mcdi_reset_mc(efx);
+ else
+ return efx_mcdi_reset_func(efx);
+}
+
+static int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type,
+ const u8 *mac, int *id_out)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_SET_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_SET_OUT_LEN);
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_WOL_TYPE, type);
+ MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_FILTER_MODE,
+ MC_CMD_FILTER_MODE_SIMPLE);
+ ether_addr_copy(MCDI_PTR(inbuf, WOL_FILTER_SET_IN_MAGIC_MAC), mac);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_SET, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ goto fail;
+
+ if (outlen < MC_CMD_WOL_FILTER_SET_OUT_LEN) {
+ rc = -EIO;
+ goto fail;
+ }
+
+ *id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_SET_OUT_FILTER_ID);
+
+ return 0;
+
+fail:
+ *id_out = -1;
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+
+}
+
+
+int
+efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac, int *id_out)
+{
+ return efx_mcdi_wol_filter_set(efx, MC_CMD_WOL_TYPE_MAGIC, mac, id_out);
+}
+
+
+int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_GET_OUT_LEN);
+ size_t outlen;
+ int rc;
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_GET, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ goto fail;
+
+ if (outlen < MC_CMD_WOL_FILTER_GET_OUT_LEN) {
+ rc = -EIO;
+ goto fail;
+ }
+
+ *id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_GET_OUT_FILTER_ID);
+
+ return 0;
+
+fail:
+ *id_out = -1;
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+
+int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_REMOVE_IN_LEN);
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, WOL_FILTER_REMOVE_IN_FILTER_ID, (u32)id);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_REMOVE, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ return rc;
+}
+
+int efx_mcdi_flush_rxqs(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+ struct efx_rx_queue *rx_queue;
+ MCDI_DECLARE_BUF(inbuf,
+ MC_CMD_FLUSH_RX_QUEUES_IN_LEN(EFX_MAX_CHANNELS));
+ int rc, count;
+
+ BUILD_BUG_ON(EFX_MAX_CHANNELS >
+ MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM);
+
+ count = 0;
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_rx_queue(rx_queue, channel) {
+ if (rx_queue->flush_pending) {
+ rx_queue->flush_pending = false;
+ atomic_dec(&efx->rxq_flush_pending);
+ MCDI_SET_ARRAY_DWORD(
+ inbuf, FLUSH_RX_QUEUES_IN_QID_OFST,
+ count, efx_rx_queue_index(rx_queue));
+ count++;
+ }
+ }
+ }
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, inbuf,
+ MC_CMD_FLUSH_RX_QUEUES_IN_LEN(count), NULL, 0, NULL);
+ WARN_ON(rc < 0);
+
+ return rc;
+}
+
+int efx_mcdi_wol_filter_reset(struct efx_nic *efx)
+{
+ int rc;
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_RESET, NULL, 0, NULL, 0, NULL);
+ return rc;
+}
+
+int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_WORKAROUND_IN_LEN);
+
+ BUILD_BUG_ON(MC_CMD_WORKAROUND_OUT_LEN != 0);
+ MCDI_SET_DWORD(inbuf, WORKAROUND_IN_TYPE, type);
+ MCDI_SET_DWORD(inbuf, WORKAROUND_IN_ENABLED, enabled);
+ return efx_mcdi_rpc(efx, MC_CMD_WORKAROUND, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+}
+
+#ifdef CONFIG_SFC_MTD
+
+#define EFX_MCDI_NVRAM_LEN_MAX 128
+
+static int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_START_IN_LEN);
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_START_IN_TYPE, type);
+
+ BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_START_OUT_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ return rc;
+}
+
+static int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
+ loff_t offset, u8 *buffer, size_t length)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_READ_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf,
+ MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX));
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type);
+ MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, offset);
+ MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+
+ memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length);
+ return 0;
+}
+
+static int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
+ loff_t offset, const u8 *buffer, size_t length)
+{
+ MCDI_DECLARE_BUF(inbuf,
+ MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX));
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type);
+ MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset);
+ MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length);
+ memcpy(MCDI_PTR(inbuf, NVRAM_WRITE_IN_WRITE_BUFFER), buffer, length);
+
+ BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf,
+ ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4),
+ NULL, 0, NULL);
+ return rc;
+}
+
+static int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
+ loff_t offset, size_t length)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_ERASE_IN_LEN);
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_TYPE, type);
+ MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_OFFSET, offset);
+ MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_LENGTH, length);
+
+ BUILD_BUG_ON(MC_CMD_NVRAM_ERASE_OUT_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ return rc;
+}
+
+static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN);
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type);
+
+ BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ return rc;
+}
+
+int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start,
+ size_t len, size_t *retlen, u8 *buffer)
+{
+ struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
+ struct efx_nic *efx = mtd->priv;
+ loff_t offset = start;
+ loff_t end = min_t(loff_t, start + len, mtd->size);
+ size_t chunk;
+ int rc = 0;
+
+ while (offset < end) {
+ chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
+ rc = efx_mcdi_nvram_read(efx, part->nvram_type, offset,
+ buffer, chunk);
+ if (rc)
+ goto out;
+ offset += chunk;
+ buffer += chunk;
+ }
+out:
+ *retlen = offset - start;
+ return rc;
+}
+
+int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
+{
+ struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
+ struct efx_nic *efx = mtd->priv;
+ loff_t offset = start & ~((loff_t)(mtd->erasesize - 1));
+ loff_t end = min_t(loff_t, start + len, mtd->size);
+ size_t chunk = part->common.mtd.erasesize;
+ int rc = 0;
+
+ if (!part->updating) {
+ rc = efx_mcdi_nvram_update_start(efx, part->nvram_type);
+ if (rc)
+ goto out;
+ part->updating = true;
+ }
+
+ /* The MCDI interface can in fact do multiple erase blocks at once;
+ * but erasing may be slow, so we make multiple calls here to avoid
+ * tripping the MCDI RPC timeout. */
+ while (offset < end) {
+ rc = efx_mcdi_nvram_erase(efx, part->nvram_type, offset,
+ chunk);
+ if (rc)
+ goto out;
+ offset += chunk;
+ }
+out:
+ return rc;
+}
+
+int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start,
+ size_t len, size_t *retlen, const u8 *buffer)
+{
+ struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
+ struct efx_nic *efx = mtd->priv;
+ loff_t offset = start;
+ loff_t end = min_t(loff_t, start + len, mtd->size);
+ size_t chunk;
+ int rc = 0;
+
+ if (!part->updating) {
+ rc = efx_mcdi_nvram_update_start(efx, part->nvram_type);
+ if (rc)
+ goto out;
+ part->updating = true;
+ }
+
+ while (offset < end) {
+ chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
+ rc = efx_mcdi_nvram_write(efx, part->nvram_type, offset,
+ buffer, chunk);
+ if (rc)
+ goto out;
+ offset += chunk;
+ buffer += chunk;
+ }
+out:
+ *retlen = offset - start;
+ return rc;
+}
+
+int efx_mcdi_mtd_sync(struct mtd_info *mtd)
+{
+ struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
+ struct efx_nic *efx = mtd->priv;
+ int rc = 0;
+
+ if (part->updating) {
+ part->updating = false;
+ rc = efx_mcdi_nvram_update_finish(efx, part->nvram_type);
+ }
+
+ return rc;
+}
+
+void efx_mcdi_mtd_rename(struct efx_mtd_partition *part)
+{
+ struct efx_mcdi_mtd_partition *mcdi_part =
+ container_of(part, struct efx_mcdi_mtd_partition, common);
+ struct efx_nic *efx = part->mtd.priv;
+
+ snprintf(part->name, sizeof(part->name), "%s %s:%02x",
+ efx->name, part->type_name, mcdi_part->fw_subtype);
+}
+
+#endif /* CONFIG_SFC_MTD */
diff --git a/kernel/drivers/net/ethernet/sfc/mcdi.h b/kernel/drivers/net/ethernet/sfc/mcdi.h
new file mode 100644
index 000000000..56465f746
--- /dev/null
+++ b/kernel/drivers/net/ethernet/sfc/mcdi.h
@@ -0,0 +1,361 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2008-2013 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_MCDI_H
+#define EFX_MCDI_H
+
+/**
+ * enum efx_mcdi_state - MCDI request handling state
+ * @MCDI_STATE_QUIESCENT: No pending MCDI requests. If the caller holds the
+ * mcdi @iface_lock then they are able to move to %MCDI_STATE_RUNNING_SYNC
+ * or %MCDI_STATE_RUNNING_ASYNC
+ * @MCDI_STATE_RUNNING_SYNC: There is a synchronous MCDI request pending.
+ * Only the thread that moved into this state is allowed to move out of it.
+ * @MCDI_STATE_RUNNING_ASYNC: There is an asynchronous MCDI request pending.
+ * @MCDI_STATE_COMPLETED: An MCDI request has completed, but the owning thread
+ * has not yet consumed the result. For all other threads, equivalent to
+ * a running state (%MCDI_STATE_RUNNING_SYNC or %MCDI_STATE_RUNNING_ASYNC).
+ */
+enum efx_mcdi_state {
+ MCDI_STATE_QUIESCENT,
+ MCDI_STATE_RUNNING_SYNC,
+ MCDI_STATE_RUNNING_ASYNC,
+ MCDI_STATE_COMPLETED,
+};
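+
+/* A rough sketch of the synchronous request life-cycle, assuming the flow
+ * implemented in mcdi.c (asynchronous requests follow the same shape through
+ * %MCDI_STATE_RUNNING_ASYNC):
+ *
+ *    QUIESCENT    --efx_mcdi_rpc_start()-->          RUNNING_SYNC
+ *    RUNNING_SYNC --completion event or poll-->      COMPLETED
+ *    COMPLETED    --efx_mcdi_rpc_finish()-->         QUIESCENT
+ */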
+
+/**
+ * enum efx_mcdi_mode - MCDI transaction mode
+ * @MCDI_MODE_POLL: poll for MCDI completion, until timeout
+ * @MCDI_MODE_EVENTS: wait for an mcdi_event. On timeout, poll once
+ * @MCDI_MODE_FAIL: we think MCDI is dead, so fail-fast all calls
+ */
+enum efx_mcdi_mode {
+ MCDI_MODE_POLL,
+ MCDI_MODE_EVENTS,
+ MCDI_MODE_FAIL,
+};
+
+/**
+ * struct efx_mcdi_iface - MCDI protocol context
+ * @efx: The associated NIC.
+ * @state: Request handling state. Waited for by @wq.
+ * @mode: Poll for mcdi completion, or wait for an mcdi_event.
+ * @wq: Wait queue for threads waiting for @state to change
+ * @new_epoch: Indicates start of day or start of MC reboot recovery
+ * @iface_lock: Serialises access to @seqno, @credits and response metadata
+ * @seqno: The next sequence number to use for mcdi requests.
+ * @credits: Number of spurious MCDI completion events allowed before we
+ * trigger a fatal error
+ * @resprc: Response error/success code (Linux numbering)
+ * @resp_hdr_len: Response header length
+ * @resp_data_len: Response data (SDU or error) length
+ * @async_lock: Serialises access to @async_list while event processing is
+ * enabled
+ * @async_list: Queue of asynchronous requests
+ * @async_timer: Timer for asynchronous request timeout
+ */
+struct efx_mcdi_iface {
+ struct efx_nic *efx;
+ enum efx_mcdi_state state;
+ enum efx_mcdi_mode mode;
+ wait_queue_head_t wq;
+ spinlock_t iface_lock;
+ bool new_epoch;
+ unsigned int credits;
+ unsigned int seqno;
+ int resprc;
+ size_t resp_hdr_len;
+ size_t resp_data_len;
+ spinlock_t async_lock;
+ struct list_head async_list;
+ struct timer_list async_timer;
+};
+
+struct efx_mcdi_mon {
+ struct efx_buffer dma_buf;
+ struct mutex update_lock;
+ unsigned long last_update;
+ struct device *device;
+ struct efx_mcdi_mon_attribute *attrs;
+ struct attribute_group group;
+ const struct attribute_group *groups[2];
+ unsigned int n_attrs;
+};
+
+struct efx_mcdi_mtd_partition {
+ struct efx_mtd_partition common;
+ bool updating;
+ u16 nvram_type;
+ u16 fw_subtype;
+};
+
+#define to_efx_mcdi_mtd_partition(mtd) \
+ container_of(mtd, struct efx_mcdi_mtd_partition, common.mtd)
+
+/**
+ * struct efx_mcdi_data - extra state for NICs that implement MCDI
+ * @iface: Interface/protocol state
+ * @hwmon: Hardware monitor state
+ * @fn_flags: Flags for this function, as returned by %MC_CMD_DRV_ATTACH.
+ */
+struct efx_mcdi_data {
+ struct efx_mcdi_iface iface;
+#ifdef CONFIG_SFC_MCDI_MON
+ struct efx_mcdi_mon hwmon;
+#endif
+ u32 fn_flags;
+};
+
+static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
+{
+ EFX_BUG_ON_PARANOID(!efx->mcdi);
+ return &efx->mcdi->iface;
+}
+
+#ifdef CONFIG_SFC_MCDI_MON
+static inline struct efx_mcdi_mon *efx_mcdi_mon(struct efx_nic *efx)
+{
+ EFX_BUG_ON_PARANOID(!efx->mcdi);
+ return &efx->mcdi->hwmon;
+}
+#endif
+
+int efx_mcdi_init(struct efx_nic *efx);
+void efx_mcdi_fini(struct efx_nic *efx);
+
+int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, const efx_dword_t *inbuf,
+ size_t inlen, efx_dword_t *outbuf, size_t outlen,
+ size_t *outlen_actual);
+int efx_mcdi_rpc_quiet(struct efx_nic *efx, unsigned cmd,
+ const efx_dword_t *inbuf, size_t inlen,
+ efx_dword_t *outbuf, size_t outlen,
+ size_t *outlen_actual);
+
+int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
+ const efx_dword_t *inbuf, size_t inlen);
+int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
+ efx_dword_t *outbuf, size_t outlen,
+ size_t *outlen_actual);
+int efx_mcdi_rpc_finish_quiet(struct efx_nic *efx, unsigned cmd,
+ size_t inlen, efx_dword_t *outbuf,
+ size_t outlen, size_t *outlen_actual);
+
+typedef void efx_mcdi_async_completer(struct efx_nic *efx,
+ unsigned long cookie, int rc,
+ efx_dword_t *outbuf,
+ size_t outlen_actual);
+int efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
+ const efx_dword_t *inbuf, size_t inlen, size_t outlen,
+ efx_mcdi_async_completer *complete,
+ unsigned long cookie);
+int efx_mcdi_rpc_async_quiet(struct efx_nic *efx, unsigned int cmd,
+ const efx_dword_t *inbuf, size_t inlen,
+ size_t outlen,
+ efx_mcdi_async_completer *complete,
+ unsigned long cookie);
+
+void efx_mcdi_display_error(struct efx_nic *efx, unsigned cmd,
+ size_t inlen, efx_dword_t *outbuf,
+ size_t outlen, int rc);
+
+int efx_mcdi_poll_reboot(struct efx_nic *efx);
+void efx_mcdi_mode_poll(struct efx_nic *efx);
+void efx_mcdi_mode_event(struct efx_nic *efx);
+void efx_mcdi_flush_async(struct efx_nic *efx);
+
+void efx_mcdi_process_event(struct efx_channel *channel, efx_qword_t *event);
+void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
+
+/* We expect that 16- and 32-bit fields in MCDI requests and responses
+ * are appropriately aligned, but 64-bit fields are only
+ * 32-bit-aligned. Also, on Siena we must copy to the MC shared
+ * memory strictly 32 bits at a time, so add any necessary padding.
+ */
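+/* Illustrative sketch only, not part of the driver: the declaration and
+ * accessor macros below are typically used together like this (the command
+ * shown, MC_CMD_NVRAM_INFO, mirrors efx_mcdi_nvram_info() in mcdi.c):
+ *
+ *    MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_INFO_IN_LEN);
+ *    MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_INFO_OUT_LEN);
+ *    size_t outlen;
+ *    int rc;
+ *
+ *    MCDI_SET_DWORD(inbuf, NVRAM_INFO_IN_TYPE, type);
+ *    rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_INFO, inbuf, sizeof(inbuf),
+ *                      outbuf, sizeof(outbuf), &outlen);
+ *    if (rc == 0 && outlen >= MC_CMD_NVRAM_INFO_OUT_LEN)
+ *            size = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE);
+ */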
+#define MCDI_DECLARE_BUF(_name, _len) \
+ efx_dword_t _name[DIV_ROUND_UP(_len, 4)]
+#define MCDI_DECLARE_BUF_OUT_OR_ERR(_name, _len) \
+ MCDI_DECLARE_BUF(_name, max_t(size_t, _len, 8))
+#define _MCDI_PTR(_buf, _offset) \
+ ((u8 *)(_buf) + (_offset))
+#define MCDI_PTR(_buf, _field) \
+ _MCDI_PTR(_buf, MC_CMD_ ## _field ## _OFST)
+#define _MCDI_CHECK_ALIGN(_ofst, _align) \
+ ((_ofst) + BUILD_BUG_ON_ZERO((_ofst) & (_align - 1)))
+#define _MCDI_DWORD(_buf, _field) \
+ ((_buf) + (_MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _OFST, 4) >> 2))
+
+#define MCDI_WORD(_buf, _field) \
+ ((u16)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 2) + \
+ le16_to_cpu(*(__force const __le16 *)MCDI_PTR(_buf, _field)))
+#define MCDI_SET_DWORD(_buf, _field, _value) \
+ EFX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0, _value)
+#define MCDI_DWORD(_buf, _field) \
+ EFX_DWORD_FIELD(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0)
+#define MCDI_POPULATE_DWORD_1(_buf, _field, _name1, _value1) \
+ EFX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), \
+ MC_CMD_ ## _name1, _value1)
+#define MCDI_POPULATE_DWORD_2(_buf, _field, _name1, _value1, \
+ _name2, _value2) \
+ EFX_POPULATE_DWORD_2(*_MCDI_DWORD(_buf, _field), \
+ MC_CMD_ ## _name1, _value1, \
+ MC_CMD_ ## _name2, _value2)
+#define MCDI_POPULATE_DWORD_3(_buf, _field, _name1, _value1, \
+ _name2, _value2, _name3, _value3) \
+ EFX_POPULATE_DWORD_3(*_MCDI_DWORD(_buf, _field), \
+ MC_CMD_ ## _name1, _value1, \
+ MC_CMD_ ## _name2, _value2, \
+ MC_CMD_ ## _name3, _value3)
+#define MCDI_POPULATE_DWORD_4(_buf, _field, _name1, _value1, \
+ _name2, _value2, _name3, _value3, \
+ _name4, _value4) \
+ EFX_POPULATE_DWORD_4(*_MCDI_DWORD(_buf, _field), \
+ MC_CMD_ ## _name1, _value1, \
+ MC_CMD_ ## _name2, _value2, \
+ MC_CMD_ ## _name3, _value3, \
+ MC_CMD_ ## _name4, _value4)
+#define MCDI_POPULATE_DWORD_5(_buf, _field, _name1, _value1, \
+ _name2, _value2, _name3, _value3, \
+ _name4, _value4, _name5, _value5) \
+ EFX_POPULATE_DWORD_5(*_MCDI_DWORD(_buf, _field), \
+ MC_CMD_ ## _name1, _value1, \
+ MC_CMD_ ## _name2, _value2, \
+ MC_CMD_ ## _name3, _value3, \
+ MC_CMD_ ## _name4, _value4, \
+ MC_CMD_ ## _name5, _value5)
+#define MCDI_POPULATE_DWORD_6(_buf, _field, _name1, _value1, \
+ _name2, _value2, _name3, _value3, \
+ _name4, _value4, _name5, _value5, \
+ _name6, _value6) \
+ EFX_POPULATE_DWORD_6(*_MCDI_DWORD(_buf, _field), \
+ MC_CMD_ ## _name1, _value1, \
+ MC_CMD_ ## _name2, _value2, \
+ MC_CMD_ ## _name3, _value3, \
+ MC_CMD_ ## _name4, _value4, \
+ MC_CMD_ ## _name5, _value5, \
+ MC_CMD_ ## _name6, _value6)
+#define MCDI_POPULATE_DWORD_7(_buf, _field, _name1, _value1, \
+ _name2, _value2, _name3, _value3, \
+ _name4, _value4, _name5, _value5, \
+ _name6, _value6, _name7, _value7) \
+ EFX_POPULATE_DWORD_7(*_MCDI_DWORD(_buf, _field), \
+ MC_CMD_ ## _name1, _value1, \
+ MC_CMD_ ## _name2, _value2, \
+ MC_CMD_ ## _name3, _value3, \
+ MC_CMD_ ## _name4, _value4, \
+ MC_CMD_ ## _name5, _value5, \
+ MC_CMD_ ## _name6, _value6, \
+ MC_CMD_ ## _name7, _value7)
+#define MCDI_SET_QWORD(_buf, _field, _value) \
+ do { \
+ EFX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[0], \
+ EFX_DWORD_0, (u32)(_value)); \
+ EFX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[1], \
+ EFX_DWORD_0, (u64)(_value) >> 32); \
+ } while (0)
+#define MCDI_QWORD(_buf, _field) \
+ (EFX_DWORD_FIELD(_MCDI_DWORD(_buf, _field)[0], EFX_DWORD_0) | \
+ (u64)EFX_DWORD_FIELD(_MCDI_DWORD(_buf, _field)[1], EFX_DWORD_0) << 32)
+#define MCDI_FIELD(_ptr, _type, _field) \
+ EFX_EXTRACT_DWORD( \
+ *(efx_dword_t *) \
+ _MCDI_PTR(_ptr, MC_CMD_ ## _type ## _ ## _field ## _OFST & ~3),\
+ MC_CMD_ ## _type ## _ ## _field ## _LBN & 0x1f, \
+ (MC_CMD_ ## _type ## _ ## _field ## _LBN & 0x1f) + \
+ MC_CMD_ ## _type ## _ ## _field ## _WIDTH - 1)
+
+#define _MCDI_ARRAY_PTR(_buf, _field, _index, _align) \
+ (_MCDI_PTR(_buf, _MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _OFST, _align))\
+ + (_index) * _MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _LEN, _align))
+#define MCDI_DECLARE_STRUCT_PTR(_name) \
+ efx_dword_t *_name
+#define MCDI_ARRAY_STRUCT_PTR(_buf, _field, _index) \
+ ((efx_dword_t *)_MCDI_ARRAY_PTR(_buf, _field, _index, 4))
+#define MCDI_VAR_ARRAY_LEN(_len, _field) \
+ min_t(size_t, MC_CMD_ ## _field ## _MAXNUM, \
+ ((_len) - MC_CMD_ ## _field ## _OFST) / MC_CMD_ ## _field ## _LEN)
+#define MCDI_ARRAY_WORD(_buf, _field, _index) \
+ (BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 2) + \
+ le16_to_cpu(*(__force const __le16 *) \
+ _MCDI_ARRAY_PTR(_buf, _field, _index, 2)))
+#define _MCDI_ARRAY_DWORD(_buf, _field, _index) \
+ (BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 4) + \
+ (efx_dword_t *)_MCDI_ARRAY_PTR(_buf, _field, _index, 4))
+#define MCDI_SET_ARRAY_DWORD(_buf, _field, _index, _value) \
+ EFX_SET_DWORD_FIELD(*_MCDI_ARRAY_DWORD(_buf, _field, _index), \
+ EFX_DWORD_0, _value)
+#define MCDI_ARRAY_DWORD(_buf, _field, _index) \
+ EFX_DWORD_FIELD(*_MCDI_ARRAY_DWORD(_buf, _field, _index), EFX_DWORD_0)
+#define _MCDI_ARRAY_QWORD(_buf, _field, _index) \
+ (BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 8) + \
+ (efx_dword_t *)_MCDI_ARRAY_PTR(_buf, _field, _index, 4))
+#define MCDI_SET_ARRAY_QWORD(_buf, _field, _index, _value) \
+ do { \
+ EFX_SET_DWORD_FIELD(_MCDI_ARRAY_QWORD(_buf, _field, _index)[0],\
+ EFX_DWORD_0, (u32)(_value)); \
+ EFX_SET_DWORD_FIELD(_MCDI_ARRAY_QWORD(_buf, _field, _index)[1],\
+ EFX_DWORD_0, (u64)(_value) >> 32); \
+ } while (0)
+#define MCDI_ARRAY_FIELD(_buf, _field1, _type, _index, _field2) \
+ MCDI_FIELD(MCDI_ARRAY_STRUCT_PTR(_buf, _field1, _index), \
+ _type ## _TYPEDEF, _field2)
+
+#define MCDI_EVENT_FIELD(_ev, _field) \
+ EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field)
+
+void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len);
+int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
+ u16 *fw_subtype_list, u32 *capabilities);
+int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq);
+int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out);
+int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
+ size_t *size_out, size_t *erase_size_out,
+ bool *protected_out);
+int efx_mcdi_nvram_test_all(struct efx_nic *efx);
+int efx_mcdi_handle_assertion(struct efx_nic *efx);
+void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
+int efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac,
+ int *id_out);
+int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out);
+int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id);
+int efx_mcdi_wol_filter_reset(struct efx_nic *efx);
+int efx_mcdi_flush_rxqs(struct efx_nic *efx);
+int efx_mcdi_port_probe(struct efx_nic *efx);
+void efx_mcdi_port_remove(struct efx_nic *efx);
+int efx_mcdi_port_reconfigure(struct efx_nic *efx);
+int efx_mcdi_port_get_number(struct efx_nic *efx);
+u32 efx_mcdi_phy_get_caps(struct efx_nic *efx);
+void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev);
+int efx_mcdi_set_mac(struct efx_nic *efx);
+#define EFX_MC_STATS_GENERATION_INVALID ((__force __le64)(-1))
+void efx_mcdi_mac_start_stats(struct efx_nic *efx);
+void efx_mcdi_mac_stop_stats(struct efx_nic *efx);
+void efx_mcdi_mac_pull_stats(struct efx_nic *efx);
+bool efx_mcdi_mac_check_fault(struct efx_nic *efx);
+enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason);
+int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method);
+int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled);
+
+#ifdef CONFIG_SFC_MCDI_MON
+int efx_mcdi_mon_probe(struct efx_nic *efx);
+void efx_mcdi_mon_remove(struct efx_nic *efx);
+#else
+static inline int efx_mcdi_mon_probe(struct efx_nic *efx) { return 0; }
+static inline void efx_mcdi_mon_remove(struct efx_nic *efx) {}
+#endif
+
+#ifdef CONFIG_SFC_MTD
+int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start, size_t len,
+ size_t *retlen, u8 *buffer);
+int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len);
+int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start, size_t len,
+ size_t *retlen, const u8 *buffer);
+int efx_mcdi_mtd_sync(struct mtd_info *mtd);
+void efx_mcdi_mtd_rename(struct efx_mtd_partition *part);
+#endif
+
+#endif /* EFX_MCDI_H */
diff --git a/kernel/drivers/net/ethernet/sfc/mcdi_mon.c b/kernel/drivers/net/ethernet/sfc/mcdi_mon.c
new file mode 100644
index 000000000..bc27d5b58
--- /dev/null
+++ b/kernel/drivers/net/ethernet/sfc/mcdi_mon.c
@@ -0,0 +1,534 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2011-2013 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/hwmon.h>
+#include <linux/stat.h>
+
+#include "net_driver.h"
+#include "mcdi.h"
+#include "mcdi_pcol.h"
+#include "nic.h"
+
+enum efx_hwmon_type {
+ EFX_HWMON_UNKNOWN,
+ EFX_HWMON_TEMP, /* temperature */
+ EFX_HWMON_COOL, /* cooling device, probably a heatsink */
+ EFX_HWMON_IN, /* voltage */
+ EFX_HWMON_CURR, /* current */
+ EFX_HWMON_POWER, /* power */
+ EFX_HWMON_TYPES_COUNT
+};
+
+static const char *const efx_hwmon_unit[EFX_HWMON_TYPES_COUNT] = {
+ [EFX_HWMON_TEMP] = " degC",
+ [EFX_HWMON_COOL] = " rpm", /* though nonsense for a heatsink */
+ [EFX_HWMON_IN] = " mV",
+ [EFX_HWMON_CURR] = " mA",
+ [EFX_HWMON_POWER] = " W",
+};
+
+static const struct {
+ const char *label;
+ enum efx_hwmon_type hwmon_type;
+ int port;
+} efx_mcdi_sensor_type[] = {
+#define SENSOR(name, label, hwmon_type, port) \
+ [MC_CMD_SENSOR_##name] = { label, EFX_HWMON_ ## hwmon_type, port }
+ SENSOR(CONTROLLER_TEMP, "Controller board temp.", TEMP, -1),
+ SENSOR(PHY_COMMON_TEMP, "PHY temp.", TEMP, -1),
+ SENSOR(CONTROLLER_COOLING, "Controller heat sink", COOL, -1),
+ SENSOR(PHY0_TEMP, "PHY temp.", TEMP, 0),
+ SENSOR(PHY0_COOLING, "PHY heat sink", COOL, 0),
+ SENSOR(PHY1_TEMP, "PHY temp.", TEMP, 1),
+ SENSOR(PHY1_COOLING, "PHY heat sink", COOL, 1),
+ SENSOR(IN_1V0, "1.0V supply", IN, -1),
+ SENSOR(IN_1V2, "1.2V supply", IN, -1),
+ SENSOR(IN_1V8, "1.8V supply", IN, -1),
+ SENSOR(IN_2V5, "2.5V supply", IN, -1),
+ SENSOR(IN_3V3, "3.3V supply", IN, -1),
+ SENSOR(IN_12V0, "12.0V supply", IN, -1),
+ SENSOR(IN_1V2A, "1.2V analogue supply", IN, -1),
+ SENSOR(IN_VREF, "Ref. voltage", IN, -1),
+ SENSOR(OUT_VAOE, "AOE FPGA supply", IN, -1),
+ SENSOR(AOE_TEMP, "AOE FPGA temp.", TEMP, -1),
+ SENSOR(PSU_AOE_TEMP, "AOE regulator temp.", TEMP, -1),
+ SENSOR(PSU_TEMP, "Controller regulator temp.",
+ TEMP, -1),
+ SENSOR(FAN_0, "Fan 0", COOL, -1),
+ SENSOR(FAN_1, "Fan 1", COOL, -1),
+ SENSOR(FAN_2, "Fan 2", COOL, -1),
+ SENSOR(FAN_3, "Fan 3", COOL, -1),
+ SENSOR(FAN_4, "Fan 4", COOL, -1),
+ SENSOR(IN_VAOE, "AOE input supply", IN, -1),
+ SENSOR(OUT_IAOE, "AOE output current", CURR, -1),
+ SENSOR(IN_IAOE, "AOE input current", CURR, -1),
+ SENSOR(NIC_POWER, "Board power use", POWER, -1),
+ SENSOR(IN_0V9, "0.9V supply", IN, -1),
+ SENSOR(IN_I0V9, "0.9V supply current", CURR, -1),
+ SENSOR(IN_I1V2, "1.2V supply current", CURR, -1),
+ SENSOR(IN_0V9_ADC, "0.9V supply (ext. ADC)", IN, -1),
+ SENSOR(CONTROLLER_2_TEMP, "Controller board temp. 2", TEMP, -1),
+ SENSOR(VREG_INTERNAL_TEMP, "Regulator die temp.", TEMP, -1),
+ SENSOR(VREG_0V9_TEMP, "0.9V regulator temp.", TEMP, -1),
+ SENSOR(VREG_1V2_TEMP, "1.2V regulator temp.", TEMP, -1),
+ SENSOR(CONTROLLER_VPTAT,
+ "Controller PTAT voltage (int. ADC)", IN, -1),
+ SENSOR(CONTROLLER_INTERNAL_TEMP,
+ "Controller die temp. (int. ADC)", TEMP, -1),
+ SENSOR(CONTROLLER_VPTAT_EXTADC,
+ "Controller PTAT voltage (ext. ADC)", IN, -1),
+ SENSOR(CONTROLLER_INTERNAL_TEMP_EXTADC,
+ "Controller die temp. (ext. ADC)", TEMP, -1),
+ SENSOR(AMBIENT_TEMP, "Ambient temp.", TEMP, -1),
+ SENSOR(AIRFLOW, "Air flow raw", IN, -1),
+ SENSOR(VDD08D_VSS08D_CSR, "0.9V die (int. ADC)", IN, -1),
+ SENSOR(VDD08D_VSS08D_CSR_EXTADC, "0.9V die (ext. ADC)", IN, -1),
+ SENSOR(HOTPOINT_TEMP, "Controller board temp. (hotpoint)", TEMP, -1),
+#undef SENSOR
+};
+
+static const char *const sensor_status_names[] = {
+ [MC_CMD_SENSOR_STATE_OK] = "OK",
+ [MC_CMD_SENSOR_STATE_WARNING] = "Warning",
+ [MC_CMD_SENSOR_STATE_FATAL] = "Fatal",
+ [MC_CMD_SENSOR_STATE_BROKEN] = "Device failure",
+ [MC_CMD_SENSOR_STATE_NO_READING] = "No reading",
+};
+
+void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev)
+{
+ unsigned int type, state, value;
+ enum efx_hwmon_type hwmon_type = EFX_HWMON_UNKNOWN;
+ const char *name = NULL, *state_txt, *unit;
+
+ type = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_MONITOR);
+ state = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_STATE);
+ value = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_VALUE);
+
+	/* Deal gracefully with the board having more sensors than we
+	 * know about, but do not expect new sensor states. */
+ if (type < ARRAY_SIZE(efx_mcdi_sensor_type)) {
+ name = efx_mcdi_sensor_type[type].label;
+ hwmon_type = efx_mcdi_sensor_type[type].hwmon_type;
+ }
+ if (!name)
+ name = "No sensor name available";
+ EFX_BUG_ON_PARANOID(state >= ARRAY_SIZE(sensor_status_names));
+ state_txt = sensor_status_names[state];
+ EFX_BUG_ON_PARANOID(hwmon_type >= EFX_HWMON_TYPES_COUNT);
+ unit = efx_hwmon_unit[hwmon_type];
+ if (!unit)
+ unit = "";
+
+ netif_err(efx, hw, efx->net_dev,
+ "Sensor %d (%s) reports condition '%s' for value %d%s\n",
+ type, name, state_txt, value, unit);
+}
+
+#ifdef CONFIG_SFC_MCDI_MON
+
+struct efx_mcdi_mon_attribute {
+ struct device_attribute dev_attr;
+ unsigned int index;
+ unsigned int type;
+ enum efx_hwmon_type hwmon_type;
+ unsigned int limit_value;
+ char name[12];
+};
+
+static int efx_mcdi_mon_update(struct efx_nic *efx)
+{
+ struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_READ_SENSORS_EXT_IN_LEN);
+ int rc;
+
+ MCDI_SET_QWORD(inbuf, READ_SENSORS_EXT_IN_DMA_ADDR,
+ hwmon->dma_buf.dma_addr);
+ MCDI_SET_DWORD(inbuf, READ_SENSORS_EXT_IN_LENGTH, hwmon->dma_buf.len);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_READ_SENSORS,
+ inbuf, sizeof(inbuf), NULL, 0, NULL);
+ if (rc == 0)
+ hwmon->last_update = jiffies;
+ return rc;
+}
+
+static int efx_mcdi_mon_get_entry(struct device *dev, unsigned int index,
+ efx_dword_t *entry)
+{
+ struct efx_nic *efx = dev_get_drvdata(dev->parent);
+ struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_READ_SENSORS_OUT_LEN != 0);
+
+ mutex_lock(&hwmon->update_lock);
+
+ /* Use cached value if last update was < 1 s ago */
+ if (time_before(jiffies, hwmon->last_update + HZ))
+ rc = 0;
+ else
+ rc = efx_mcdi_mon_update(efx);
+
+ /* Copy out the requested entry */
+ *entry = ((efx_dword_t *)hwmon->dma_buf.addr)[index];
+
+ mutex_unlock(&hwmon->update_lock);
+
+ return rc;
+}
+
+static ssize_t efx_mcdi_mon_show_value(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct efx_mcdi_mon_attribute *mon_attr =
+ container_of(attr, struct efx_mcdi_mon_attribute, dev_attr);
+ efx_dword_t entry;
+ unsigned int value, state;
+ int rc;
+
+ rc = efx_mcdi_mon_get_entry(dev, mon_attr->index, &entry);
+ if (rc)
+ return rc;
+
+ state = EFX_DWORD_FIELD(entry, MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE);
+ if (state == MC_CMD_SENSOR_STATE_NO_READING)
+ return -EBUSY;
+
+ value = EFX_DWORD_FIELD(entry, MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE);
+
+ switch (mon_attr->hwmon_type) {
+ case EFX_HWMON_TEMP:
+ /* Convert temperature from degrees to milli-degrees Celsius */
+ value *= 1000;
+ break;
+ case EFX_HWMON_POWER:
+ /* Convert power from watts to microwatts */
+ value *= 1000000;
+ break;
+ default:
+ /* No conversion needed */
+ break;
+ }
+
+ return sprintf(buf, "%u\n", value);
+}
+
+static ssize_t efx_mcdi_mon_show_limit(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct efx_mcdi_mon_attribute *mon_attr =
+ container_of(attr, struct efx_mcdi_mon_attribute, dev_attr);
+ unsigned int value;
+
+ value = mon_attr->limit_value;
+
+ switch (mon_attr->hwmon_type) {
+ case EFX_HWMON_TEMP:
+ /* Convert temperature from degrees to milli-degrees Celsius */
+ value *= 1000;
+ break;
+ case EFX_HWMON_POWER:
+ /* Convert power from watts to microwatts */
+ value *= 1000000;
+ break;
+ default:
+ /* No conversion needed */
+ break;
+ }
+
+ return sprintf(buf, "%u\n", value);
+}
+
+static ssize_t efx_mcdi_mon_show_alarm(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct efx_mcdi_mon_attribute *mon_attr =
+ container_of(attr, struct efx_mcdi_mon_attribute, dev_attr);
+ efx_dword_t entry;
+ int state;
+ int rc;
+
+ rc = efx_mcdi_mon_get_entry(dev, mon_attr->index, &entry);
+ if (rc)
+ return rc;
+
+ state = EFX_DWORD_FIELD(entry, MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE);
+ return sprintf(buf, "%d\n", state != MC_CMD_SENSOR_STATE_OK);
+}
+
+static ssize_t efx_mcdi_mon_show_label(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct efx_mcdi_mon_attribute *mon_attr =
+ container_of(attr, struct efx_mcdi_mon_attribute, dev_attr);
+ return sprintf(buf, "%s\n",
+ efx_mcdi_sensor_type[mon_attr->type].label);
+}
+
+static void
+efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name,
+ ssize_t (*reader)(struct device *,
+ struct device_attribute *, char *),
+ unsigned int index, unsigned int type,
+ unsigned int limit_value)
+{
+ struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
+ struct efx_mcdi_mon_attribute *attr = &hwmon->attrs[hwmon->n_attrs];
+
+ strlcpy(attr->name, name, sizeof(attr->name));
+ attr->index = index;
+ attr->type = type;
+ if (type < ARRAY_SIZE(efx_mcdi_sensor_type))
+ attr->hwmon_type = efx_mcdi_sensor_type[type].hwmon_type;
+ else
+ attr->hwmon_type = EFX_HWMON_UNKNOWN;
+ attr->limit_value = limit_value;
+ sysfs_attr_init(&attr->dev_attr.attr);
+ attr->dev_attr.attr.name = attr->name;
+ attr->dev_attr.attr.mode = S_IRUGO;
+ attr->dev_attr.show = reader;
+ hwmon->group.attrs[hwmon->n_attrs++] = &attr->dev_attr.attr;
+}
+
+int efx_mcdi_mon_probe(struct efx_nic *efx)
+{
+ unsigned int n_temp = 0, n_cool = 0, n_in = 0, n_curr = 0, n_power = 0;
+ struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_SENSOR_INFO_EXT_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_SENSOR_INFO_OUT_LENMAX);
+ unsigned int n_pages, n_sensors, n_attrs, page;
+ size_t outlen;
+ char name[12];
+ u32 mask;
+ int rc, i, j, type;
+
+ /* Find out how many sensors are present */
+ n_sensors = 0;
+ page = 0;
+ do {
+ MCDI_SET_DWORD(inbuf, SENSOR_INFO_EXT_IN_PAGE, page);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_SENSOR_INFO, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < MC_CMD_SENSOR_INFO_OUT_LENMIN)
+ return -EIO;
+
+ mask = MCDI_DWORD(outbuf, SENSOR_INFO_OUT_MASK);
+ n_sensors += hweight32(mask & ~(1 << MC_CMD_SENSOR_PAGE0_NEXT));
+ ++page;
+ } while (mask & (1 << MC_CMD_SENSOR_PAGE0_NEXT));
+ n_pages = page;
+
+ /* Don't create a device if there are none */
+ if (n_sensors == 0)
+ return 0;
+
+ rc = efx_nic_alloc_buffer(
+ efx, &hwmon->dma_buf,
+ n_sensors * MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_LEN,
+ GFP_KERNEL);
+ if (rc)
+ return rc;
+
+ mutex_init(&hwmon->update_lock);
+ efx_mcdi_mon_update(efx);
+
+ /* Allocate space for the maximum possible number of
+ * attributes for this set of sensors:
+ * value, min, max, crit, alarm and label for each sensor.
+ */
+ n_attrs = 6 * n_sensors;
+ hwmon->attrs = kcalloc(n_attrs, sizeof(*hwmon->attrs), GFP_KERNEL);
+ if (!hwmon->attrs) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+ hwmon->group.attrs = kcalloc(n_attrs + 1, sizeof(struct attribute *),
+ GFP_KERNEL);
+ if (!hwmon->group.attrs) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+
+ for (i = 0, j = -1, type = -1; ; i++) {
+ enum efx_hwmon_type hwmon_type;
+ const char *hwmon_prefix;
+ unsigned hwmon_index;
+ u16 min1, max1, min2, max2;
+
+ /* Find next sensor type or exit if there is none */
+ do {
+ type++;
+
+ if ((type % 32) == 0) {
+ page = type / 32;
+ j = -1;
+ if (page == n_pages)
+ goto hwmon_register;
+
+ MCDI_SET_DWORD(inbuf, SENSOR_INFO_EXT_IN_PAGE,
+ page);
+ rc = efx_mcdi_rpc(efx, MC_CMD_SENSOR_INFO,
+ inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf),
+ &outlen);
+ if (rc)
+ goto fail;
+ if (outlen < MC_CMD_SENSOR_INFO_OUT_LENMIN) {
+ rc = -EIO;
+ goto fail;
+ }
+
+ mask = (MCDI_DWORD(outbuf,
+ SENSOR_INFO_OUT_MASK) &
+ ~(1 << MC_CMD_SENSOR_PAGE0_NEXT));
+
+ /* Check again for short response */
+ if (outlen <
+ MC_CMD_SENSOR_INFO_OUT_LEN(hweight32(mask))) {
+ rc = -EIO;
+ goto fail;
+ }
+ }
+ } while (!(mask & (1 << type % 32)));
+ j++;
+
+ if (type < ARRAY_SIZE(efx_mcdi_sensor_type)) {
+ hwmon_type = efx_mcdi_sensor_type[type].hwmon_type;
+
+ /* Skip sensors specific to a different port */
+ if (hwmon_type != EFX_HWMON_UNKNOWN &&
+ efx_mcdi_sensor_type[type].port >= 0 &&
+ efx_mcdi_sensor_type[type].port !=
+ efx_port_num(efx))
+ continue;
+ } else {
+ hwmon_type = EFX_HWMON_UNKNOWN;
+ }
+
+ switch (hwmon_type) {
+ case EFX_HWMON_TEMP:
+ hwmon_prefix = "temp";
+ hwmon_index = ++n_temp; /* 1-based */
+ break;
+ case EFX_HWMON_COOL:
+ /* This is likely to be a heatsink, but there
+ * is no convention for representing cooling
+ * devices other than fans.
+ */
+ hwmon_prefix = "fan";
+ hwmon_index = ++n_cool; /* 1-based */
+ break;
+ default:
+ hwmon_prefix = "in";
+ hwmon_index = n_in++; /* 0-based */
+ break;
+ case EFX_HWMON_CURR:
+ hwmon_prefix = "curr";
+ hwmon_index = ++n_curr; /* 1-based */
+ break;
+ case EFX_HWMON_POWER:
+ hwmon_prefix = "power";
+ hwmon_index = ++n_power; /* 1-based */
+ break;
+ }
+
+ min1 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
+ SENSOR_INFO_ENTRY, j, MIN1);
+ max1 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
+ SENSOR_INFO_ENTRY, j, MAX1);
+ min2 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
+ SENSOR_INFO_ENTRY, j, MIN2);
+ max2 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
+ SENSOR_INFO_ENTRY, j, MAX2);
+
+ if (min1 != max1) {
+ snprintf(name, sizeof(name), "%s%u_input",
+ hwmon_prefix, hwmon_index);
+ efx_mcdi_mon_add_attr(
+ efx, name, efx_mcdi_mon_show_value, i, type, 0);
+
+ if (hwmon_type != EFX_HWMON_POWER) {
+ snprintf(name, sizeof(name), "%s%u_min",
+ hwmon_prefix, hwmon_index);
+ efx_mcdi_mon_add_attr(
+ efx, name, efx_mcdi_mon_show_limit,
+ i, type, min1);
+ }
+
+ snprintf(name, sizeof(name), "%s%u_max",
+ hwmon_prefix, hwmon_index);
+ efx_mcdi_mon_add_attr(
+ efx, name, efx_mcdi_mon_show_limit,
+ i, type, max1);
+
+ if (min2 != max2) {
+ /* Assume max2 is critical value.
+ * But we have no good way to expose min2.
+ */
+ snprintf(name, sizeof(name), "%s%u_crit",
+ hwmon_prefix, hwmon_index);
+ efx_mcdi_mon_add_attr(
+ efx, name, efx_mcdi_mon_show_limit,
+ i, type, max2);
+ }
+ }
+
+ snprintf(name, sizeof(name), "%s%u_alarm",
+ hwmon_prefix, hwmon_index);
+ efx_mcdi_mon_add_attr(
+ efx, name, efx_mcdi_mon_show_alarm, i, type, 0);
+
+ if (type < ARRAY_SIZE(efx_mcdi_sensor_type) &&
+ efx_mcdi_sensor_type[type].label) {
+ snprintf(name, sizeof(name), "%s%u_label",
+ hwmon_prefix, hwmon_index);
+ efx_mcdi_mon_add_attr(
+ efx, name, efx_mcdi_mon_show_label, i, type, 0);
+ }
+ }
+
+hwmon_register:
+ hwmon->groups[0] = &hwmon->group;
+ hwmon->device = hwmon_device_register_with_groups(&efx->pci_dev->dev,
+ KBUILD_MODNAME, NULL,
+ hwmon->groups);
+ if (IS_ERR(hwmon->device)) {
+ rc = PTR_ERR(hwmon->device);
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ efx_mcdi_mon_remove(efx);
+ return rc;
+}
+
+void efx_mcdi_mon_remove(struct efx_nic *efx)
+{
+ struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
+
+ if (hwmon->device)
+ hwmon_device_unregister(hwmon->device);
+ kfree(hwmon->attrs);
+ kfree(hwmon->group.attrs);
+ efx_nic_free_buffer(efx, &hwmon->dma_buf);
+}
+
+#endif /* CONFIG_SFC_MCDI_MON */
diff --git a/kernel/drivers/net/ethernet/sfc/mcdi_pcol.h b/kernel/drivers/net/ethernet/sfc/mcdi_pcol.h
new file mode 100644
index 000000000..e028de10e
--- /dev/null
+++ b/kernel/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -0,0 +1,7907 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2009-2013 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+
+#ifndef MCDI_PCOL_H
+#define MCDI_PCOL_H
+
+/* Values to be written into FMCR_CZ_RESET_STATE_REG to control boot. */
+/* Power-on reset state */
+#define MC_FW_STATE_POR (1)
+/* If this is set in MC_RESET_STATE_REG then it should be
+ * possible to jump into IMEM without loading code from flash. */
+#define MC_FW_WARM_BOOT_OK (2)
+/* The MC main image has started to boot. */
+#define MC_FW_STATE_BOOTING (4)
+/* The Scheduler has started. */
+#define MC_FW_STATE_SCHED (8)
+/* If this is set in MC_RESET_STATE_REG then it should be
+ * possible to jump into IMEM without loading code from flash.
+ * Unlike a warm boot, assume DMEM has been reloaded, so that
+ * the MC persistent data must be reinitialised. */
+#define MC_FW_TEPID_BOOT_OK (16)
+/* BIST state has been initialized */
+#define MC_FW_BIST_INIT_OK (128)
+
+/* Siena MC shared memory offsets */
+/* The 'doorbell' addresses are hard-wired to alert the MC when written */
+#define MC_SMEM_P0_DOORBELL_OFST 0x000
+#define MC_SMEM_P1_DOORBELL_OFST 0x004
+/* The rest of these are firmware-defined */
+#define MC_SMEM_P0_PDU_OFST 0x008
+#define MC_SMEM_P1_PDU_OFST 0x108
+#define MC_SMEM_PDU_LEN 0x100
+#define MC_SMEM_P0_PTP_TIME_OFST 0x7f0
+#define MC_SMEM_P0_STATUS_OFST 0x7f8
+#define MC_SMEM_P1_STATUS_OFST 0x7fc
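+
+/* Hypothetical convenience macro (illustration only, not part of this
+ * header): the PDU offset for a given port can be selected from the
+ * per-port values above, e.g.
+ */
+#define MC_SMEM_EXAMPLE_PDU_OFST(port)					\
+	((port) ? MC_SMEM_P1_PDU_OFST : MC_SMEM_P0_PDU_OFST)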
+
+/* Values to be written to the per-port status dword in shared
+ * memory on reboot and assert */
+#define MC_STATUS_DWORD_REBOOT (0xb007b007)
+#define MC_STATUS_DWORD_ASSERT (0xdeaddead)
+
+/* Check whether an mcfw version (in host order) belongs to a bootloader */
+#define MC_FW_VERSION_IS_BOOTLOADER(_v) (((_v) >> 16) == 0xb007)
+
+/* The current version of the MCDI protocol.
+ *
+ * Note that the ROM burnt into the card only talks V0, so at the very
+ * least every driver must support version 0 and MCDI_PCOL_VERSION
+ */
+#define MCDI_PCOL_VERSION 2
+
+/* Unused commands: 0x23, 0x27, 0x30, 0x31 */
+
+/* MCDI version 1
+ *
+ * Each MCDI request starts with an MCDI_HEADER, which is a 32bit
+ * structure, filled in by the client.
+ *
+ * 0 7 8 16 20 22 23 24 31
+ * | CODE | R | LEN | SEQ | Rsvd | E | R | XFLAGS |
+ * | | |
+ * | | \--- Response
+ * | \------- Error
+ * \------------------------------ Resync (always set)
+ *
+ * The client writes its request into MC shared memory, and rings the
+ * doorbell. Each request is completed either by the MC writing
+ * back into shared memory, or by writing out an event.
+ *
+ * All MCDI commands support completion by shared memory response. Each
+ * request may also contain additional data (accounted for by HEADER.LEN),
+ * and some responses may also contain additional data (again, accounted
+ * for by HEADER.LEN).
+ *
+ * Some MCDI commands support completion by event, in which any associated
+ * response data is included in the event.
+ *
+ * The protocol requires one response to be delivered for every request; a
+ * request should not be sent unless the response for the previous request
+ * has been received (either by polling shared memory, or by receiving
+ * an event).
+ */
+
+/** Request/Response structure */
+#define MCDI_HEADER_OFST 0
+#define MCDI_HEADER_CODE_LBN 0
+#define MCDI_HEADER_CODE_WIDTH 7
+#define MCDI_HEADER_RESYNC_LBN 7
+#define MCDI_HEADER_RESYNC_WIDTH 1
+#define MCDI_HEADER_DATALEN_LBN 8
+#define MCDI_HEADER_DATALEN_WIDTH 8
+#define MCDI_HEADER_SEQ_LBN 16
+#define MCDI_HEADER_SEQ_WIDTH 4
+#define MCDI_HEADER_RSVD_LBN 20
+#define MCDI_HEADER_RSVD_WIDTH 1
+#define MCDI_HEADER_NOT_EPOCH_LBN 21
+#define MCDI_HEADER_NOT_EPOCH_WIDTH 1
+#define MCDI_HEADER_ERROR_LBN 22
+#define MCDI_HEADER_ERROR_WIDTH 1
+#define MCDI_HEADER_RESPONSE_LBN 23
+#define MCDI_HEADER_RESPONSE_WIDTH 1
+#define MCDI_HEADER_XFLAGS_LBN 24
+#define MCDI_HEADER_XFLAGS_WIDTH 8
+/* Request response using event */
+#define MCDI_HEADER_XFLAGS_EVREQ 0x01
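+
+/* Hypothetical helper (illustration only, not part of this header): building
+ * a v1 request header dword from the field definitions above. Assumes the
+ * efx_dword_t type and the EFX_POPULATE_DWORD_7() macro from bitfield.h are
+ * in scope, as they are for the driver's MCDI code.
+ */
+static inline void mcdi_example_build_v1_header(efx_dword_t *hdr,
+						 unsigned int cmd,
+						 unsigned int inlen,
+						 unsigned int seqno,
+						 unsigned int xflags)
+{
+	/* RESYNC is always set; XFLAGS may include MCDI_HEADER_XFLAGS_EVREQ
+	 * to request completion by event rather than by polling.
+	 */
+	EFX_POPULATE_DWORD_7(*hdr,
+			     MCDI_HEADER_RESPONSE, 0,
+			     MCDI_HEADER_RESYNC, 1,
+			     MCDI_HEADER_CODE, cmd,
+			     MCDI_HEADER_DATALEN, inlen,
+			     MCDI_HEADER_SEQ, seqno,
+			     MCDI_HEADER_ERROR, 0,
+			     MCDI_HEADER_XFLAGS, xflags);
+}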
+
+/* Maximum number of payload bytes */
+#define MCDI_CTL_SDU_LEN_MAX_V1 0xfc
+#define MCDI_CTL_SDU_LEN_MAX_V2 0x400
+
+#define MCDI_CTL_SDU_LEN_MAX MCDI_CTL_SDU_LEN_MAX_V2
+
+
+/* The MC can generate events for two reasons:
+ * - To complete a shared memory request if XFLAGS_EVREQ was set
+ * - As a notification (link state, i2c event), controlled
+ * via MC_CMD_LOG_CTRL
+ *
+ * Both events share a common structure:
+ *
+ * 0 32 33 36 44 52 60
+ * | Data | Cont | Level | Src | Code | Rsvd |
+ * |
+ * \ There is another event pending in this notification
+ *
+ * If Code==CMDDONE, then the fields are further interpreted as:
+ *
+ * - LEVEL==INFO Command succeeded
+ * - LEVEL==ERR Command failed
+ *
+ * 0 8 16 24 32
+ * | Seq | Datalen | Errno | Rsvd |
+ *
+ * These fields are taken directly out of the standard MCDI header, i.e.,
+ * LEVEL==ERR, Datalen == 0 => Reboot
+ *
+ * Events can be squirted out of the UART (using LOG_CTRL) without a
+ * MCDI header. An event can be distinguished from an MCDI response by
+ * examining the first byte which is 0xc0. This corresponds to the
+ * non-existent MCDI command MC_CMD_DEBUG_LOG.
+ *
+ * 0 7 8
+ * | command | Resync | = 0xc0
+ *
+ * Since the event is written in big-endian byte order, this works
+ * providing bits 56-63 of the event are 0xc0.
+ *
+ * 56 60 63
+ * | Rsvd | Code | = 0xc0
+ *
+ * Which means for convenience the event code is 0xc for all MC
+ * generated events.
+ */
+#define FSE_AZ_EV_CODE_MCDI_EVRESPONSE 0xc
+
+
+/* Operation not permitted. */
+#define MC_CMD_ERR_EPERM 1
+/* Non-existent command target */
+#define MC_CMD_ERR_ENOENT 2
+/* assert() has killed the MC */
+#define MC_CMD_ERR_EINTR 4
+/* I/O failure */
+#define MC_CMD_ERR_EIO 5
+/* Try again */
+#define MC_CMD_ERR_EAGAIN 11
+/* Out of memory */
+#define MC_CMD_ERR_ENOMEM 12
+/* Caller does not hold required locks */
+#define MC_CMD_ERR_EACCES 13
+/* Resource is currently unavailable (e.g. lock contention) */
+#define MC_CMD_ERR_EBUSY 16
+/* No such device */
+#define MC_CMD_ERR_ENODEV 19
+/* Invalid argument to target */
+#define MC_CMD_ERR_EINVAL 22
+/* Out of range */
+#define MC_CMD_ERR_ERANGE 34
+/* Non-recursive resource is already acquired */
+#define MC_CMD_ERR_EDEADLK 35
+/* Operation not implemented */
+#define MC_CMD_ERR_ENOSYS 38
+/* Operation timed out */
+#define MC_CMD_ERR_ETIME 62
+/* Link has been severed */
+#define MC_CMD_ERR_ENOLINK 67
+/* Protocol error */
+#define MC_CMD_ERR_EPROTO 71
+/* Operation not supported */
+#define MC_CMD_ERR_ENOTSUP 95
+/* Address not available */
+#define MC_CMD_ERR_EADDRNOTAVAIL 99
+/* Not connected */
+#define MC_CMD_ERR_ENOTCONN 107
+/* Operation already in progress */
+#define MC_CMD_ERR_EALREADY 114
+
+/* Resource allocation failed. */
+#define MC_CMD_ERR_ALLOC_FAIL 0x1000
+/* V-adaptor not found. */
+#define MC_CMD_ERR_NO_VADAPTOR 0x1001
+/* EVB port not found. */
+#define MC_CMD_ERR_NO_EVB_PORT 0x1002
+/* V-switch not found. */
+#define MC_CMD_ERR_NO_VSWITCH 0x1003
+/* Too many VLAN tags. */
+#define MC_CMD_ERR_VLAN_LIMIT 0x1004
+/* Bad PCI function number. */
+#define MC_CMD_ERR_BAD_PCI_FUNC 0x1005
+/* Invalid VLAN mode. */
+#define MC_CMD_ERR_BAD_VLAN_MODE 0x1006
+/* Invalid v-switch type. */
+#define MC_CMD_ERR_BAD_VSWITCH_TYPE 0x1007
+/* Invalid v-port type. */
+#define MC_CMD_ERR_BAD_VPORT_TYPE 0x1008
+/* MAC address exists. */
+#define MC_CMD_ERR_MAC_EXIST 0x1009
+/* Slave core not present */
+#define MC_CMD_ERR_SLAVE_NOT_PRESENT 0x100a
+/* The datapath is disabled. */
+#define MC_CMD_ERR_DATAPATH_DISABLED 0x100b
+
+#define MC_CMD_ERR_CODE_OFST 0
+
+/* We define 8 "escape" commands to allow
+ for command number space extension */
+
+#define MC_CMD_CMD_SPACE_ESCAPE_0 0x78
+#define MC_CMD_CMD_SPACE_ESCAPE_1 0x79
+#define MC_CMD_CMD_SPACE_ESCAPE_2 0x7A
+#define MC_CMD_CMD_SPACE_ESCAPE_3 0x7B
+#define MC_CMD_CMD_SPACE_ESCAPE_4 0x7C
+#define MC_CMD_CMD_SPACE_ESCAPE_5 0x7D
+#define MC_CMD_CMD_SPACE_ESCAPE_6 0x7E
+#define MC_CMD_CMD_SPACE_ESCAPE_7 0x7F
+
+/* Vectors in the boot ROM */
+/* Point to the copycode entry point. */
+#define SIENA_MC_BOOTROM_COPYCODE_VEC (0x800 - 3 * 0x4)
+#define HUNT_MC_BOOTROM_COPYCODE_VEC (0x8000 - 3 * 0x4)
+/* Points to the recovery mode entry point. */
+#define SIENA_MC_BOOTROM_NOFLASH_VEC (0x800 - 2 * 0x4)
+#define HUNT_MC_BOOTROM_NOFLASH_VEC (0x8000 - 2 * 0x4)
+
+/* The command set exported by the boot ROM (MCDI v0) */
+#define MC_CMD_GET_VERSION_V0_SUPPORTED_FUNCS { \
+ (1 << MC_CMD_READ32) | \
+ (1 << MC_CMD_WRITE32) | \
+ (1 << MC_CMD_COPYCODE) | \
+ (1 << MC_CMD_GET_VERSION), \
+ 0, 0, 0 }
+
+#define MC_CMD_SENSOR_INFO_OUT_OFFSET_OFST(_x) \
+ (MC_CMD_SENSOR_ENTRY_OFST + (_x))
+
+#define MC_CMD_DBI_WRITE_IN_ADDRESS_OFST(n) \
+ (MC_CMD_DBI_WRITE_IN_DBIWROP_OFST + \
+ MC_CMD_DBIWROP_TYPEDEF_ADDRESS_OFST + \
+ (n) * MC_CMD_DBIWROP_TYPEDEF_LEN)
+
+#define MC_CMD_DBI_WRITE_IN_BYTE_MASK_OFST(n) \
+ (MC_CMD_DBI_WRITE_IN_DBIWROP_OFST + \
+ MC_CMD_DBIWROP_TYPEDEF_BYTE_MASK_OFST + \
+ (n) * MC_CMD_DBIWROP_TYPEDEF_LEN)
+
+#define MC_CMD_DBI_WRITE_IN_VALUE_OFST(n) \
+ (MC_CMD_DBI_WRITE_IN_DBIWROP_OFST + \
+ MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST + \
+ (n) * MC_CMD_DBIWROP_TYPEDEF_LEN)
+
+
+/* Version 2 adds an optional argument to error returns: the errno value
+ * may be followed by the (0-based) number of the first argument that
+ * could not be processed.
+ */
+#define MC_CMD_ERR_ARG_OFST 4
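+
+/* Hypothetical example (illustration only, not from the driver sources):
+ * interpreting a v2 error response body. The first dword carries the
+ * MC_CMD_ERR_* code; if the response is at least 8 bytes long, the second
+ * dword gives the 0-based index of the first argument that could not be
+ * processed. Assumes kernel types and le32_to_cpu() are available.
+ */
+static inline void mcdi_example_parse_error(const u8 *resp, size_t resp_len,
+					    unsigned int *err, int *arg_num)
+{
+	*err = le32_to_cpu(*(const __le32 *)(resp + MC_CMD_ERR_CODE_OFST));
+	if (resp_len >= MC_CMD_ERR_ARG_OFST + 4)
+		*arg_num = le32_to_cpu(*(const __le32 *)
+				       (resp + MC_CMD_ERR_ARG_OFST));
+	else
+		*arg_num = -1;	/* no argument index supplied */
+}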
+
+/* No space */
+#define MC_CMD_ERR_ENOSPC 28
+
+/* MCDI_EVENT structuredef */
+#define MCDI_EVENT_LEN 8
+#define MCDI_EVENT_CONT_LBN 32
+#define MCDI_EVENT_CONT_WIDTH 1
+#define MCDI_EVENT_LEVEL_LBN 33
+#define MCDI_EVENT_LEVEL_WIDTH 3
+/* enum: Info. */
+#define MCDI_EVENT_LEVEL_INFO 0x0
+/* enum: Warning. */
+#define MCDI_EVENT_LEVEL_WARN 0x1
+/* enum: Error. */
+#define MCDI_EVENT_LEVEL_ERR 0x2
+/* enum: Fatal. */
+#define MCDI_EVENT_LEVEL_FATAL 0x3
+#define MCDI_EVENT_DATA_OFST 0
+#define MCDI_EVENT_CMDDONE_SEQ_LBN 0
+#define MCDI_EVENT_CMDDONE_SEQ_WIDTH 8
+#define MCDI_EVENT_CMDDONE_DATALEN_LBN 8
+#define MCDI_EVENT_CMDDONE_DATALEN_WIDTH 8
+#define MCDI_EVENT_CMDDONE_ERRNO_LBN 16
+#define MCDI_EVENT_CMDDONE_ERRNO_WIDTH 8
+#define MCDI_EVENT_LINKCHANGE_LP_CAP_LBN 0
+#define MCDI_EVENT_LINKCHANGE_LP_CAP_WIDTH 16
+#define MCDI_EVENT_LINKCHANGE_SPEED_LBN 16
+#define MCDI_EVENT_LINKCHANGE_SPEED_WIDTH 4
+/* enum: 100Mbs */
+#define MCDI_EVENT_LINKCHANGE_SPEED_100M 0x1
+/* enum: 1Gbs */
+#define MCDI_EVENT_LINKCHANGE_SPEED_1G 0x2
+/* enum: 10Gbs */
+#define MCDI_EVENT_LINKCHANGE_SPEED_10G 0x3
+/* enum: 40Gbs */
+#define MCDI_EVENT_LINKCHANGE_SPEED_40G 0x4
+#define MCDI_EVENT_LINKCHANGE_FCNTL_LBN 20
+#define MCDI_EVENT_LINKCHANGE_FCNTL_WIDTH 4
+#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_LBN 24
+#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_WIDTH 8
+#define MCDI_EVENT_SENSOREVT_MONITOR_LBN 0
+#define MCDI_EVENT_SENSOREVT_MONITOR_WIDTH 8
+#define MCDI_EVENT_SENSOREVT_STATE_LBN 8
+#define MCDI_EVENT_SENSOREVT_STATE_WIDTH 8
+#define MCDI_EVENT_SENSOREVT_VALUE_LBN 16
+#define MCDI_EVENT_SENSOREVT_VALUE_WIDTH 16
+#define MCDI_EVENT_FWALERT_DATA_LBN 8
+#define MCDI_EVENT_FWALERT_DATA_WIDTH 24
+#define MCDI_EVENT_FWALERT_REASON_LBN 0
+#define MCDI_EVENT_FWALERT_REASON_WIDTH 8
+/* enum: SRAM Access. */
+#define MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS 0x1
+#define MCDI_EVENT_FLR_VF_LBN 0
+#define MCDI_EVENT_FLR_VF_WIDTH 8
+#define MCDI_EVENT_TX_ERR_TXQ_LBN 0
+#define MCDI_EVENT_TX_ERR_TXQ_WIDTH 12
+#define MCDI_EVENT_TX_ERR_TYPE_LBN 12
+#define MCDI_EVENT_TX_ERR_TYPE_WIDTH 4
+/* enum: Descriptor loader reported failure */
+#define MCDI_EVENT_TX_ERR_DL_FAIL 0x1
+/* enum: Descriptor ring empty and no EOP seen for packet */
+#define MCDI_EVENT_TX_ERR_NO_EOP 0x2
+/* enum: Overlength packet */
+#define MCDI_EVENT_TX_ERR_2BIG 0x3
+/* enum: Malformed option descriptor */
+#define MCDI_EVENT_TX_BAD_OPTDESC 0x5
+/* enum: Option descriptor part way through a packet */
+#define MCDI_EVENT_TX_OPT_IN_PKT 0x8
+/* enum: DMA or PIO data access error */
+#define MCDI_EVENT_TX_ERR_BAD_DMA_OR_PIO 0x9
+#define MCDI_EVENT_TX_ERR_INFO_LBN 16
+#define MCDI_EVENT_TX_ERR_INFO_WIDTH 16
+#define MCDI_EVENT_TX_FLUSH_TO_DRIVER_LBN 12
+#define MCDI_EVENT_TX_FLUSH_TO_DRIVER_WIDTH 1
+#define MCDI_EVENT_TX_FLUSH_TXQ_LBN 0
+#define MCDI_EVENT_TX_FLUSH_TXQ_WIDTH 12
+#define MCDI_EVENT_PTP_ERR_TYPE_LBN 0
+#define MCDI_EVENT_PTP_ERR_TYPE_WIDTH 8
+/* enum: PLL lost lock */
+#define MCDI_EVENT_PTP_ERR_PLL_LOST 0x1
+/* enum: Filter overflow (PDMA) */
+#define MCDI_EVENT_PTP_ERR_FILTER 0x2
+/* enum: FIFO overflow (FPGA) */
+#define MCDI_EVENT_PTP_ERR_FIFO 0x3
+/* enum: Merge queue overflow */
+#define MCDI_EVENT_PTP_ERR_QUEUE 0x4
+#define MCDI_EVENT_AOE_ERR_TYPE_LBN 0
+#define MCDI_EVENT_AOE_ERR_TYPE_WIDTH 8
+/* enum: AOE failed to load - no valid image? */
+#define MCDI_EVENT_AOE_NO_LOAD 0x1
+/* enum: AOE FC reported an exception */
+#define MCDI_EVENT_AOE_FC_ASSERT 0x2
+/* enum: AOE FC watchdogged */
+#define MCDI_EVENT_AOE_FC_WATCHDOG 0x3
+/* enum: AOE FC failed to start */
+#define MCDI_EVENT_AOE_FC_NO_START 0x4
+/* enum: Generic AOE fault - likely to have been reported via other means too
+ * but intended for use by aoex driver.
+ */
+#define MCDI_EVENT_AOE_FAULT 0x5
+/* enum: Results of reprogramming the CPLD (status in AOE_ERR_DATA) */
+#define MCDI_EVENT_AOE_CPLD_REPROGRAMMED 0x6
+/* enum: AOE loaded successfully */
+#define MCDI_EVENT_AOE_LOAD 0x7
+/* enum: AOE DMA operation completed (LSB of HOST_HANDLE in AOE_ERR_DATA) */
+#define MCDI_EVENT_AOE_DMA 0x8
+/* enum: AOE byteblaster connected/disconnected (Connection status in
+ * AOE_ERR_DATA)
+ */
+#define MCDI_EVENT_AOE_BYTEBLASTER 0x9
+/* enum: DDR ECC status update */
+#define MCDI_EVENT_AOE_DDR_ECC_STATUS 0xa
+#define MCDI_EVENT_AOE_ERR_DATA_LBN 8
+#define MCDI_EVENT_AOE_ERR_DATA_WIDTH 8
+#define MCDI_EVENT_RX_ERR_RXQ_LBN 0
+#define MCDI_EVENT_RX_ERR_RXQ_WIDTH 12
+#define MCDI_EVENT_RX_ERR_TYPE_LBN 12
+#define MCDI_EVENT_RX_ERR_TYPE_WIDTH 4
+#define MCDI_EVENT_RX_ERR_INFO_LBN 16
+#define MCDI_EVENT_RX_ERR_INFO_WIDTH 16
+#define MCDI_EVENT_RX_FLUSH_TO_DRIVER_LBN 12
+#define MCDI_EVENT_RX_FLUSH_TO_DRIVER_WIDTH 1
+#define MCDI_EVENT_RX_FLUSH_RXQ_LBN 0
+#define MCDI_EVENT_RX_FLUSH_RXQ_WIDTH 12
+#define MCDI_EVENT_MC_REBOOT_COUNT_LBN 0
+#define MCDI_EVENT_MC_REBOOT_COUNT_WIDTH 16
+#define MCDI_EVENT_DATA_LBN 0
+#define MCDI_EVENT_DATA_WIDTH 32
+#define MCDI_EVENT_SRC_LBN 36
+#define MCDI_EVENT_SRC_WIDTH 8
+#define MCDI_EVENT_EV_CODE_LBN 60
+#define MCDI_EVENT_EV_CODE_WIDTH 4
+#define MCDI_EVENT_CODE_LBN 44
+#define MCDI_EVENT_CODE_WIDTH 8
+/* enum: Bad assert. */
+#define MCDI_EVENT_CODE_BADSSERT 0x1
+/* enum: PM Notice. */
+#define MCDI_EVENT_CODE_PMNOTICE 0x2
+/* enum: Command done. */
+#define MCDI_EVENT_CODE_CMDDONE 0x3
+/* enum: Link change. */
+#define MCDI_EVENT_CODE_LINKCHANGE 0x4
+/* enum: Sensor Event. */
+#define MCDI_EVENT_CODE_SENSOREVT 0x5
+/* enum: Schedule error. */
+#define MCDI_EVENT_CODE_SCHEDERR 0x6
+/* enum: Reboot. */
+#define MCDI_EVENT_CODE_REBOOT 0x7
+/* enum: Mac stats DMA. */
+#define MCDI_EVENT_CODE_MAC_STATS_DMA 0x8
+/* enum: Firmware alert. */
+#define MCDI_EVENT_CODE_FWALERT 0x9
+/* enum: Function level reset. */
+#define MCDI_EVENT_CODE_FLR 0xa
+/* enum: Transmit error */
+#define MCDI_EVENT_CODE_TX_ERR 0xb
+/* enum: Tx flush has completed */
+#define MCDI_EVENT_CODE_TX_FLUSH 0xc
+/* enum: PTP packet received timestamp */
+#define MCDI_EVENT_CODE_PTP_RX 0xd
+/* enum: PTP NIC failure */
+#define MCDI_EVENT_CODE_PTP_FAULT 0xe
+/* enum: PTP PPS event */
+#define MCDI_EVENT_CODE_PTP_PPS 0xf
+/* enum: Rx flush has completed */
+#define MCDI_EVENT_CODE_RX_FLUSH 0x10
+/* enum: Receive error */
+#define MCDI_EVENT_CODE_RX_ERR 0x11
+/* enum: AOE fault */
+#define MCDI_EVENT_CODE_AOE 0x12
+/* enum: Network port calibration failed (VCAL). */
+#define MCDI_EVENT_CODE_VCAL_FAIL 0x13
+/* enum: HW PPS event */
+#define MCDI_EVENT_CODE_HW_PPS 0x14
+/* enum: The MC has rebooted (huntington and later, siena uses CODE_REBOOT and
+ * a different format)
+ */
+#define MCDI_EVENT_CODE_MC_REBOOT 0x15
+/* enum: the MC has detected a parity error */
+#define MCDI_EVENT_CODE_PAR_ERR 0x16
+/* enum: the MC has detected a correctable error */
+#define MCDI_EVENT_CODE_ECC_CORR_ERR 0x17
+/* enum: the MC has detected an uncorrectable error */
+#define MCDI_EVENT_CODE_ECC_FATAL_ERR 0x18
+/* enum: The MC has entered offline BIST mode */
+#define MCDI_EVENT_CODE_MC_BIST 0x19
+/* enum: PTP tick event providing current NIC time */
+#define MCDI_EVENT_CODE_PTP_TIME 0x1a
+/* enum: Artificial event generated by host and posted via MC for test
+ * purposes.
+ */
+#define MCDI_EVENT_CODE_TESTGEN 0xfa
+#define MCDI_EVENT_CMDDONE_DATA_OFST 0
+#define MCDI_EVENT_CMDDONE_DATA_LBN 0
+#define MCDI_EVENT_CMDDONE_DATA_WIDTH 32
+#define MCDI_EVENT_LINKCHANGE_DATA_OFST 0
+#define MCDI_EVENT_LINKCHANGE_DATA_LBN 0
+#define MCDI_EVENT_LINKCHANGE_DATA_WIDTH 32
+#define MCDI_EVENT_SENSOREVT_DATA_OFST 0
+#define MCDI_EVENT_SENSOREVT_DATA_LBN 0
+#define MCDI_EVENT_SENSOREVT_DATA_WIDTH 32
+#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_OFST 0
+#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_LBN 0
+#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_WIDTH 32
+#define MCDI_EVENT_TX_ERR_DATA_OFST 0
+#define MCDI_EVENT_TX_ERR_DATA_LBN 0
+#define MCDI_EVENT_TX_ERR_DATA_WIDTH 32
+/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the seconds field of
+ * timestamp
+ */
+#define MCDI_EVENT_PTP_SECONDS_OFST 0
+#define MCDI_EVENT_PTP_SECONDS_LBN 0
+#define MCDI_EVENT_PTP_SECONDS_WIDTH 32
+/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the major field of
+ * timestamp
+ */
+#define MCDI_EVENT_PTP_MAJOR_OFST 0
+#define MCDI_EVENT_PTP_MAJOR_LBN 0
+#define MCDI_EVENT_PTP_MAJOR_WIDTH 32
+/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the nanoseconds field
+ * of timestamp
+ */
+#define MCDI_EVENT_PTP_NANOSECONDS_OFST 0
+#define MCDI_EVENT_PTP_NANOSECONDS_LBN 0
+#define MCDI_EVENT_PTP_NANOSECONDS_WIDTH 32
+/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the minor field of
+ * timestamp
+ */
+#define MCDI_EVENT_PTP_MINOR_OFST 0
+#define MCDI_EVENT_PTP_MINOR_LBN 0
+#define MCDI_EVENT_PTP_MINOR_WIDTH 32
+/* For CODE_PTP_RX events, the lowest four bytes of sourceUUID from PTP packet
+ */
+#define MCDI_EVENT_PTP_UUID_OFST 0
+#define MCDI_EVENT_PTP_UUID_LBN 0
+#define MCDI_EVENT_PTP_UUID_WIDTH 32
+#define MCDI_EVENT_RX_ERR_DATA_OFST 0
+#define MCDI_EVENT_RX_ERR_DATA_LBN 0
+#define MCDI_EVENT_RX_ERR_DATA_WIDTH 32
+#define MCDI_EVENT_PAR_ERR_DATA_OFST 0
+#define MCDI_EVENT_PAR_ERR_DATA_LBN 0
+#define MCDI_EVENT_PAR_ERR_DATA_WIDTH 32
+#define MCDI_EVENT_ECC_CORR_ERR_DATA_OFST 0
+#define MCDI_EVENT_ECC_CORR_ERR_DATA_LBN 0
+#define MCDI_EVENT_ECC_CORR_ERR_DATA_WIDTH 32
+#define MCDI_EVENT_ECC_FATAL_ERR_DATA_OFST 0
+#define MCDI_EVENT_ECC_FATAL_ERR_DATA_LBN 0
+#define MCDI_EVENT_ECC_FATAL_ERR_DATA_WIDTH 32
+/* For CODE_PTP_TIME events, the major value of the PTP clock */
+#define MCDI_EVENT_PTP_TIME_MAJOR_OFST 0
+#define MCDI_EVENT_PTP_TIME_MAJOR_LBN 0
+#define MCDI_EVENT_PTP_TIME_MAJOR_WIDTH 32
+/* For CODE_PTP_TIME events, bits 19-26 of the minor value of the PTP clock */
+#define MCDI_EVENT_PTP_TIME_MINOR_26_19_LBN 36
+#define MCDI_EVENT_PTP_TIME_MINOR_26_19_WIDTH 8
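+
+/* Hypothetical example (illustration only, not from the driver sources):
+ * decoding a CMDDONE completion event using the field definitions above.
+ * Assumes the efx_qword_t type and EFX_QWORD_FIELD() from bitfield.h are in
+ * scope. A request completed by event carries the SEQ, DATALEN and ERRNO
+ * values that would otherwise be read from the shared-memory response.
+ */
+static inline int mcdi_example_decode_cmddone(const efx_qword_t *ev,
+					      unsigned int *seq,
+					      unsigned int *datalen)
+{
+	if (EFX_QWORD_FIELD(*ev, MCDI_EVENT_EV_CODE) !=
+	    FSE_AZ_EV_CODE_MCDI_EVRESPONSE ||
+	    EFX_QWORD_FIELD(*ev, MCDI_EVENT_CODE) != MCDI_EVENT_CODE_CMDDONE)
+		return -1;	/* not a command completion event */
+
+	*seq = EFX_QWORD_FIELD(*ev, MCDI_EVENT_CMDDONE_SEQ);
+	*datalen = EFX_QWORD_FIELD(*ev, MCDI_EVENT_CMDDONE_DATALEN);
+	/* LEVEL==ERR with DATALEN==0 indicates a reboot */
+	return EFX_QWORD_FIELD(*ev, MCDI_EVENT_CMDDONE_ERRNO);
+}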
+
+/* FCDI_EVENT structuredef */
+#define FCDI_EVENT_LEN 8
+#define FCDI_EVENT_CONT_LBN 32
+#define FCDI_EVENT_CONT_WIDTH 1
+#define FCDI_EVENT_LEVEL_LBN 33
+#define FCDI_EVENT_LEVEL_WIDTH 3
+/* enum: Info. */
+#define FCDI_EVENT_LEVEL_INFO 0x0
+/* enum: Warning. */
+#define FCDI_EVENT_LEVEL_WARN 0x1
+/* enum: Error. */
+#define FCDI_EVENT_LEVEL_ERR 0x2
+/* enum: Fatal. */
+#define FCDI_EVENT_LEVEL_FATAL 0x3
+#define FCDI_EVENT_DATA_OFST 0
+#define FCDI_EVENT_LINK_STATE_STATUS_LBN 0
+#define FCDI_EVENT_LINK_STATE_STATUS_WIDTH 1
+#define FCDI_EVENT_LINK_DOWN 0x0 /* enum */
+#define FCDI_EVENT_LINK_UP 0x1 /* enum */
+#define FCDI_EVENT_DATA_LBN 0
+#define FCDI_EVENT_DATA_WIDTH 32
+#define FCDI_EVENT_SRC_LBN 36
+#define FCDI_EVENT_SRC_WIDTH 8
+#define FCDI_EVENT_EV_CODE_LBN 60
+#define FCDI_EVENT_EV_CODE_WIDTH 4
+#define FCDI_EVENT_CODE_LBN 44
+#define FCDI_EVENT_CODE_WIDTH 8
+/* enum: The FC was rebooted. */
+#define FCDI_EVENT_CODE_REBOOT 0x1
+/* enum: Bad assert. */
+#define FCDI_EVENT_CODE_ASSERT 0x2
+/* enum: DDR3 test result. */
+#define FCDI_EVENT_CODE_DDR_TEST_RESULT 0x3
+/* enum: Link status. */
+#define FCDI_EVENT_CODE_LINK_STATE 0x4
+/* enum: A timed read is ready to be serviced. */
+#define FCDI_EVENT_CODE_TIMED_READ 0x5
+/* enum: One or more PPS IN events */
+#define FCDI_EVENT_CODE_PPS_IN 0x6
+/* enum: Tick event from PTP clock */
+#define FCDI_EVENT_CODE_PTP_TICK 0x7
+/* enum: ECC error counters */
+#define FCDI_EVENT_CODE_DDR_ECC_STATUS 0x8
+#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_OFST 0
+#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LBN 0
+#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_WIDTH 32
+#define FCDI_EVENT_ASSERT_TYPE_LBN 36
+#define FCDI_EVENT_ASSERT_TYPE_WIDTH 8
+#define FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_LBN 36
+#define FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_WIDTH 8
+#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_OFST 0
+#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_LBN 0
+#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_WIDTH 32
+#define FCDI_EVENT_LINK_STATE_DATA_OFST 0
+#define FCDI_EVENT_LINK_STATE_DATA_LBN 0
+#define FCDI_EVENT_LINK_STATE_DATA_WIDTH 32
+#define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_LBN 36
+#define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_WIDTH 8
+#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_OFST 0
+#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_LBN 0
+#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_WIDTH 32
+
+/* FCDI_EXTENDED_EVENT_PPS structuredef: Extended FCDI event to send PPS events
+ * to the MC. Note that this structure is overlaid over a normal FCDI event
+ * such that bits 32-63 containing event code, level, source etc remain the
+ * same. In this case the data field of the header is defined to be the
+ * number of timestamps
+ */
+#define FCDI_EXTENDED_EVENT_PPS_LENMIN 16
+#define FCDI_EXTENDED_EVENT_PPS_LENMAX 248
+#define FCDI_EXTENDED_EVENT_PPS_LEN(num) (8+8*(num))
+/* Number of timestamps following */
+#define FCDI_EXTENDED_EVENT_PPS_COUNT_OFST 0
+#define FCDI_EXTENDED_EVENT_PPS_COUNT_LBN 0
+#define FCDI_EXTENDED_EVENT_PPS_COUNT_WIDTH 32
+/* Seconds field of a timestamp record */
+#define FCDI_EXTENDED_EVENT_PPS_SECONDS_OFST 8
+#define FCDI_EXTENDED_EVENT_PPS_SECONDS_LBN 64
+#define FCDI_EXTENDED_EVENT_PPS_SECONDS_WIDTH 32
+/* Nanoseconds field of a timestamp record */
+#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_OFST 12
+#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LBN 96
+#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_WIDTH 32
+/* Timestamp records comprising the event */
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_OFST 8
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LEN 8
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LO_OFST 8
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_HI_OFST 12
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MINNUM 1
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MAXNUM 30
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LBN 64
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_WIDTH 64
+
+
+/***********************************/
+/* MC_CMD_READ32
+ * Read multiple 32-bit words from MC memory.
+ */
+#define MC_CMD_READ32 0x1
+
+/* MC_CMD_READ32_IN msgrequest */
+#define MC_CMD_READ32_IN_LEN 8
+#define MC_CMD_READ32_IN_ADDR_OFST 0
+#define MC_CMD_READ32_IN_NUMWORDS_OFST 4
+
+/* MC_CMD_READ32_OUT msgresponse */
+#define MC_CMD_READ32_OUT_LENMIN 4
+#define MC_CMD_READ32_OUT_LENMAX 252
+#define MC_CMD_READ32_OUT_LEN(num) (0+4*(num))
+#define MC_CMD_READ32_OUT_BUFFER_OFST 0
+#define MC_CMD_READ32_OUT_BUFFER_LEN 4
+#define MC_CMD_READ32_OUT_BUFFER_MINNUM 1
+#define MC_CMD_READ32_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_WRITE32
+ * Write multiple 32-bit words to MC memory.
+ */
+#define MC_CMD_WRITE32 0x2
+
+/* MC_CMD_WRITE32_IN msgrequest */
+#define MC_CMD_WRITE32_IN_LENMIN 8
+#define MC_CMD_WRITE32_IN_LENMAX 252
+#define MC_CMD_WRITE32_IN_LEN(num) (4+4*(num))
+#define MC_CMD_WRITE32_IN_ADDR_OFST 0
+#define MC_CMD_WRITE32_IN_BUFFER_OFST 4
+#define MC_CMD_WRITE32_IN_BUFFER_LEN 4
+#define MC_CMD_WRITE32_IN_BUFFER_MINNUM 1
+#define MC_CMD_WRITE32_IN_BUFFER_MAXNUM 62
+
+/* MC_CMD_WRITE32_OUT msgresponse */
+#define MC_CMD_WRITE32_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_COPYCODE
+ * Copy MC code between two locations and jump.
+ */
+#define MC_CMD_COPYCODE 0x3
+
+/* MC_CMD_COPYCODE_IN msgrequest */
+#define MC_CMD_COPYCODE_IN_LEN 16
+/* Source address */
+#define MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0
+/* enum: The main image should be entered via a copy of a single word from and
+ * to this address when none of the other magic behaviours are required.
+ */
+#define MC_CMD_COPYCODE_HUNT_NO_MAGIC_ADDR 0x10000
+/* enum: Entering the main image via a copy of a single word from and to this
+ * address indicates that it should not attempt to start the datapath CPUs.
+ * This is useful for certain soft rebooting scenarios. (Huntington only)
+ */
+#define MC_CMD_COPYCODE_HUNT_NO_DATAPATH_MAGIC_ADDR 0x1d0d0
+/* enum: Entering the main image via a copy of a single word from and to this
+ * address indicates that it should not attempt to parse any configuration from
+ * flash. (In addition, the datapath CPUs will not be started, as for
+ * MC_CMD_COPYCODE_HUNT_NO_DATAPATH_MAGIC_ADDR above.) This is useful for
+ * certain soft rebooting scenarios. (Huntington only)
+ */
+#define MC_CMD_COPYCODE_HUNT_IGNORE_CONFIG_MAGIC_ADDR 0x1badc
+/* Destination address */
+#define MC_CMD_COPYCODE_IN_DEST_ADDR_OFST 4
+#define MC_CMD_COPYCODE_IN_NUMWORDS_OFST 8
+/* Address of where to jump after copy. */
+#define MC_CMD_COPYCODE_IN_JUMP_OFST 12
+/* enum: Control should return to the caller rather than jumping */
+#define MC_CMD_COPYCODE_JUMP_NONE 0x1
+
+/* MC_CMD_COPYCODE_OUT msgresponse */
+#define MC_CMD_COPYCODE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SET_FUNC
+ * Select function for function-specific commands.
+ */
+#define MC_CMD_SET_FUNC 0x4
+
+/* MC_CMD_SET_FUNC_IN msgrequest */
+#define MC_CMD_SET_FUNC_IN_LEN 4
+/* Set function */
+#define MC_CMD_SET_FUNC_IN_FUNC_OFST 0
+
+/* MC_CMD_SET_FUNC_OUT msgresponse */
+#define MC_CMD_SET_FUNC_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_BOOT_STATUS
+ * Get the instruction address from which the MC booted.
+ */
+#define MC_CMD_GET_BOOT_STATUS 0x5
+
+/* MC_CMD_GET_BOOT_STATUS_IN msgrequest */
+#define MC_CMD_GET_BOOT_STATUS_IN_LEN 0
+
+/* MC_CMD_GET_BOOT_STATUS_OUT msgresponse */
+#define MC_CMD_GET_BOOT_STATUS_OUT_LEN 8
+/* ?? */
+#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_OFST 0
+/* enum: indicates that the MC wasn't flash booted */
+#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_NULL 0xdeadbeef
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_OFST 4
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_LBN 0
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_WIDTH 1
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_PRIMARY_LBN 1
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_PRIMARY_WIDTH 1
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_BACKUP_LBN 2
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_BACKUP_WIDTH 1
+
+
+/***********************************/
+/* MC_CMD_GET_ASSERTS
+ * Get (and optionally clear) the current assertion status. Only
+ * OUT.GLOBAL_FLAGS is guaranteed to exist in the completion payload. The other
+ * fields will only be present if OUT.GLOBAL_FLAGS != NO_FAILS
+ */
+#define MC_CMD_GET_ASSERTS 0x6
+
+/* MC_CMD_GET_ASSERTS_IN msgrequest */
+#define MC_CMD_GET_ASSERTS_IN_LEN 4
+/* Set to clear assertion */
+#define MC_CMD_GET_ASSERTS_IN_CLEAR_OFST 0
+
+/* MC_CMD_GET_ASSERTS_OUT msgresponse */
+#define MC_CMD_GET_ASSERTS_OUT_LEN 140
+/* Assertion status flag. */
+#define MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_OFST 0
+/* enum: No assertions have failed. */
+#define MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 0x1
+/* enum: A system-level assertion has failed. */
+#define MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL 0x2
+/* enum: A thread-level assertion has failed. */
+#define MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL 0x3
+/* enum: The system was reset by the watchdog. */
+#define MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED 0x4
+/* enum: An illegal address trap stopped the system (huntington and later) */
+#define MC_CMD_GET_ASSERTS_FLAGS_ADDR_TRAP 0x5
+/* Failing PC value */
+#define MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_OFST 4
+/* Saved GP regs */
+#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST 8
+#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_LEN 4
+#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM 31
+/* Failing thread address */
+#define MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_OFST 132
+#define MC_CMD_GET_ASSERTS_OUT_RESERVED_OFST 136
+
+
+/***********************************/
+/* MC_CMD_LOG_CTRL
+ * Configure the output stream for various events and messages.
+ */
+#define MC_CMD_LOG_CTRL 0x7
+
+/* MC_CMD_LOG_CTRL_IN msgrequest */
+#define MC_CMD_LOG_CTRL_IN_LEN 8
+/* Log destination */
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_OFST 0
+/* enum: UART. */
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_UART 0x1
+/* enum: Event queue. */
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ 0x2
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_OFST 4
+
+/* MC_CMD_LOG_CTRL_OUT msgresponse */
+#define MC_CMD_LOG_CTRL_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_VERSION
+ * Get version information about the MC firmware.
+ */
+#define MC_CMD_GET_VERSION 0x8
+
+/* MC_CMD_GET_VERSION_IN msgrequest */
+#define MC_CMD_GET_VERSION_IN_LEN 0
+
+/* MC_CMD_GET_VERSION_EXT_IN msgrequest: Asks for the extended version */
+#define MC_CMD_GET_VERSION_EXT_IN_LEN 4
+/* placeholder, set to 0 */
+#define MC_CMD_GET_VERSION_EXT_IN_EXT_FLAGS_OFST 0
+
+/* MC_CMD_GET_VERSION_V0_OUT msgresponse: deprecated version format */
+#define MC_CMD_GET_VERSION_V0_OUT_LEN 4
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0
+/* enum: Reserved version number to indicate "any" version. */
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_ANY 0xffffffff
+/* enum: Bootrom version value for Siena. */
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_SIENA_BOOTROM 0xb0070000
+/* enum: Bootrom version value for Huntington. */
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_HUNT_BOOTROM 0xb0070001
+
+/* MC_CMD_GET_VERSION_OUT msgresponse */
+#define MC_CMD_GET_VERSION_OUT_LEN 32
+/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
+/* Enum values, see field(s): */
+/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */
+#define MC_CMD_GET_VERSION_OUT_PCOL_OFST 4
+/* 128bit mask of functions supported by the current firmware */
+#define MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_OFST 8
+#define MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_LEN 16
+#define MC_CMD_GET_VERSION_OUT_VERSION_OFST 24
+#define MC_CMD_GET_VERSION_OUT_VERSION_LEN 8
+#define MC_CMD_GET_VERSION_OUT_VERSION_LO_OFST 24
+#define MC_CMD_GET_VERSION_OUT_VERSION_HI_OFST 28
+
+/* MC_CMD_GET_VERSION_EXT_OUT msgresponse */
+#define MC_CMD_GET_VERSION_EXT_OUT_LEN 48
+/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
+/* Enum values, see field(s): */
+/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */
+#define MC_CMD_GET_VERSION_EXT_OUT_PCOL_OFST 4
+/* 128bit mask of functions supported by the current firmware */
+#define MC_CMD_GET_VERSION_EXT_OUT_SUPPORTED_FUNCS_OFST 8
+#define MC_CMD_GET_VERSION_EXT_OUT_SUPPORTED_FUNCS_LEN 16
+#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_OFST 24
+#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_LEN 8
+#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_LO_OFST 24
+#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_HI_OFST 28
+/* extra info */
+#define MC_CMD_GET_VERSION_EXT_OUT_EXTRA_OFST 32
+#define MC_CMD_GET_VERSION_EXT_OUT_EXTRA_LEN 16
+
+
+/***********************************/
+/* MC_CMD_PTP
+ * Perform PTP operation
+ */
+#define MC_CMD_PTP 0xb
+
+/* MC_CMD_PTP_IN msgrequest */
+#define MC_CMD_PTP_IN_LEN 1
+/* PTP operation code */
+#define MC_CMD_PTP_IN_OP_OFST 0
+#define MC_CMD_PTP_IN_OP_LEN 1
+/* enum: Enable PTP packet timestamping operation. */
+#define MC_CMD_PTP_OP_ENABLE 0x1
+/* enum: Disable PTP packet timestamping operation. */
+#define MC_CMD_PTP_OP_DISABLE 0x2
+/* enum: Send a PTP packet. */
+#define MC_CMD_PTP_OP_TRANSMIT 0x3
+/* enum: Read the current NIC time. */
+#define MC_CMD_PTP_OP_READ_NIC_TIME 0x4
+/* enum: Get the current PTP status. */
+#define MC_CMD_PTP_OP_STATUS 0x5
+/* enum: Adjust the PTP NIC's time. */
+#define MC_CMD_PTP_OP_ADJUST 0x6
+/* enum: Synchronize host and NIC time. */
+#define MC_CMD_PTP_OP_SYNCHRONIZE 0x7
+/* enum: Basic manufacturing tests. */
+#define MC_CMD_PTP_OP_MANFTEST_BASIC 0x8
+/* enum: Packet based manufacturing tests. */
+#define MC_CMD_PTP_OP_MANFTEST_PACKET 0x9
+/* enum: Reset some of the PTP related statistics */
+#define MC_CMD_PTP_OP_RESET_STATS 0xa
+/* enum: Debug operations to MC. */
+#define MC_CMD_PTP_OP_DEBUG 0xb
+/* enum: Read an FPGA register */
+#define MC_CMD_PTP_OP_FPGAREAD 0xc
+/* enum: Write an FPGA register */
+#define MC_CMD_PTP_OP_FPGAWRITE 0xd
+/* enum: Apply an offset to the NIC clock */
+#define MC_CMD_PTP_OP_CLOCK_OFFSET_ADJUST 0xe
+/* enum: Apply a frequency adjustment to the NIC clock */
+#define MC_CMD_PTP_OP_CLOCK_FREQ_ADJUST 0xf
+/* enum: Set the MC packet filter VLAN tags for received PTP packets */
+#define MC_CMD_PTP_OP_RX_SET_VLAN_FILTER 0x10
+/* enum: Set the MC packet filter UUID for received PTP packets */
+#define MC_CMD_PTP_OP_RX_SET_UUID_FILTER 0x11
+/* enum: Set the MC packet filter Domain for received PTP packets */
+#define MC_CMD_PTP_OP_RX_SET_DOMAIN_FILTER 0x12
+/* enum: Set the clock source */
+#define MC_CMD_PTP_OP_SET_CLK_SRC 0x13
+/* enum: Reset value of Timer Reg. */
+#define MC_CMD_PTP_OP_RST_CLK 0x14
+/* enum: Enable the forwarding of PPS events to the host */
+#define MC_CMD_PTP_OP_PPS_ENABLE 0x15
+/* enum: Get the time format used by this NIC for PTP operations */
+#define MC_CMD_PTP_OP_GET_TIME_FORMAT 0x16
+/* enum: Get the clock attributes. NOTE- extended version of
+ * MC_CMD_PTP_OP_GET_TIME_FORMAT
+ */
+#define MC_CMD_PTP_OP_GET_ATTRIBUTES 0x16
+/* enum: Get corrections that should be applied to the various different
+ * timestamps
+ */
+#define MC_CMD_PTP_OP_GET_TIMESTAMP_CORRECTIONS 0x17
+/* enum: Subscribe to receive periodic time events indicating the current NIC
+ * time
+ */
+#define MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE 0x18
+/* enum: Unsubscribe to stop receiving time events */
+#define MC_CMD_PTP_OP_TIME_EVENT_UNSUBSCRIBE 0x19
+/* enum: PPS based manufacturing tests. Requires PPS output to be looped to PPS
+ * input on the same NIC.
+ */
+#define MC_CMD_PTP_OP_MANFTEST_PPS 0x1a
+/* enum: Above this for future use. */
+#define MC_CMD_PTP_OP_MAX 0x1b
+
+/* MC_CMD_PTP_IN_ENABLE msgrequest */
+#define MC_CMD_PTP_IN_ENABLE_LEN 16
+#define MC_CMD_PTP_IN_CMD_OFST 0
+#define MC_CMD_PTP_IN_PERIPH_ID_OFST 4
+/* Event queue for PTP events */
+#define MC_CMD_PTP_IN_ENABLE_QUEUE_OFST 8
+/* PTP timestamping mode */
+#define MC_CMD_PTP_IN_ENABLE_MODE_OFST 12
+/* enum: PTP, version 1 */
+#define MC_CMD_PTP_MODE_V1 0x0
+/* enum: PTP, version 1, with VLAN headers - deprecated */
+#define MC_CMD_PTP_MODE_V1_VLAN 0x1
+/* enum: PTP, version 2 */
+#define MC_CMD_PTP_MODE_V2 0x2
+/* enum: PTP, version 2, with VLAN headers - deprecated */
+#define MC_CMD_PTP_MODE_V2_VLAN 0x3
+/* enum: PTP, version 2, with improved UUID filtering */
+#define MC_CMD_PTP_MODE_V2_ENHANCED 0x4
+/* enum: FCoE (seconds and microseconds) */
+#define MC_CMD_PTP_MODE_FCOE 0x5
+
+/* MC_CMD_PTP_IN_DISABLE msgrequest */
+#define MC_CMD_PTP_IN_DISABLE_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_TRANSMIT msgrequest */
+#define MC_CMD_PTP_IN_TRANSMIT_LENMIN 13
+#define MC_CMD_PTP_IN_TRANSMIT_LENMAX 252
+#define MC_CMD_PTP_IN_TRANSMIT_LEN(num) (12+1*(num))
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Transmit packet length */
+#define MC_CMD_PTP_IN_TRANSMIT_LENGTH_OFST 8
+/* Transmit packet data */
+#define MC_CMD_PTP_IN_TRANSMIT_PACKET_OFST 12
+#define MC_CMD_PTP_IN_TRANSMIT_PACKET_LEN 1
+#define MC_CMD_PTP_IN_TRANSMIT_PACKET_MINNUM 1
+#define MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM 240
+
+/* MC_CMD_PTP_IN_READ_NIC_TIME msgrequest */
+#define MC_CMD_PTP_IN_READ_NIC_TIME_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_STATUS msgrequest */
+#define MC_CMD_PTP_IN_STATUS_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_ADJUST msgrequest */
+#define MC_CMD_PTP_IN_ADJUST_LEN 24
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Frequency adjustment 40 bit fixed point ns */
+#define MC_CMD_PTP_IN_ADJUST_FREQ_OFST 8
+#define MC_CMD_PTP_IN_ADJUST_FREQ_LEN 8
+#define MC_CMD_PTP_IN_ADJUST_FREQ_LO_OFST 8
+#define MC_CMD_PTP_IN_ADJUST_FREQ_HI_OFST 12
+/* enum: Number of fractional bits in frequency adjustment */
+#define MC_CMD_PTP_IN_ADJUST_BITS 0x28
+/* Time adjustment in seconds */
+#define MC_CMD_PTP_IN_ADJUST_SECONDS_OFST 16
+/* Time adjustment major value */
+#define MC_CMD_PTP_IN_ADJUST_MAJOR_OFST 16
+/* Time adjustment in nanoseconds */
+#define MC_CMD_PTP_IN_ADJUST_NANOSECONDS_OFST 20
+/* Time adjustment minor value */
+#define MC_CMD_PTP_IN_ADJUST_MINOR_OFST 20
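+
+/* Hypothetical example (illustration only, not from the driver sources):
+ * converting a parts-per-billion frequency offset into the 40-bit
+ * fixed-point value expected in ADJUST_FREQ, i.e.
+ * ppb * 2^MC_CMD_PTP_IN_ADJUST_BITS / 10^9. Assumes s64 from linux/types.h
+ * and div_s64() from linux/math64.h.
+ */
+static inline s64 mcdi_example_ppb_to_freq_adjust(s64 ppb)
+{
+	return div_s64(ppb * (1LL << MC_CMD_PTP_IN_ADJUST_BITS), 1000000000);
+}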
+
+/* MC_CMD_PTP_IN_SYNCHRONIZE msgrequest */
+#define MC_CMD_PTP_IN_SYNCHRONIZE_LEN 20
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Number of time readings to capture */
+#define MC_CMD_PTP_IN_SYNCHRONIZE_NUMTIMESETS_OFST 8
+/* Host address in which to write "synchronization started" indication (64
+ * bits)
+ */
+#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_OFST 12
+#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_LEN 8
+#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_LO_OFST 12
+#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_HI_OFST 16
+
+/* MC_CMD_PTP_IN_MANFTEST_BASIC msgrequest */
+#define MC_CMD_PTP_IN_MANFTEST_BASIC_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_MANFTEST_PACKET msgrequest */
+#define MC_CMD_PTP_IN_MANFTEST_PACKET_LEN 12
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Enable or disable packet testing */
+#define MC_CMD_PTP_IN_MANFTEST_PACKET_TEST_ENABLE_OFST 8
+
+/* MC_CMD_PTP_IN_RESET_STATS msgrequest */
+#define MC_CMD_PTP_IN_RESET_STATS_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* Reset PTP statistics */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_DEBUG msgrequest */
+#define MC_CMD_PTP_IN_DEBUG_LEN 12
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Debug operations */
+#define MC_CMD_PTP_IN_DEBUG_DEBUG_PARAM_OFST 8
+
+/* MC_CMD_PTP_IN_FPGAREAD msgrequest */
+#define MC_CMD_PTP_IN_FPGAREAD_LEN 16
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+#define MC_CMD_PTP_IN_FPGAREAD_ADDR_OFST 8
+#define MC_CMD_PTP_IN_FPGAREAD_NUMBYTES_OFST 12
+
+/* MC_CMD_PTP_IN_FPGAWRITE msgrequest */
+#define MC_CMD_PTP_IN_FPGAWRITE_LENMIN 13
+#define MC_CMD_PTP_IN_FPGAWRITE_LENMAX 252
+#define MC_CMD_PTP_IN_FPGAWRITE_LEN(num) (12+1*(num))
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+#define MC_CMD_PTP_IN_FPGAWRITE_ADDR_OFST 8
+#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_OFST 12
+#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_LEN 1
+#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_MINNUM 1
+#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_MAXNUM 240
+
+/* MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST msgrequest */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_LEN 16
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Time adjustment in seconds */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_SECONDS_OFST 8
+/* Time adjustment major value */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MAJOR_OFST 8
+/* Time adjustment in nanoseconds */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_NANOSECONDS_OFST 12
+/* Time adjustment minor value */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MINOR_OFST 12
+
+/* MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST msgrequest */
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_LEN 16
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Frequency adjustment 40 bit fixed point ns */
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_OFST 8
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LEN 8
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LO_OFST 8
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_HI_OFST 12
+/* enum: Number of fractional bits in frequency adjustment */
+/* MC_CMD_PTP_IN_ADJUST_BITS 0x28 */
+
+/* MC_CMD_PTP_IN_RX_SET_VLAN_FILTER msgrequest */
+#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_LEN 24
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Number of VLAN tags, 0 if not VLAN */
+#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_NUM_VLAN_TAGS_OFST 8
+/* Set of VLAN tags to filter against */
+#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_OFST 12
+#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_LEN 4
+#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_NUM 3
+
+/* MC_CMD_PTP_IN_RX_SET_UUID_FILTER msgrequest */
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_LEN 20
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* 1 to enable UUID filtering, 0 to disable */
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_ENABLE_OFST 8
+/* UUID to filter against */
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_OFST 12
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LEN 8
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LO_OFST 12
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_HI_OFST 16
+
+/* MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER msgrequest */
+#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_LEN 16
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* 1 to enable Domain filtering, 0 to disable */
+#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_ENABLE_OFST 8
+/* Domain number to filter against */
+#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_DOMAIN_OFST 12
+
+/* MC_CMD_PTP_IN_SET_CLK_SRC msgrequest */
+#define MC_CMD_PTP_IN_SET_CLK_SRC_LEN 12
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Set the clock source. */
+#define MC_CMD_PTP_IN_SET_CLK_SRC_CLK_OFST 8
+/* enum: Internal. */
+#define MC_CMD_PTP_CLK_SRC_INTERNAL 0x0
+/* enum: External. */
+#define MC_CMD_PTP_CLK_SRC_EXTERNAL 0x1
+
+/* MC_CMD_PTP_IN_RST_CLK msgrequest */
+#define MC_CMD_PTP_IN_RST_CLK_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* Reset value of Timer Reg. */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_PPS_ENABLE msgrequest */
+#define MC_CMD_PTP_IN_PPS_ENABLE_LEN 12
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* Enable or disable */
+#define MC_CMD_PTP_IN_PPS_ENABLE_OP_OFST 4
+/* enum: Enable */
+#define MC_CMD_PTP_ENABLE_PPS 0x0
+/* enum: Disable */
+#define MC_CMD_PTP_DISABLE_PPS 0x1
+/* Queue id to send events back */
+#define MC_CMD_PTP_IN_PPS_ENABLE_QUEUE_ID_OFST 8
+
+/* MC_CMD_PTP_IN_GET_TIME_FORMAT msgrequest */
+#define MC_CMD_PTP_IN_GET_TIME_FORMAT_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_GET_ATTRIBUTES msgrequest */
+#define MC_CMD_PTP_IN_GET_ATTRIBUTES_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS msgrequest */
+#define MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE msgrequest */
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN 12
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Event queue to send PTP time events to */
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_OFST 8
+
+/* MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE msgrequest */
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN 16
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Unsubscribe options */
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL_OFST 8
+/* enum: Unsubscribe a single queue */
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_SINGLE 0x0
+/* enum: Unsubscribe all queues */
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_ALL 0x1
+/* Event queue ID */
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE_OFST 12
+
+/* MC_CMD_PTP_IN_MANFTEST_PPS msgrequest */
+#define MC_CMD_PTP_IN_MANFTEST_PPS_LEN 12
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* 1 to enable PPS test mode, 0 to disable and return result. */
+#define MC_CMD_PTP_IN_MANFTEST_PPS_TEST_ENABLE_OFST 8
+
+/* MC_CMD_PTP_OUT msgresponse */
+#define MC_CMD_PTP_OUT_LEN 0
+
+/* MC_CMD_PTP_OUT_TRANSMIT msgresponse */
+#define MC_CMD_PTP_OUT_TRANSMIT_LEN 8
+/* Value of seconds timestamp */
+#define MC_CMD_PTP_OUT_TRANSMIT_SECONDS_OFST 0
+/* Timestamp major value */
+#define MC_CMD_PTP_OUT_TRANSMIT_MAJOR_OFST 0
+/* Value of nanoseconds timestamp */
+#define MC_CMD_PTP_OUT_TRANSMIT_NANOSECONDS_OFST 4
+/* Timestamp minor value */
+#define MC_CMD_PTP_OUT_TRANSMIT_MINOR_OFST 4
+
+/* MC_CMD_PTP_OUT_TIME_EVENT_SUBSCRIBE msgresponse */
+#define MC_CMD_PTP_OUT_TIME_EVENT_SUBSCRIBE_LEN 0
+
+/* MC_CMD_PTP_OUT_TIME_EVENT_UNSUBSCRIBE msgresponse */
+#define MC_CMD_PTP_OUT_TIME_EVENT_UNSUBSCRIBE_LEN 0
+
+/* MC_CMD_PTP_OUT_READ_NIC_TIME msgresponse */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_LEN 8
+/* Value of seconds timestamp */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_SECONDS_OFST 0
+/* Timestamp major value */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_MAJOR_OFST 0
+/* Value of nanoseconds timestamp */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_NANOSECONDS_OFST 4
+/* Timestamp minor value */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_MINOR_OFST 4
+
+/* MC_CMD_PTP_OUT_STATUS msgresponse */
+#define MC_CMD_PTP_OUT_STATUS_LEN 64
+/* Frequency of NIC's hardware clock */
+#define MC_CMD_PTP_OUT_STATUS_CLOCK_FREQ_OFST 0
+/* Number of packets transmitted and timestamped */
+#define MC_CMD_PTP_OUT_STATUS_STATS_TX_OFST 4
+/* Number of packets received and timestamped */
+#define MC_CMD_PTP_OUT_STATUS_STATS_RX_OFST 8
+/* Number of packets timestamped by the FPGA */
+#define MC_CMD_PTP_OUT_STATUS_STATS_TS_OFST 12
+/* Number of packets filter matched */
+#define MC_CMD_PTP_OUT_STATUS_STATS_FM_OFST 16
+/* Number of packets not filter matched */
+#define MC_CMD_PTP_OUT_STATUS_STATS_NFM_OFST 20
+/* Number of PPS overflows (noise on input?) */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFLOW_OFST 24
+/* Number of PPS bad periods */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_BAD_OFST 28
+/* Minimum period of PPS pulse in nanoseconds */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MIN_OFST 32
+/* Maximum period of PPS pulse in nanoseconds */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MAX_OFST 36
+/* Last period of PPS pulse in nanoseconds */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_LAST_OFST 40
+/* Mean period of PPS pulse in nanoseconds */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MEAN_OFST 44
+/* Minimum offset of PPS pulse in nanoseconds (signed) */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MIN_OFST 48
+/* Maximum offset of PPS pulse in nanoseconds (signed) */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MAX_OFST 52
+/* Last offset of PPS pulse in nanoseconds (signed) */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_LAST_OFST 56
+/* Mean offset of PPS pulse in nanoseconds (signed) */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MEAN_OFST 60
+
+/* MC_CMD_PTP_OUT_SYNCHRONIZE msgresponse */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_LENMIN 20
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_LENMAX 240
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_LEN(num) (0+20*(num))
+/* A set of host and NIC times */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_OFST 0
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN 20
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MINNUM 1
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM 12
+/* Host time immediately before NIC's hardware clock read */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_OFST 0
+/* Value of seconds timestamp */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_SECONDS_OFST 4
+/* Timestamp major value */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_MAJOR_OFST 4
+/* Value of nanoseconds timestamp */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_NANOSECONDS_OFST 8
+/* Timestamp minor value */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_MINOR_OFST 8
+/* Host time immediately after NIC's hardware clock read */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTEND_OFST 12
+/* Number of nanoseconds waited after reading NIC's hardware clock */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_WAITNS_OFST 16
+
+/* MC_CMD_PTP_OUT_MANFTEST_BASIC msgresponse */
+#define MC_CMD_PTP_OUT_MANFTEST_BASIC_LEN 8
+/* Results of testing */
+#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_RESULT_OFST 0
+/* enum: Successful test */
+#define MC_CMD_PTP_MANF_SUCCESS 0x0
+/* enum: FPGA load failed */
+#define MC_CMD_PTP_MANF_FPGA_LOAD 0x1
+/* enum: FPGA version invalid */
+#define MC_CMD_PTP_MANF_FPGA_VERSION 0x2
+/* enum: FPGA registers incorrect */
+#define MC_CMD_PTP_MANF_FPGA_REGISTERS 0x3
+/* enum: Oscillator possibly not working? */
+#define MC_CMD_PTP_MANF_OSCILLATOR 0x4
+/* enum: Timestamps not increasing */
+#define MC_CMD_PTP_MANF_TIMESTAMPS 0x5
+/* enum: Mismatched packet count */
+#define MC_CMD_PTP_MANF_PACKET_COUNT 0x6
+/* enum: Mismatched packet count (Siena filter and FPGA) */
+#define MC_CMD_PTP_MANF_FILTER_COUNT 0x7
+/* enum: Not enough packets to perform timestamp check */
+#define MC_CMD_PTP_MANF_PACKET_ENOUGH 0x8
+/* enum: Timestamp trigger GPIO not working */
+#define MC_CMD_PTP_MANF_GPIO_TRIGGER 0x9
+/* enum: Insufficient PPS events to perform checks */
+#define MC_CMD_PTP_MANF_PPS_ENOUGH 0xa
+/* enum: PPS time event period not sufficiently close to 1s. */
+#define MC_CMD_PTP_MANF_PPS_PERIOD 0xb
+/* enum: PPS time event ns reading not sufficiently close to zero. */
+#define MC_CMD_PTP_MANF_PPS_NS 0xc
+/* enum: PTP peripheral registers incorrect */
+#define MC_CMD_PTP_MANF_REGISTERS 0xd
+/* enum: Failed to read time from PTP peripheral */
+#define MC_CMD_PTP_MANF_CLOCK_READ 0xe
+/* Presence of external oscillator */
+#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_EXTOSC_OFST 4
+
+/* MC_CMD_PTP_OUT_MANFTEST_PACKET msgresponse */
+#define MC_CMD_PTP_OUT_MANFTEST_PACKET_LEN 12
+/* Results of testing */
+#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_RESULT_OFST 0
+/* Number of packets received by FPGA */
+#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FPGACOUNT_OFST 4
+/* Number of packets received by Siena filters */
+#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FILTERCOUNT_OFST 8
+
+/* MC_CMD_PTP_OUT_FPGAREAD msgresponse */
+#define MC_CMD_PTP_OUT_FPGAREAD_LENMIN 1
+#define MC_CMD_PTP_OUT_FPGAREAD_LENMAX 252
+#define MC_CMD_PTP_OUT_FPGAREAD_LEN(num) (0+1*(num))
+#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_OFST 0
+#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_LEN 1
+#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_MINNUM 1
+#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_MAXNUM 252
+
+/* MC_CMD_PTP_OUT_GET_TIME_FORMAT msgresponse */
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_LEN 4
+/* Time format required/used by this NIC. Applies to all PTP MCDI
+ * operations that pass times between the host and firmware. If this operation
+ * is not supported (older firmware) a format of seconds and nanoseconds should
+ * be assumed.
+ */
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_FORMAT_OFST 0
+/* enum: Times are in seconds and nanoseconds */
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_NANOSECONDS 0x0
+/* enum: Major register has units of 16 seconds per tick, minor 8 ns per tick */
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_16SECONDS_8NANOSECONDS 0x1
+/* enum: Major register has units of seconds, minor 2^-27s per tick */
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_27FRACTION 0x2
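+
+/* Illustrative sketch (not part of the generated MCDI definitions): one way a
+ * driver might convert a (MAJOR, MINOR) pair to seconds and nanoseconds
+ * according to the format reported above. Assumes kernel types from
+ * <linux/types.h>; the function name is hypothetical.
+ */
+static inline void example_ptp_raw_to_s_ns(u32 format, u32 major, u32 minor,
+					    u64 *s, u32 *ns)
+{
+	u64 total_ns;
+
+	switch (format) {
+	case MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_NANOSECONDS:
+		*s = major;				/* major = seconds */
+		*ns = minor;				/* minor = nanoseconds */
+		break;
+	case MC_CMD_PTP_OUT_GET_TIME_FORMAT_16SECONDS_8NANOSECONDS:
+		total_ns = (u64)minor * 8;		/* minor = 8 ns ticks */
+		*s = (u64)major * 16 + total_ns / 1000000000;
+		*ns = (u32)(total_ns % 1000000000);
+		break;
+	case MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_27FRACTION:
+		*s = major;				/* major = seconds */
+		*ns = (u32)(((u64)minor * 1000000000) >> 27); /* 2^-27 s ticks */
+		break;
+	}
+}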
+
+/* MC_CMD_PTP_OUT_GET_ATTRIBUTES msgresponse */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN 8
+/* Time format required/used by this NIC. Applies to all PTP MCDI
+ * operations that pass times between the host and firmware. If this operation
+ * is not supported (older firmware) a format of seconds and nanoseconds should
+ * be assumed.
+ */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT_OFST 0
+/* enum: Times are in seconds and nanoseconds */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS 0x0
+/* enum: Major register has units of 16 seconds per tick, minor 8 ns per tick */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_16SECONDS_8NANOSECONDS 0x1
+/* enum: Major register has units of seconds, minor 2^-27s per tick */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_27FRACTION 0x2
+/* Minimum acceptable value for a corrected synchronization timeset. When
+ * comparing host and NIC clock times, the MC returns a set of samples that
+ * contain the host start and end time, the MC time when the host start was
+ * detected and the time the MC waited between reading the time and detecting
+ * the host end. The corrected sync window is the difference between the host
+ * end and start times minus the time that the MC waited for host end.
+ */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN_OFST 4
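+
+/* Illustrative sketch (not part of the generated MCDI definitions): the
+ * corrected sync window for one MC_CMD_PTP_OUT_SYNCHRONIZE timeset, computed
+ * as described above (host end minus host start, minus the time the MC waited
+ * for host end) and compared against SYNC_WINDOW_MIN. Assumes the HOSTSTART/
+ * HOSTEND fields are 32-bit host nanosecond counters and that kernel types
+ * are available; the function name is hypothetical.
+ */
+static inline bool example_sync_window_ok(u32 hoststart, u32 hostend,
+					  u32 waitns, u32 window_min)
+{
+	/* unsigned arithmetic handles wrap of the 32-bit host counter */
+	u32 window = hostend - hoststart - waitns;
+
+	return window >= window_min;
+}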
+
+/* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS msgresponse */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_LEN 16
+/* Uncorrected error on transmit timestamps in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT_OFST 0
+/* Uncorrected error on receive timestamps in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE_OFST 4
+/* Uncorrected error on PPS output in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT_OFST 8
+/* Uncorrected error on PPS input in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN_OFST 12
+
+/* MC_CMD_PTP_OUT_MANFTEST_PPS msgresponse */
+#define MC_CMD_PTP_OUT_MANFTEST_PPS_LEN 4
+/* Results of testing */
+#define MC_CMD_PTP_OUT_MANFTEST_PPS_TEST_RESULT_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_PTP_OUT_MANFTEST_BASIC/TEST_RESULT */
+
+
+/***********************************/
+/* MC_CMD_CSR_READ32
+ * Read 32bit words from the indirect memory map.
+ */
+#define MC_CMD_CSR_READ32 0xc
+
+/* MC_CMD_CSR_READ32_IN msgrequest */
+#define MC_CMD_CSR_READ32_IN_LEN 12
+/* Address */
+#define MC_CMD_CSR_READ32_IN_ADDR_OFST 0
+#define MC_CMD_CSR_READ32_IN_STEP_OFST 4
+#define MC_CMD_CSR_READ32_IN_NUMWORDS_OFST 8
+
+/* MC_CMD_CSR_READ32_OUT msgresponse */
+#define MC_CMD_CSR_READ32_OUT_LENMIN 4
+#define MC_CMD_CSR_READ32_OUT_LENMAX 252
+#define MC_CMD_CSR_READ32_OUT_LEN(num) (0+4*(num))
+/* The last dword is the status, not a value read */
+#define MC_CMD_CSR_READ32_OUT_BUFFER_OFST 0
+#define MC_CMD_CSR_READ32_OUT_BUFFER_LEN 4
+#define MC_CMD_CSR_READ32_OUT_BUFFER_MINNUM 1
+#define MC_CMD_CSR_READ32_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_CSR_WRITE32
+ * Write 32bit dwords to the indirect memory map.
+ */
+#define MC_CMD_CSR_WRITE32 0xd
+
+/* MC_CMD_CSR_WRITE32_IN msgrequest */
+#define MC_CMD_CSR_WRITE32_IN_LENMIN 12
+#define MC_CMD_CSR_WRITE32_IN_LENMAX 252
+#define MC_CMD_CSR_WRITE32_IN_LEN(num) (8+4*(num))
+/* Address */
+#define MC_CMD_CSR_WRITE32_IN_ADDR_OFST 0
+#define MC_CMD_CSR_WRITE32_IN_STEP_OFST 4
+#define MC_CMD_CSR_WRITE32_IN_BUFFER_OFST 8
+#define MC_CMD_CSR_WRITE32_IN_BUFFER_LEN 4
+#define MC_CMD_CSR_WRITE32_IN_BUFFER_MINNUM 1
+#define MC_CMD_CSR_WRITE32_IN_BUFFER_MAXNUM 61
+
+/* MC_CMD_CSR_WRITE32_OUT msgresponse */
+#define MC_CMD_CSR_WRITE32_OUT_LEN 4
+#define MC_CMD_CSR_WRITE32_OUT_STATUS_OFST 0
+
+
+/***********************************/
+/* MC_CMD_HP
+ * These commands are used for HP related features. They are grouped under one
+ * MCDI command to avoid creating too many MCDI commands.
+ */
+#define MC_CMD_HP 0x54
+
+/* MC_CMD_HP_IN msgrequest */
+#define MC_CMD_HP_IN_LEN 16
+/* HP OCSD sub-command. When address is not NULL, request activation of OCSD at
+ * the specified address with the specified interval. When address is NULL,
+ * INTERVAL is interpreted as a command: 0: stop OCSD / 1: Report OCSD current
+ * state / 2: (debug) Show temperature reported by one of the supported
+ * sensors.
+ */
+#define MC_CMD_HP_IN_SUBCMD_OFST 0
+/* enum: OCSD (Option Card Sensor Data) sub-command. */
+#define MC_CMD_HP_IN_OCSD_SUBCMD 0x0
+/* enum: Last known valid HP sub-command. */
+#define MC_CMD_HP_IN_LAST_SUBCMD 0x0
+/* The address of the array of sensor fields. (Or NULL to use a sub-command.)
+ */
+#define MC_CMD_HP_IN_OCSD_ADDR_OFST 4
+#define MC_CMD_HP_IN_OCSD_ADDR_LEN 8
+#define MC_CMD_HP_IN_OCSD_ADDR_LO_OFST 4
+#define MC_CMD_HP_IN_OCSD_ADDR_HI_OFST 8
+/* The requested update interval, in seconds. (Or the sub-command if ADDR is
+ * NULL.)
+ */
+#define MC_CMD_HP_IN_OCSD_INTERVAL_OFST 12
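+
+/* Illustrative sketch (not part of the generated MCDI definitions): packing an
+ * MC_CMD_HP_IN request. A non-zero ADDR starts OCSD at that address with the
+ * given update interval; a zero ADDR makes INTERVAL carry a sub-command
+ * (0 stop, 1 report state, 2 show temperature), as described above. Assumes
+ * the kernel's put_unaligned_le32() helper; the function name is hypothetical.
+ */
+static inline void example_fill_hp_ocsd(u8 *buf, u64 addr, u32 interval_or_cmd)
+{
+	put_unaligned_le32(MC_CMD_HP_IN_OCSD_SUBCMD,
+			   buf + MC_CMD_HP_IN_SUBCMD_OFST);
+	put_unaligned_le32((u32)addr, buf + MC_CMD_HP_IN_OCSD_ADDR_LO_OFST);
+	put_unaligned_le32((u32)(addr >> 32),
+			   buf + MC_CMD_HP_IN_OCSD_ADDR_HI_OFST);
+	put_unaligned_le32(interval_or_cmd, buf + MC_CMD_HP_IN_OCSD_INTERVAL_OFST);
+}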
+
+/* MC_CMD_HP_OUT msgresponse */
+#define MC_CMD_HP_OUT_LEN 4
+#define MC_CMD_HP_OUT_OCSD_STATUS_OFST 0
+/* enum: OCSD stopped for this card. */
+#define MC_CMD_HP_OUT_OCSD_STOPPED 0x1
+/* enum: OCSD was successfully started with the address provided. */
+#define MC_CMD_HP_OUT_OCSD_STARTED 0x2
+/* enum: OCSD was already started for this card. */
+#define MC_CMD_HP_OUT_OCSD_ALREADY_STARTED 0x3
+
+
+/***********************************/
+/* MC_CMD_STACKINFO
+ * Get stack information.
+ */
+#define MC_CMD_STACKINFO 0xf
+
+/* MC_CMD_STACKINFO_IN msgrequest */
+#define MC_CMD_STACKINFO_IN_LEN 0
+
+/* MC_CMD_STACKINFO_OUT msgresponse */
+#define MC_CMD_STACKINFO_OUT_LENMIN 12
+#define MC_CMD_STACKINFO_OUT_LENMAX 252
+#define MC_CMD_STACKINFO_OUT_LEN(num) (0+12*(num))
+/* (thread ptr, stack size, free space) for each thread in system */
+#define MC_CMD_STACKINFO_OUT_THREAD_INFO_OFST 0
+#define MC_CMD_STACKINFO_OUT_THREAD_INFO_LEN 12
+#define MC_CMD_STACKINFO_OUT_THREAD_INFO_MINNUM 1
+#define MC_CMD_STACKINFO_OUT_THREAD_INFO_MAXNUM 21
+
+
+/***********************************/
+/* MC_CMD_MDIO_READ
+ * MDIO register read.
+ */
+#define MC_CMD_MDIO_READ 0x10
+
+/* MC_CMD_MDIO_READ_IN msgrequest */
+#define MC_CMD_MDIO_READ_IN_LEN 16
+/* Bus number; there are two MDIO buses: one for the internal PHY, and one for
+ * external devices.
+ */
+#define MC_CMD_MDIO_READ_IN_BUS_OFST 0
+/* enum: Internal. */
+#define MC_CMD_MDIO_BUS_INTERNAL 0x0
+/* enum: External. */
+#define MC_CMD_MDIO_BUS_EXTERNAL 0x1
+/* Port address */
+#define MC_CMD_MDIO_READ_IN_PRTAD_OFST 4
+/* Device Address or clause 22. */
+#define MC_CMD_MDIO_READ_IN_DEVAD_OFST 8
+/* enum: By default all the MCDI MDIO operations operate in clause45 mode. If
+ * you want to use clause22 then set DEVAD = MC_CMD_MDIO_CLAUSE22.
+ */
+#define MC_CMD_MDIO_CLAUSE22 0x20
+/* Address */
+#define MC_CMD_MDIO_READ_IN_ADDR_OFST 12
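+
+/* Illustrative sketch (not part of the generated MCDI definitions): filling an
+ * MC_CMD_MDIO_READ_IN request for a clause 45 read; for clause 22 the DEVAD
+ * field would be set to MC_CMD_MDIO_CLAUSE22 instead, as noted above. Assumes
+ * the kernel's put_unaligned_le32() helper; the function name is hypothetical.
+ */
+static inline void example_fill_mdio_read(u8 *buf, u32 bus, u32 prtad,
+					  u32 devad, u32 addr)
+{
+	put_unaligned_le32(bus, buf + MC_CMD_MDIO_READ_IN_BUS_OFST);
+	put_unaligned_le32(prtad, buf + MC_CMD_MDIO_READ_IN_PRTAD_OFST);
+	put_unaligned_le32(devad, buf + MC_CMD_MDIO_READ_IN_DEVAD_OFST);
+	put_unaligned_le32(addr, buf + MC_CMD_MDIO_READ_IN_ADDR_OFST);
+}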
+
+/* MC_CMD_MDIO_READ_OUT msgresponse */
+#define MC_CMD_MDIO_READ_OUT_LEN 8
+/* Value */
+#define MC_CMD_MDIO_READ_OUT_VALUE_OFST 0
+/* Status; the MDIO commands return the raw status bits from the MDIO block. A
+ * "good" transaction should have the DONE bit set and all other bits clear.
+ */
+#define MC_CMD_MDIO_READ_OUT_STATUS_OFST 4
+/* enum: Good. */
+#define MC_CMD_MDIO_STATUS_GOOD 0x8
+
+
+/***********************************/
+/* MC_CMD_MDIO_WRITE
+ * MDIO register write.
+ */
+#define MC_CMD_MDIO_WRITE 0x11
+
+/* MC_CMD_MDIO_WRITE_IN msgrequest */
+#define MC_CMD_MDIO_WRITE_IN_LEN 20
+/* Bus number; there are two MDIO buses: one for the internal PHY, and one for
+ * external devices.
+ */
+#define MC_CMD_MDIO_WRITE_IN_BUS_OFST 0
+/* enum: Internal. */
+/* MC_CMD_MDIO_BUS_INTERNAL 0x0 */
+/* enum: External. */
+/* MC_CMD_MDIO_BUS_EXTERNAL 0x1 */
+/* Port address */
+#define MC_CMD_MDIO_WRITE_IN_PRTAD_OFST 4
+/* Device Address or clause 22. */
+#define MC_CMD_MDIO_WRITE_IN_DEVAD_OFST 8
+/* enum: By default all the MCDI MDIO operations operate in clause45 mode. If
+ * you want to use clause22 then set DEVAD = MC_CMD_MDIO_CLAUSE22.
+ */
+/* MC_CMD_MDIO_CLAUSE22 0x20 */
+/* Address */
+#define MC_CMD_MDIO_WRITE_IN_ADDR_OFST 12
+/* Value */
+#define MC_CMD_MDIO_WRITE_IN_VALUE_OFST 16
+
+/* MC_CMD_MDIO_WRITE_OUT msgresponse */
+#define MC_CMD_MDIO_WRITE_OUT_LEN 4
+/* Status; the MDIO commands return the raw status bits from the MDIO block. A
+ * "good" transaction should have the DONE bit set and all other bits clear.
+ */
+#define MC_CMD_MDIO_WRITE_OUT_STATUS_OFST 0
+/* enum: Good. */
+/* MC_CMD_MDIO_STATUS_GOOD 0x8 */
+
+
+/***********************************/
+/* MC_CMD_DBI_WRITE
+ * Write DBI register(s).
+ */
+#define MC_CMD_DBI_WRITE 0x12
+
+/* MC_CMD_DBI_WRITE_IN msgrequest */
+#define MC_CMD_DBI_WRITE_IN_LENMIN 12
+#define MC_CMD_DBI_WRITE_IN_LENMAX 252
+#define MC_CMD_DBI_WRITE_IN_LEN(num) (0+12*(num))
+/* Each write op consists of an address (offset 0), byte enable/VF/CS2 (offset
+ * 32) and value (offset 64). See MC_CMD_DBIWROP_TYPEDEF.
+ */
+#define MC_CMD_DBI_WRITE_IN_DBIWROP_OFST 0
+#define MC_CMD_DBI_WRITE_IN_DBIWROP_LEN 12
+#define MC_CMD_DBI_WRITE_IN_DBIWROP_MINNUM 1
+#define MC_CMD_DBI_WRITE_IN_DBIWROP_MAXNUM 21
+
+/* MC_CMD_DBI_WRITE_OUT msgresponse */
+#define MC_CMD_DBI_WRITE_OUT_LEN 0
+
+/* MC_CMD_DBIWROP_TYPEDEF structuredef */
+#define MC_CMD_DBIWROP_TYPEDEF_LEN 12
+#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_OFST 0
+#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LBN 0
+#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_WIDTH 32
+#define MC_CMD_DBIWROP_TYPEDEF_PARMS_OFST 4
+#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_LBN 16
+#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_WIDTH 16
+#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_LBN 15
+#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_WIDTH 1
+#define MC_CMD_DBIWROP_TYPEDEF_CS2_LBN 14
+#define MC_CMD_DBIWROP_TYPEDEF_CS2_WIDTH 1
+#define MC_CMD_DBIWROP_TYPEDEF_PARMS_LBN 32
+#define MC_CMD_DBIWROP_TYPEDEF_PARMS_WIDTH 32
+#define MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST 8
+#define MC_CMD_DBIWROP_TYPEDEF_VALUE_LBN 64
+#define MC_CMD_DBIWROP_TYPEDEF_VALUE_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_PORT_READ32
+ * Read a 32-bit register from the indirect port register map. The port to
+ * access is implied by the Shared memory channel used.
+ */
+#define MC_CMD_PORT_READ32 0x14
+
+/* MC_CMD_PORT_READ32_IN msgrequest */
+#define MC_CMD_PORT_READ32_IN_LEN 4
+/* Address */
+#define MC_CMD_PORT_READ32_IN_ADDR_OFST 0
+
+/* MC_CMD_PORT_READ32_OUT msgresponse */
+#define MC_CMD_PORT_READ32_OUT_LEN 8
+/* Value */
+#define MC_CMD_PORT_READ32_OUT_VALUE_OFST 0
+/* Status */
+#define MC_CMD_PORT_READ32_OUT_STATUS_OFST 4
+
+
+/***********************************/
+/* MC_CMD_PORT_WRITE32
+ * Write a 32-bit register to the indirect port register map. The port to
+ * access is implied by the Shared memory channel used.
+ */
+#define MC_CMD_PORT_WRITE32 0x15
+
+/* MC_CMD_PORT_WRITE32_IN msgrequest */
+#define MC_CMD_PORT_WRITE32_IN_LEN 8
+/* Address */
+#define MC_CMD_PORT_WRITE32_IN_ADDR_OFST 0
+/* Value */
+#define MC_CMD_PORT_WRITE32_IN_VALUE_OFST 4
+
+/* MC_CMD_PORT_WRITE32_OUT msgresponse */
+#define MC_CMD_PORT_WRITE32_OUT_LEN 4
+/* Status */
+#define MC_CMD_PORT_WRITE32_OUT_STATUS_OFST 0
+
+
+/***********************************/
+/* MC_CMD_PORT_READ128
+ * Read a 128-bit register from the indirect port register map. The port to
+ * access is implied by the Shared memory channel used.
+ */
+#define MC_CMD_PORT_READ128 0x16
+
+/* MC_CMD_PORT_READ128_IN msgrequest */
+#define MC_CMD_PORT_READ128_IN_LEN 4
+/* Address */
+#define MC_CMD_PORT_READ128_IN_ADDR_OFST 0
+
+/* MC_CMD_PORT_READ128_OUT msgresponse */
+#define MC_CMD_PORT_READ128_OUT_LEN 20
+/* Value */
+#define MC_CMD_PORT_READ128_OUT_VALUE_OFST 0
+#define MC_CMD_PORT_READ128_OUT_VALUE_LEN 16
+/* Status */
+#define MC_CMD_PORT_READ128_OUT_STATUS_OFST 16
+
+
+/***********************************/
+/* MC_CMD_PORT_WRITE128
+ * Write a 128-bit register to the indirect port register map. The port to
+ * access is implied by the Shared memory channel used.
+ */
+#define MC_CMD_PORT_WRITE128 0x17
+
+/* MC_CMD_PORT_WRITE128_IN msgrequest */
+#define MC_CMD_PORT_WRITE128_IN_LEN 20
+/* Address */
+#define MC_CMD_PORT_WRITE128_IN_ADDR_OFST 0
+/* Value */
+#define MC_CMD_PORT_WRITE128_IN_VALUE_OFST 4
+#define MC_CMD_PORT_WRITE128_IN_VALUE_LEN 16
+
+/* MC_CMD_PORT_WRITE128_OUT msgresponse */
+#define MC_CMD_PORT_WRITE128_OUT_LEN 4
+/* Status */
+#define MC_CMD_PORT_WRITE128_OUT_STATUS_OFST 0
+
+/* MC_CMD_CAPABILITIES structuredef */
+#define MC_CMD_CAPABILITIES_LEN 4
+/* Small buf table. */
+#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_LBN 0
+#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_WIDTH 1
+/* Turbo mode (for Maranello). */
+#define MC_CMD_CAPABILITIES_TURBO_LBN 1
+#define MC_CMD_CAPABILITIES_TURBO_WIDTH 1
+/* Turbo mode active (for Maranello). */
+#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_LBN 2
+#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_WIDTH 1
+/* PTP offload. */
+#define MC_CMD_CAPABILITIES_PTP_LBN 3
+#define MC_CMD_CAPABILITIES_PTP_WIDTH 1
+/* AOE mode. */
+#define MC_CMD_CAPABILITIES_AOE_LBN 4
+#define MC_CMD_CAPABILITIES_AOE_WIDTH 1
+/* AOE mode active. */
+#define MC_CMD_CAPABILITIES_AOE_ACTIVE_LBN 5
+#define MC_CMD_CAPABILITIES_AOE_ACTIVE_WIDTH 1
+/* FC mode active. */
+#define MC_CMD_CAPABILITIES_FC_ACTIVE_LBN 6
+#define MC_CMD_CAPABILITIES_FC_ACTIVE_WIDTH 1
+#define MC_CMD_CAPABILITIES_RESERVED_LBN 7
+#define MC_CMD_CAPABILITIES_RESERVED_WIDTH 25
+
+
+/***********************************/
+/* MC_CMD_GET_BOARD_CFG
+ * Returns the MC firmware configuration structure.
+ */
+#define MC_CMD_GET_BOARD_CFG 0x18
+
+/* MC_CMD_GET_BOARD_CFG_IN msgrequest */
+#define MC_CMD_GET_BOARD_CFG_IN_LEN 0
+
+/* MC_CMD_GET_BOARD_CFG_OUT msgresponse */
+#define MC_CMD_GET_BOARD_CFG_OUT_LENMIN 96
+#define MC_CMD_GET_BOARD_CFG_OUT_LENMAX 136
+#define MC_CMD_GET_BOARD_CFG_OUT_LEN(num) (72+2*(num))
+#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_TYPE_OFST 0
+#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_OFST 4
+#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_LEN 32
+/* See MC_CMD_CAPABILITIES */
+#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT0_OFST 36
+/* See MC_CMD_CAPABILITIES */
+#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT1_OFST 40
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST 44
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_LEN 6
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST 50
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_LEN 6
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT0_OFST 56
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT1_OFST 60
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT0_OFST 64
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT1_OFST 68
+/* This field contains a 16-bit value for each of the types of NVRAM area. The
+ * values are defined in the firmware/mc/platform/.c file for a specific board
+ * type, but otherwise have no meaning to the MC; they are used by the driver
+ * to manage selection of appropriate firmware updates.
+ */
+#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST 72
+#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN 2
+#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MINNUM 12
+#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM 32
+
+
+/***********************************/
+/* MC_CMD_DBI_READX
+ * Read DBI register(s) -- extended functionality
+ */
+#define MC_CMD_DBI_READX 0x19
+
+/* MC_CMD_DBI_READX_IN msgrequest */
+#define MC_CMD_DBI_READX_IN_LENMIN 8
+#define MC_CMD_DBI_READX_IN_LENMAX 248
+#define MC_CMD_DBI_READX_IN_LEN(num) (0+8*(num))
+/* Each Read op consists of an address (offset 0) and VF/CS2 (offset 32). See
+ * MC_CMD_DBIRDOP_TYPEDEF.
+ */
+#define MC_CMD_DBI_READX_IN_DBIRDOP_OFST 0
+#define MC_CMD_DBI_READX_IN_DBIRDOP_LEN 8
+#define MC_CMD_DBI_READX_IN_DBIRDOP_LO_OFST 0
+#define MC_CMD_DBI_READX_IN_DBIRDOP_HI_OFST 4
+#define MC_CMD_DBI_READX_IN_DBIRDOP_MINNUM 1
+#define MC_CMD_DBI_READX_IN_DBIRDOP_MAXNUM 31
+
+/* MC_CMD_DBI_READX_OUT msgresponse */
+#define MC_CMD_DBI_READX_OUT_LENMIN 4
+#define MC_CMD_DBI_READX_OUT_LENMAX 252
+#define MC_CMD_DBI_READX_OUT_LEN(num) (0+4*(num))
+/* Value */
+#define MC_CMD_DBI_READX_OUT_VALUE_OFST 0
+#define MC_CMD_DBI_READX_OUT_VALUE_LEN 4
+#define MC_CMD_DBI_READX_OUT_VALUE_MINNUM 1
+#define MC_CMD_DBI_READX_OUT_VALUE_MAXNUM 63
+
+/* MC_CMD_DBIRDOP_TYPEDEF structuredef */
+#define MC_CMD_DBIRDOP_TYPEDEF_LEN 8
+#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_OFST 0
+#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_LBN 0
+#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_WIDTH 32
+#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_OFST 4
+#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_LBN 16
+#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_WIDTH 16
+#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_LBN 15
+#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_WIDTH 1
+#define MC_CMD_DBIRDOP_TYPEDEF_CS2_LBN 14
+#define MC_CMD_DBIRDOP_TYPEDEF_CS2_WIDTH 1
+#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_LBN 32
+#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_SET_RAND_SEED
+ * Set the 16byte seed for the MC pseudo-random generator.
+ */
+#define MC_CMD_SET_RAND_SEED 0x1a
+
+/* MC_CMD_SET_RAND_SEED_IN msgrequest */
+#define MC_CMD_SET_RAND_SEED_IN_LEN 16
+/* Seed value. */
+#define MC_CMD_SET_RAND_SEED_IN_SEED_OFST 0
+#define MC_CMD_SET_RAND_SEED_IN_SEED_LEN 16
+
+/* MC_CMD_SET_RAND_SEED_OUT msgresponse */
+#define MC_CMD_SET_RAND_SEED_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_LTSSM_HIST
+ * Retrieve the history of the LTSSM, if the build supports it.
+ */
+#define MC_CMD_LTSSM_HIST 0x1b
+
+/* MC_CMD_LTSSM_HIST_IN msgrequest */
+#define MC_CMD_LTSSM_HIST_IN_LEN 0
+
+/* MC_CMD_LTSSM_HIST_OUT msgresponse */
+#define MC_CMD_LTSSM_HIST_OUT_LENMIN 0
+#define MC_CMD_LTSSM_HIST_OUT_LENMAX 252
+#define MC_CMD_LTSSM_HIST_OUT_LEN(num) (0+4*(num))
+/* variable number of LTSSM values, as bytes. The history is read-to-clear. */
+#define MC_CMD_LTSSM_HIST_OUT_DATA_OFST 0
+#define MC_CMD_LTSSM_HIST_OUT_DATA_LEN 4
+#define MC_CMD_LTSSM_HIST_OUT_DATA_MINNUM 0
+#define MC_CMD_LTSSM_HIST_OUT_DATA_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_DRV_ATTACH
+ * Inform MCPU that this port is managed on the host (i.e. driver active). For
+ * Huntington, also request the preferred datapath firmware to use if possible
+ * (it may not be possible for this request to be fulfilled; the driver must
+ * issue a subsequent MC_CMD_GET_CAPABILITIES command to determine which
+ * features are actually available). The FIRMWARE_ID field is ignored by older
+ * platforms.
+ */
+#define MC_CMD_DRV_ATTACH 0x1c
+
+/* MC_CMD_DRV_ATTACH_IN msgrequest */
+#define MC_CMD_DRV_ATTACH_IN_LEN 12
+/* new state (0=detached, 1=attached) to set if UPDATE=1 */
+#define MC_CMD_DRV_ATTACH_IN_NEW_STATE_OFST 0
+/* 1 to set new state, or 0 to just report the existing state */
+#define MC_CMD_DRV_ATTACH_IN_UPDATE_OFST 4
+/* preferred datapath firmware (for Huntington; ignored for Siena) */
+#define MC_CMD_DRV_ATTACH_IN_FIRMWARE_ID_OFST 8
+/* enum: Prefer to use full featured firmware */
+#define MC_CMD_FW_FULL_FEATURED 0x0
+/* enum: Prefer to use firmware with fewer features but lower latency */
+#define MC_CMD_FW_LOW_LATENCY 0x1
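+
+/* Illustrative sketch (not part of the generated MCDI definitions): filling an
+ * MC_CMD_DRV_ATTACH_IN request that attaches the driver and asks for the
+ * low-latency datapath firmware. As described above, the firmware preference
+ * may not be honoured, so the driver must still issue MC_CMD_GET_CAPABILITIES
+ * afterwards. Assumes the kernel's put_unaligned_le32() helper; the function
+ * name is hypothetical.
+ */
+static inline void example_fill_drv_attach(u8 *buf)
+{
+	/* attach (NEW_STATE=1) and actually set the new state (UPDATE=1) */
+	put_unaligned_le32(1, buf + MC_CMD_DRV_ATTACH_IN_NEW_STATE_OFST);
+	put_unaligned_le32(1, buf + MC_CMD_DRV_ATTACH_IN_UPDATE_OFST);
+	put_unaligned_le32(MC_CMD_FW_LOW_LATENCY,
+			   buf + MC_CMD_DRV_ATTACH_IN_FIRMWARE_ID_OFST);
+}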
+
+/* MC_CMD_DRV_ATTACH_OUT msgresponse */
+#define MC_CMD_DRV_ATTACH_OUT_LEN 4
+/* previous or existing state (0=detached, 1=attached) */
+#define MC_CMD_DRV_ATTACH_OUT_OLD_STATE_OFST 0
+
+/* MC_CMD_DRV_ATTACH_EXT_OUT msgresponse */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_LEN 8
+/* previous or existing state (0=detached, 1=attached) */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_OLD_STATE_OFST 0
+/* Flags associated with this function */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_OFST 4
+/* enum: Labels the lowest-numbered function visible to the OS */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY 0x0
+/* enum: The function can control the link state of the physical port it is
+ * bound to.
+ */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL 0x1
+/* enum: The function can perform privileged operations */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED 0x2
+
+
+/***********************************/
+/* MC_CMD_SHMUART
+ * Route UART output to circular buffer in shared memory instead.
+ */
+#define MC_CMD_SHMUART 0x1f
+
+/* MC_CMD_SHMUART_IN msgrequest */
+#define MC_CMD_SHMUART_IN_LEN 4
+/* ??? */
+#define MC_CMD_SHMUART_IN_FLAG_OFST 0
+
+/* MC_CMD_SHMUART_OUT msgresponse */
+#define MC_CMD_SHMUART_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_PORT_RESET
+ * Generic per-port reset. There is no equivalent for per-board reset. Locks
+ * required: None; Return code: 0, ETIME. NOTE: This command is deprecated -
+ * use MC_CMD_ENTITY_RESET instead.
+ */
+#define MC_CMD_PORT_RESET 0x20
+
+/* MC_CMD_PORT_RESET_IN msgrequest */
+#define MC_CMD_PORT_RESET_IN_LEN 0
+
+/* MC_CMD_PORT_RESET_OUT msgresponse */
+#define MC_CMD_PORT_RESET_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_ENTITY_RESET
+ * Generic per-resource reset. There is no equivalent for per-board reset.
+ * Locks required: None; Return code: 0, ETIME. NOTE: This command is an
+ * extended version of the deprecated MC_CMD_PORT_RESET with added fields.
+ */
+#define MC_CMD_ENTITY_RESET 0x20
+
+/* MC_CMD_ENTITY_RESET_IN msgrequest */
+#define MC_CMD_ENTITY_RESET_IN_LEN 4
+/* Optional flags field. Omitting this will perform a "legacy" reset action
+ * (TBD).
+ */
+#define MC_CMD_ENTITY_RESET_IN_FLAG_OFST 0
+#define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_LBN 0
+#define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_WIDTH 1
+
+/* MC_CMD_ENTITY_RESET_OUT msgresponse */
+#define MC_CMD_ENTITY_RESET_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_PCIE_CREDITS
+ * Read instantaneous and minimum flow control thresholds.
+ */
+#define MC_CMD_PCIE_CREDITS 0x21
+
+/* MC_CMD_PCIE_CREDITS_IN msgrequest */
+#define MC_CMD_PCIE_CREDITS_IN_LEN 8
+/* poll period. 0 is disabled */
+#define MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_OFST 0
+/* wipe statistics */
+#define MC_CMD_PCIE_CREDITS_IN_WIPE_OFST 4
+
+/* MC_CMD_PCIE_CREDITS_OUT msgresponse */
+#define MC_CMD_PCIE_CREDITS_OUT_LEN 16
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_HDR_OFST 0
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_HDR_LEN 2
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_DATA_OFST 2
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_DATA_LEN 2
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_HDR_OFST 4
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_HDR_LEN 2
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_DATA_OFST 6
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_DATA_LEN 2
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_HDR_OFST 8
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_HDR_LEN 2
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_DATA_OFST 10
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_DATA_LEN 2
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_HDR_OFST 12
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_HDR_LEN 2
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_DATA_OFST 14
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_DATA_LEN 2
+
+
+/***********************************/
+/* MC_CMD_RXD_MONITOR
+ * Get histogram of RX queue fill level.
+ */
+#define MC_CMD_RXD_MONITOR 0x22
+
+/* MC_CMD_RXD_MONITOR_IN msgrequest */
+#define MC_CMD_RXD_MONITOR_IN_LEN 12
+#define MC_CMD_RXD_MONITOR_IN_QID_OFST 0
+#define MC_CMD_RXD_MONITOR_IN_POLL_PERIOD_OFST 4
+#define MC_CMD_RXD_MONITOR_IN_WIPE_OFST 8
+
+/* MC_CMD_RXD_MONITOR_OUT msgresponse */
+#define MC_CMD_RXD_MONITOR_OUT_LEN 80
+#define MC_CMD_RXD_MONITOR_OUT_QID_OFST 0
+#define MC_CMD_RXD_MONITOR_OUT_RING_FILL_OFST 4
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_FILL_OFST 8
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_1_OFST 12
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_2_OFST 16
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_4_OFST 20
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_8_OFST 24
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_16_OFST 28
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_32_OFST 32
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_64_OFST 36
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_128_OFST 40
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_256_OFST 44
+#define MC_CMD_RXD_MONITOR_OUT_RING_GE_256_OFST 48
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_1_OFST 52
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_2_OFST 56
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_4_OFST 60
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_8_OFST 64
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_16_OFST 68
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_32_OFST 72
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_GE_32_OFST 76
+
+
+/***********************************/
+/* MC_CMD_PUTS
+ * Copy the given ASCII string out onto UART and/or out of the network port.
+ */
+#define MC_CMD_PUTS 0x23
+
+/* MC_CMD_PUTS_IN msgrequest */
+#define MC_CMD_PUTS_IN_LENMIN 13
+#define MC_CMD_PUTS_IN_LENMAX 252
+#define MC_CMD_PUTS_IN_LEN(num) (12+1*(num))
+#define MC_CMD_PUTS_IN_DEST_OFST 0
+#define MC_CMD_PUTS_IN_UART_LBN 0
+#define MC_CMD_PUTS_IN_UART_WIDTH 1
+#define MC_CMD_PUTS_IN_PORT_LBN 1
+#define MC_CMD_PUTS_IN_PORT_WIDTH 1
+#define MC_CMD_PUTS_IN_DHOST_OFST 4
+#define MC_CMD_PUTS_IN_DHOST_LEN 6
+#define MC_CMD_PUTS_IN_STRING_OFST 12
+#define MC_CMD_PUTS_IN_STRING_LEN 1
+#define MC_CMD_PUTS_IN_STRING_MINNUM 1
+#define MC_CMD_PUTS_IN_STRING_MAXNUM 240
+
+/* MC_CMD_PUTS_OUT msgresponse */
+#define MC_CMD_PUTS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_PHY_CFG
+ * Report PHY configuration. This guarantees to succeed even if the PHY is in a
+ * 'zombie' state. Locks required: None
+ */
+#define MC_CMD_GET_PHY_CFG 0x24
+
+/* MC_CMD_GET_PHY_CFG_IN msgrequest */
+#define MC_CMD_GET_PHY_CFG_IN_LEN 0
+
+/* MC_CMD_GET_PHY_CFG_OUT msgresponse */
+#define MC_CMD_GET_PHY_CFG_OUT_LEN 72
+/* flags */
+#define MC_CMD_GET_PHY_CFG_OUT_FLAGS_OFST 0
+#define MC_CMD_GET_PHY_CFG_OUT_PRESENT_LBN 0
+#define MC_CMD_GET_PHY_CFG_OUT_PRESENT_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_LBN 1
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_LBN 2
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_LBN 3
+#define MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_POWEROFF_LBN 4
+#define MC_CMD_GET_PHY_CFG_OUT_POWEROFF_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_TXDIS_LBN 5
+#define MC_CMD_GET_PHY_CFG_OUT_TXDIS_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_LBN 6
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_WIDTH 1
+/* ?? */
+#define MC_CMD_GET_PHY_CFG_OUT_TYPE_OFST 4
+/* Bitmask of supported capabilities */
+#define MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_OFST 8
+#define MC_CMD_PHY_CAP_10HDX_LBN 1
+#define MC_CMD_PHY_CAP_10HDX_WIDTH 1
+#define MC_CMD_PHY_CAP_10FDX_LBN 2
+#define MC_CMD_PHY_CAP_10FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_100HDX_LBN 3
+#define MC_CMD_PHY_CAP_100HDX_WIDTH 1
+#define MC_CMD_PHY_CAP_100FDX_LBN 4
+#define MC_CMD_PHY_CAP_100FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_1000HDX_LBN 5
+#define MC_CMD_PHY_CAP_1000HDX_WIDTH 1
+#define MC_CMD_PHY_CAP_1000FDX_LBN 6
+#define MC_CMD_PHY_CAP_1000FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_10000FDX_LBN 7
+#define MC_CMD_PHY_CAP_10000FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_PAUSE_LBN 8
+#define MC_CMD_PHY_CAP_PAUSE_WIDTH 1
+#define MC_CMD_PHY_CAP_ASYM_LBN 9
+#define MC_CMD_PHY_CAP_ASYM_WIDTH 1
+#define MC_CMD_PHY_CAP_AN_LBN 10
+#define MC_CMD_PHY_CAP_AN_WIDTH 1
+#define MC_CMD_PHY_CAP_40000FDX_LBN 11
+#define MC_CMD_PHY_CAP_40000FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_DDM_LBN 12
+#define MC_CMD_PHY_CAP_DDM_WIDTH 1
+/* ?? */
+#define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_OFST 12
+/* ?? */
+#define MC_CMD_GET_PHY_CFG_OUT_PRT_OFST 16
+/* ?? */
+#define MC_CMD_GET_PHY_CFG_OUT_STATS_MASK_OFST 20
+/* ?? */
+#define MC_CMD_GET_PHY_CFG_OUT_NAME_OFST 24
+#define MC_CMD_GET_PHY_CFG_OUT_NAME_LEN 20
+/* ?? */
+#define MC_CMD_GET_PHY_CFG_OUT_MEDIA_TYPE_OFST 44
+/* enum: Xaui. */
+#define MC_CMD_MEDIA_XAUI 0x1
+/* enum: CX4. */
+#define MC_CMD_MEDIA_CX4 0x2
+/* enum: KX4. */
+#define MC_CMD_MEDIA_KX4 0x3
+/* enum: XFP Far. */
+#define MC_CMD_MEDIA_XFP 0x4
+/* enum: SFP+. */
+#define MC_CMD_MEDIA_SFP_PLUS 0x5
+/* enum: 10GBaseT. */
+#define MC_CMD_MEDIA_BASE_T 0x6
+/* enum: QSFP+. */
+#define MC_CMD_MEDIA_QSFP_PLUS 0x7
+#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_OFST 48
+/* enum: Native clause 22 */
+#define MC_CMD_MMD_CLAUSE22 0x0
+#define MC_CMD_MMD_CLAUSE45_PMAPMD 0x1 /* enum */
+#define MC_CMD_MMD_CLAUSE45_WIS 0x2 /* enum */
+#define MC_CMD_MMD_CLAUSE45_PCS 0x3 /* enum */
+#define MC_CMD_MMD_CLAUSE45_PHYXS 0x4 /* enum */
+#define MC_CMD_MMD_CLAUSE45_DTEXS 0x5 /* enum */
+#define MC_CMD_MMD_CLAUSE45_TC 0x6 /* enum */
+#define MC_CMD_MMD_CLAUSE45_AN 0x7 /* enum */
+/* enum: Clause22 proxied over clause45 by PHY. */
+#define MC_CMD_MMD_CLAUSE45_C22EXT 0x1d
+#define MC_CMD_MMD_CLAUSE45_VEND1 0x1e /* enum */
+#define MC_CMD_MMD_CLAUSE45_VEND2 0x1f /* enum */
+#define MC_CMD_GET_PHY_CFG_OUT_REVISION_OFST 52
+#define MC_CMD_GET_PHY_CFG_OUT_REVISION_LEN 20
+
+
+/***********************************/
+/* MC_CMD_START_BIST
+ * Start a BIST test on the PHY. Locks required: PHY_LOCK if doing a PHY BIST
+ * Return code: 0, EINVAL, EACCES (if PHY_LOCK is not held)
+ */
+#define MC_CMD_START_BIST 0x25
+
+/* MC_CMD_START_BIST_IN msgrequest */
+#define MC_CMD_START_BIST_IN_LEN 4
+/* Type of test. */
+#define MC_CMD_START_BIST_IN_TYPE_OFST 0
+/* enum: Run the PHY's short cable BIST. */
+#define MC_CMD_PHY_BIST_CABLE_SHORT 0x1
+/* enum: Run the PHY's long cable BIST. */
+#define MC_CMD_PHY_BIST_CABLE_LONG 0x2
+/* enum: Run BIST on the currently selected BPX Serdes (XAUI or XFI). */
+#define MC_CMD_BPX_SERDES_BIST 0x3
+/* enum: Run the MC loopback tests. */
+#define MC_CMD_MC_LOOPBACK_BIST 0x4
+/* enum: Run the PHY's standard BIST. */
+#define MC_CMD_PHY_BIST 0x5
+/* enum: Run MC RAM test. */
+#define MC_CMD_MC_MEM_BIST 0x6
+/* enum: Run Port RAM test. */
+#define MC_CMD_PORT_MEM_BIST 0x7
+/* enum: Run register test. */
+#define MC_CMD_REG_BIST 0x8
+
+/* MC_CMD_START_BIST_OUT msgresponse */
+#define MC_CMD_START_BIST_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_POLL_BIST
+ * Poll for BIST completion. Returns a single status code, and optionally some
+ * PHY-specific BIST output. The driver should only consume the BIST output
+ * after validating OUTLEN and MC_CMD_GET_PHY_CFG.TYPE. If a driver can't
+ * successfully parse the BIST output, it should still respect the pass/fail in
+ * OUT.RESULT. Locks required: PHY_LOCK if doing a PHY BIST. Return code: 0,
+ * EACCES (if PHY_LOCK is not held).
+ */
+#define MC_CMD_POLL_BIST 0x26
+
+/* MC_CMD_POLL_BIST_IN msgrequest */
+#define MC_CMD_POLL_BIST_IN_LEN 0
+
+/* MC_CMD_POLL_BIST_OUT msgresponse */
+#define MC_CMD_POLL_BIST_OUT_LEN 8
+/* result */
+#define MC_CMD_POLL_BIST_OUT_RESULT_OFST 0
+/* enum: Running. */
+#define MC_CMD_POLL_BIST_RUNNING 0x1
+/* enum: Passed. */
+#define MC_CMD_POLL_BIST_PASSED 0x2
+/* enum: Failed. */
+#define MC_CMD_POLL_BIST_FAILED 0x3
+/* enum: Timed-out. */
+#define MC_CMD_POLL_BIST_TIMEOUT 0x4
+#define MC_CMD_POLL_BIST_OUT_PRIVATE_OFST 4
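+
+/* Illustrative sketch (not part of the generated MCDI definitions): consuming
+ * an MC_CMD_POLL_BIST response. As described above, OUTLEN is validated
+ * before reading anything, and the generic RESULT word is respected even if
+ * the PHY-specific part of the output cannot be parsed. Assumes kernel types,
+ * errno values and the get_unaligned_le32() helper; the function name is
+ * hypothetical.
+ */
+static inline int example_poll_bist_result(const u8 *outbuf, size_t outlen)
+{
+	u32 result;
+
+	if (outlen < MC_CMD_POLL_BIST_OUT_LEN)
+		return -EIO;
+	result = get_unaligned_le32(outbuf + MC_CMD_POLL_BIST_OUT_RESULT_OFST);
+	if (result == MC_CMD_POLL_BIST_RUNNING)
+		return -EAGAIN;		/* poll again later */
+	return result == MC_CMD_POLL_BIST_PASSED ? 0 : -EIO;
+}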
+
+/* MC_CMD_POLL_BIST_OUT_SFT9001 msgresponse */
+#define MC_CMD_POLL_BIST_OUT_SFT9001_LEN 36
+/* result */
+/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
+/* Enum values, see field(s): */
+/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_OFST 4
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_OFST 8
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_OFST 12
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_OFST 16
+/* Status of channel A */
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_OFST 20
+/* enum: Ok. */
+#define MC_CMD_POLL_BIST_SFT9001_PAIR_OK 0x1
+/* enum: Open. */
+#define MC_CMD_POLL_BIST_SFT9001_PAIR_OPEN 0x2
+/* enum: Intra-pair short. */
+#define MC_CMD_POLL_BIST_SFT9001_INTRA_PAIR_SHORT 0x3
+/* enum: Inter-pair short. */
+#define MC_CMD_POLL_BIST_SFT9001_INTER_PAIR_SHORT 0x4
+/* enum: Busy. */
+#define MC_CMD_POLL_BIST_SFT9001_PAIR_BUSY 0x9
+/* Status of channel B */
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_OFST 24
+/* Enum values, see field(s): */
+/* CABLE_STATUS_A */
+/* Status of channel C */
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_OFST 28
+/* Enum values, see field(s): */
+/* CABLE_STATUS_A */
+/* Status of channel D */
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_OFST 32
+/* Enum values, see field(s): */
+/* CABLE_STATUS_A */
+
+/* MC_CMD_POLL_BIST_OUT_MRSFP msgresponse */
+#define MC_CMD_POLL_BIST_OUT_MRSFP_LEN 8
+/* result */
+/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
+/* Enum values, see field(s): */
+/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
+#define MC_CMD_POLL_BIST_OUT_MRSFP_TEST_OFST 4
+/* enum: Complete. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_COMPLETE 0x0
+/* enum: Bus switch off I2C write. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_WRITE 0x1
+/* enum: Bus switch off I2C no access IO exp. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_IO_EXP 0x2
+/* enum: Bus switch off I2C no access module. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_MODULE 0x3
+/* enum: IO exp I2C configure. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_IO_EXP_I2C_CONFIGURE 0x4
+/* enum: Bus switch I2C no cross talk. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_I2C_NO_CROSSTALK 0x5
+/* enum: Module presence. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_PRESENCE 0x6
+/* enum: Module ID I2C access. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_I2C_ACCESS 0x7
+/* enum: Module ID sane value. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_SANE_VALUE 0x8
+
+/* MC_CMD_POLL_BIST_OUT_MEM msgresponse */
+#define MC_CMD_POLL_BIST_OUT_MEM_LEN 36
+/* result */
+/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
+/* Enum values, see field(s): */
+/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
+#define MC_CMD_POLL_BIST_OUT_MEM_TEST_OFST 4
+/* enum: Test has completed. */
+#define MC_CMD_POLL_BIST_MEM_COMPLETE 0x0
+/* enum: RAM test - walk ones. */
+#define MC_CMD_POLL_BIST_MEM_MEM_WALK_ONES 0x1
+/* enum: RAM test - walk zeros. */
+#define MC_CMD_POLL_BIST_MEM_MEM_WALK_ZEROS 0x2
+/* enum: RAM test - walking inversions zeros/ones. */
+#define MC_CMD_POLL_BIST_MEM_MEM_INV_ZERO_ONE 0x3
+/* enum: RAM test - walking inversions checkerboard. */
+#define MC_CMD_POLL_BIST_MEM_MEM_INV_CHKBOARD 0x4
+/* enum: Register test - set / clear individual bits. */
+#define MC_CMD_POLL_BIST_MEM_REG 0x5
+/* enum: ECC error detected. */
+#define MC_CMD_POLL_BIST_MEM_ECC 0x6
+/* Failure address, only valid if result is POLL_BIST_FAILED */
+#define MC_CMD_POLL_BIST_OUT_MEM_ADDR_OFST 8
+/* Bus or address space to which the failure address corresponds */
+#define MC_CMD_POLL_BIST_OUT_MEM_BUS_OFST 12
+/* enum: MC MIPS bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_MC 0x0
+/* enum: CSR IREG bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_CSR 0x1
+/* enum: RX DPCPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_RX 0x2
+/* enum: TX0 DPCPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_TX0 0x3
+/* enum: TX1 DPCPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_TX1 0x4
+/* enum: RX DICPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DICPU_RX 0x5
+/* enum: TX DICPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DICPU_TX 0x6
+/* Pattern written to RAM / register */
+#define MC_CMD_POLL_BIST_OUT_MEM_EXPECT_OFST 16
+/* Actual value read from RAM / register */
+#define MC_CMD_POLL_BIST_OUT_MEM_ACTUAL_OFST 20
+/* ECC error mask */
+#define MC_CMD_POLL_BIST_OUT_MEM_ECC_OFST 24
+/* ECC parity error mask */
+#define MC_CMD_POLL_BIST_OUT_MEM_ECC_PARITY_OFST 28
+/* ECC fatal error mask */
+#define MC_CMD_POLL_BIST_OUT_MEM_ECC_FATAL_OFST 32
+
+
+/***********************************/
+/* MC_CMD_FLUSH_RX_QUEUES
+ * Flush receive queue(s). If SRIOV is enabled (via MC_CMD_SRIOV), then RXQ
+ * flushes should be initiated via this MCDI operation, rather than via
+ * directly writing FLUSH_CMD.
+ *
+ * The flush is completed (either done/fail) asynchronously (after this command
+ * returns). The driver must still wait for flush done/failure events as usual.
+ */
+#define MC_CMD_FLUSH_RX_QUEUES 0x27
+
+/* MC_CMD_FLUSH_RX_QUEUES_IN msgrequest */
+#define MC_CMD_FLUSH_RX_QUEUES_IN_LENMIN 4
+#define MC_CMD_FLUSH_RX_QUEUES_IN_LENMAX 252
+#define MC_CMD_FLUSH_RX_QUEUES_IN_LEN(num) (0+4*(num))
+#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_OFST 0
+#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_LEN 4
+#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MINNUM 1
+#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM 63
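+
+/* Illustrative sketch (not part of the generated MCDI definitions): building a
+ * variable-length MC_CMD_FLUSH_RX_QUEUES_IN request, showing how the
+ * LENMIN/LENMAX/LEN(num) pattern used throughout this file is applied.
+ * Assumes the kernel's put_unaligned_le32() helper; names are hypothetical.
+ */
+static inline size_t example_fill_flush_rxqs(u8 *buf, const u32 *qids,
+					     unsigned int count)
+{
+	unsigned int i;
+
+	/* count must lie between QID_OFST_MINNUM and QID_OFST_MAXNUM */
+	for (i = 0; i < count; i++)
+		put_unaligned_le32(qids[i],
+				   buf + MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_OFST +
+				   i * MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_LEN);
+	return MC_CMD_FLUSH_RX_QUEUES_IN_LEN(count);	/* actual request length */
+}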
+
+/* MC_CMD_FLUSH_RX_QUEUES_OUT msgresponse */
+#define MC_CMD_FLUSH_RX_QUEUES_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_LOOPBACK_MODES
+ * Returns a bitmask of loopback modes available at each speed.
+ */
+#define MC_CMD_GET_LOOPBACK_MODES 0x28
+
+/* MC_CMD_GET_LOOPBACK_MODES_IN msgrequest */
+#define MC_CMD_GET_LOOPBACK_MODES_IN_LEN 0
+
+/* MC_CMD_GET_LOOPBACK_MODES_OUT msgresponse */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_LEN 40
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LO_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_HI_OFST 4
+/* enum: None. */
+#define MC_CMD_LOOPBACK_NONE 0x0
+/* enum: Data. */
+#define MC_CMD_LOOPBACK_DATA 0x1
+/* enum: GMAC. */
+#define MC_CMD_LOOPBACK_GMAC 0x2
+/* enum: XGMII. */
+#define MC_CMD_LOOPBACK_XGMII 0x3
+/* enum: XGXS. */
+#define MC_CMD_LOOPBACK_XGXS 0x4
+/* enum: XAUI. */
+#define MC_CMD_LOOPBACK_XAUI 0x5
+/* enum: GMII. */
+#define MC_CMD_LOOPBACK_GMII 0x6
+/* enum: SGMII. */
+#define MC_CMD_LOOPBACK_SGMII 0x7
+/* enum: XGBR. */
+#define MC_CMD_LOOPBACK_XGBR 0x8
+/* enum: XFI. */
+#define MC_CMD_LOOPBACK_XFI 0x9
+/* enum: XAUI Far. */
+#define MC_CMD_LOOPBACK_XAUI_FAR 0xa
+/* enum: GMII Far. */
+#define MC_CMD_LOOPBACK_GMII_FAR 0xb
+/* enum: SGMII Far. */
+#define MC_CMD_LOOPBACK_SGMII_FAR 0xc
+/* enum: XFI Far. */
+#define MC_CMD_LOOPBACK_XFI_FAR 0xd
+/* enum: GPhy. */
+#define MC_CMD_LOOPBACK_GPHY 0xe
+/* enum: PhyXS. */
+#define MC_CMD_LOOPBACK_PHYXS 0xf
+/* enum: PCS. */
+#define MC_CMD_LOOPBACK_PCS 0x10
+/* enum: PMA-PMD. */
+#define MC_CMD_LOOPBACK_PMAPMD 0x11
+/* enum: Cross-Port. */
+#define MC_CMD_LOOPBACK_XPORT 0x12
+/* enum: XGMII-Wireside. */
+#define MC_CMD_LOOPBACK_XGMII_WS 0x13
+/* enum: XAUI Wireside. */
+#define MC_CMD_LOOPBACK_XAUI_WS 0x14
+/* enum: XAUI Wireside Far. */
+#define MC_CMD_LOOPBACK_XAUI_WS_FAR 0x15
+/* enum: XAUI Wireside near. */
+#define MC_CMD_LOOPBACK_XAUI_WS_NEAR 0x16
+/* enum: GMII Wireside. */
+#define MC_CMD_LOOPBACK_GMII_WS 0x17
+/* enum: XFI Wireside. */
+#define MC_CMD_LOOPBACK_XFI_WS 0x18
+/* enum: XFI Wireside Far. */
+#define MC_CMD_LOOPBACK_XFI_WS_FAR 0x19
+/* enum: PhyXS Wireside. */
+#define MC_CMD_LOOPBACK_PHYXS_WS 0x1a
+/* enum: PMA lanes MAC-Serdes. */
+#define MC_CMD_LOOPBACK_PMA_INT 0x1b
+/* enum: KR Serdes Parallel (Encoder). */
+#define MC_CMD_LOOPBACK_SD_NEAR 0x1c
+/* enum: KR Serdes Serial. */
+#define MC_CMD_LOOPBACK_SD_FAR 0x1d
+/* enum: PMA lanes MAC-Serdes Wireside. */
+#define MC_CMD_LOOPBACK_PMA_INT_WS 0x1e
+/* enum: KR Serdes Parallel Wireside (Full PCS). */
+#define MC_CMD_LOOPBACK_SD_FEP2_WS 0x1f
+/* enum: KR Serdes Parallel Wireside (Sym Aligner to TX). */
+#define MC_CMD_LOOPBACK_SD_FEP1_5_WS 0x20
+/* enum: KR Serdes Parallel Wireside (Deserializer to Serializer). */
+#define MC_CMD_LOOPBACK_SD_FEP_WS 0x21
+/* enum: KR Serdes Serial Wireside. */
+#define MC_CMD_LOOPBACK_SD_FES_WS 0x22
+/* enum: Near side of AOE Siena side port */
+#define MC_CMD_LOOPBACK_AOE_INT_NEAR 0x23
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_OFST 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LO_OFST 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_HI_OFST 12
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_OFST 16
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LO_OFST 16
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_HI_OFST 20
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_OFST 24
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LO_OFST 24
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_HI_OFST 28
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_OFST 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LO_OFST 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_HI_OFST 36
+/* Enum values, see field(s): */
+/* 100M */
+
+
+/***********************************/
+/* MC_CMD_GET_LINK
+ * Read the unified MAC/PHY link state. Locks required: None. Return code: 0,
+ * ETIME.
+ */
+#define MC_CMD_GET_LINK 0x29
+
+/* MC_CMD_GET_LINK_IN msgrequest */
+#define MC_CMD_GET_LINK_IN_LEN 0
+
+/* MC_CMD_GET_LINK_OUT msgresponse */
+#define MC_CMD_GET_LINK_OUT_LEN 28
+/* near-side advertised capabilities */
+#define MC_CMD_GET_LINK_OUT_CAP_OFST 0
+/* link-partner advertised capabilities */
+#define MC_CMD_GET_LINK_OUT_LP_CAP_OFST 4
+/* Autonegotiated speed in mbit/s. The link may still be down even if this
+ * reads non-zero.
+ */
+#define MC_CMD_GET_LINK_OUT_LINK_SPEED_OFST 8
+/* Current loopback setting. */
+#define MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_OFST 12
+/* Enum values, see field(s): */
+/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
+#define MC_CMD_GET_LINK_OUT_FLAGS_OFST 16
+#define MC_CMD_GET_LINK_OUT_LINK_UP_LBN 0
+#define MC_CMD_GET_LINK_OUT_LINK_UP_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN 1
+#define MC_CMD_GET_LINK_OUT_FULL_DUPLEX_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_BPX_LINK_LBN 2
+#define MC_CMD_GET_LINK_OUT_BPX_LINK_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_PHY_LINK_LBN 3
+#define MC_CMD_GET_LINK_OUT_PHY_LINK_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_LINK_FAULT_RX_LBN 6
+#define MC_CMD_GET_LINK_OUT_LINK_FAULT_RX_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_LBN 7
+#define MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_WIDTH 1
+/* This returns the negotiated flow control value. */
+#define MC_CMD_GET_LINK_OUT_FCNTL_OFST 20
+/* enum: Flow control is off. */
+#define MC_CMD_FCNTL_OFF 0x0
+/* enum: Respond to flow control. */
+#define MC_CMD_FCNTL_RESPOND 0x1
+/* enum: Respond to and Issue flow control. */
+#define MC_CMD_FCNTL_BIDIR 0x2
+#define MC_CMD_GET_LINK_OUT_MAC_FAULT_OFST 24
+#define MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0
+#define MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1
+#define MC_CMD_MAC_FAULT_XGMII_REMOTE_LBN 1
+#define MC_CMD_MAC_FAULT_XGMII_REMOTE_WIDTH 1
+#define MC_CMD_MAC_FAULT_SGMII_REMOTE_LBN 2
+#define MC_CMD_MAC_FAULT_SGMII_REMOTE_WIDTH 1
+#define MC_CMD_MAC_FAULT_PENDING_RECONFIG_LBN 3
+#define MC_CMD_MAC_FAULT_PENDING_RECONFIG_WIDTH 1
+
+
+/***********************************/
+/* MC_CMD_SET_LINK
+ * Write the unified MAC/PHY link configuration. Locks required: None. Return
+ * code: 0, EINVAL, ETIME
+ */
+#define MC_CMD_SET_LINK 0x2a
+
+/* MC_CMD_SET_LINK_IN msgrequest */
+#define MC_CMD_SET_LINK_IN_LEN 16
+/* ??? */
+#define MC_CMD_SET_LINK_IN_CAP_OFST 0
+/* Flags */
+#define MC_CMD_SET_LINK_IN_FLAGS_OFST 4
+#define MC_CMD_SET_LINK_IN_LOWPOWER_LBN 0
+#define MC_CMD_SET_LINK_IN_LOWPOWER_WIDTH 1
+#define MC_CMD_SET_LINK_IN_POWEROFF_LBN 1
+#define MC_CMD_SET_LINK_IN_POWEROFF_WIDTH 1
+#define MC_CMD_SET_LINK_IN_TXDIS_LBN 2
+#define MC_CMD_SET_LINK_IN_TXDIS_WIDTH 1
+/* Loopback mode. */
+#define MC_CMD_SET_LINK_IN_LOOPBACK_MODE_OFST 8
+/* Enum values, see field(s): */
+/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
+/* A loopback speed of "0" is supported, and means (choose any available
+ * speed).
+ */
+#define MC_CMD_SET_LINK_IN_LOOPBACK_SPEED_OFST 12
+
+/* MC_CMD_SET_LINK_OUT msgresponse */
+#define MC_CMD_SET_LINK_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SET_ID_LED
+ * Set identification LED state. Locks required: None. Return code: 0, EINVAL
+ */
+#define MC_CMD_SET_ID_LED 0x2b
+
+/* MC_CMD_SET_ID_LED_IN msgrequest */
+#define MC_CMD_SET_ID_LED_IN_LEN 4
+/* Set LED state. */
+#define MC_CMD_SET_ID_LED_IN_STATE_OFST 0
+#define MC_CMD_LED_OFF 0x0 /* enum */
+#define MC_CMD_LED_ON 0x1 /* enum */
+#define MC_CMD_LED_DEFAULT 0x2 /* enum */
+
+/* MC_CMD_SET_ID_LED_OUT msgresponse */
+#define MC_CMD_SET_ID_LED_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SET_MAC
+ * Set MAC configuration. Locks required: None. Return code: 0, EINVAL
+ */
+#define MC_CMD_SET_MAC 0x2c
+
+/* MC_CMD_SET_MAC_IN msgrequest */
+#define MC_CMD_SET_MAC_IN_LEN 24
+/* The MTU is the MTU programmed directly into the XMAC/GMAC (inclusive of
+ * EtherII, VLAN, bug16011 padding).
+ */
+#define MC_CMD_SET_MAC_IN_MTU_OFST 0
+#define MC_CMD_SET_MAC_IN_DRAIN_OFST 4
+#define MC_CMD_SET_MAC_IN_ADDR_OFST 8
+#define MC_CMD_SET_MAC_IN_ADDR_LEN 8
+#define MC_CMD_SET_MAC_IN_ADDR_LO_OFST 8
+#define MC_CMD_SET_MAC_IN_ADDR_HI_OFST 12
+#define MC_CMD_SET_MAC_IN_REJECT_OFST 16
+#define MC_CMD_SET_MAC_IN_REJECT_UNCST_LBN 0
+#define MC_CMD_SET_MAC_IN_REJECT_UNCST_WIDTH 1
+#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_LBN 1
+#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_WIDTH 1
+#define MC_CMD_SET_MAC_IN_FCNTL_OFST 20
+/* enum: Flow control is off. */
+/* MC_CMD_FCNTL_OFF 0x0 */
+/* enum: Respond to flow control. */
+/* MC_CMD_FCNTL_RESPOND 0x1 */
+/* enum: Respond to and Issue flow control. */
+/* MC_CMD_FCNTL_BIDIR 0x2 */
+/* enum: Auto neg flow control. */
+#define MC_CMD_FCNTL_AUTO 0x3
+
+/* MC_CMD_SET_MAC_OUT msgresponse */
+#define MC_CMD_SET_MAC_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_PHY_STATS
+ * Get generic PHY statistics. This call returns the statistics for a generic
+ * PHY in a sparse array (indexed by the enumeration below). Each value is
+ * represented by a 32bit number. If the DMA_ADDR is 0, then no DMA is
+ * performed, and the statistics may be read from the message response. If
+ * DMA_ADDR != 0, then the statistics are DMAed to that (page-aligned)
+ * location. Locks required: None.
+ * Returns: 0, ETIME
+ */
+#define MC_CMD_PHY_STATS 0x2d
+
+/* MC_CMD_PHY_STATS_IN msgrequest */
+#define MC_CMD_PHY_STATS_IN_LEN 8
+/* ??? */
+#define MC_CMD_PHY_STATS_IN_DMA_ADDR_OFST 0
+#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LEN 8
+#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_PHY_STATS_IN_DMA_ADDR_HI_OFST 4
+
+/* MC_CMD_PHY_STATS_OUT_DMA msgresponse */
+#define MC_CMD_PHY_STATS_OUT_DMA_LEN 0
+
+/* MC_CMD_PHY_STATS_OUT_NO_DMA msgresponse */
+#define MC_CMD_PHY_STATS_OUT_NO_DMA_LEN (((MC_CMD_PHY_NSTATS*32))>>3)
+#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_OFST 0
+#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_LEN 4
+#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_PHY_NSTATS
+/* enum: OUI. */
+#define MC_CMD_OUI 0x0
+/* enum: PMA-PMD Link Up. */
+#define MC_CMD_PMA_PMD_LINK_UP 0x1
+/* enum: PMA-PMD RX Fault. */
+#define MC_CMD_PMA_PMD_RX_FAULT 0x2
+/* enum: PMA-PMD TX Fault. */
+#define MC_CMD_PMA_PMD_TX_FAULT 0x3
+/* enum: PMA-PMD Signal */
+#define MC_CMD_PMA_PMD_SIGNAL 0x4
+/* enum: PMA-PMD SNR A. */
+#define MC_CMD_PMA_PMD_SNR_A 0x5
+/* enum: PMA-PMD SNR B. */
+#define MC_CMD_PMA_PMD_SNR_B 0x6
+/* enum: PMA-PMD SNR C. */
+#define MC_CMD_PMA_PMD_SNR_C 0x7
+/* enum: PMA-PMD SNR D. */
+#define MC_CMD_PMA_PMD_SNR_D 0x8
+/* enum: PCS Link Up. */
+#define MC_CMD_PCS_LINK_UP 0x9
+/* enum: PCS RX Fault. */
+#define MC_CMD_PCS_RX_FAULT 0xa
+/* enum: PCS TX Fault. */
+#define MC_CMD_PCS_TX_FAULT 0xb
+/* enum: PCS BER. */
+#define MC_CMD_PCS_BER 0xc
+/* enum: PCS Block Errors. */
+#define MC_CMD_PCS_BLOCK_ERRORS 0xd
+/* enum: PhyXS Link Up. */
+#define MC_CMD_PHYXS_LINK_UP 0xe
+/* enum: PhyXS RX Fault. */
+#define MC_CMD_PHYXS_RX_FAULT 0xf
+/* enum: PhyXS TX Fault. */
+#define MC_CMD_PHYXS_TX_FAULT 0x10
+/* enum: PhyXS Align. */
+#define MC_CMD_PHYXS_ALIGN 0x11
+/* enum: PhyXS Sync. */
+#define MC_CMD_PHYXS_SYNC 0x12
+/* enum: AN link-up. */
+#define MC_CMD_AN_LINK_UP 0x13
+/* enum: AN Complete. */
+#define MC_CMD_AN_COMPLETE 0x14
+/* enum: AN 10GBaseT Status. */
+#define MC_CMD_AN_10GBT_STATUS 0x15
+/* enum: Clause 22 Link-Up. */
+#define MC_CMD_CL22_LINK_UP 0x16
+/* enum: (Last entry) */
+#define MC_CMD_PHY_NSTATS 0x17
+
+
+/***********************************/
+/* MC_CMD_MAC_STATS
+ * Get generic MAC statistics. This call returns unified statistics maintained
+ * by the MC as it switches between the GMAC and XMAC. The MC will write out
+ * all supported stats. The driver should zero initialise the buffer to
+ * guarantee consistent results. If the DMA_ADDR is 0, then no DMA is
+ * performed, and the statistics may be read from the message response. If
+ * DMA_ADDR != 0, then the statistics are DMAed to that (page-aligned) location.
+ * Locks required: None. Returns: 0, ETIME
+ */
+#define MC_CMD_MAC_STATS 0x2e
+
+/* MC_CMD_MAC_STATS_IN msgrequest */
+#define MC_CMD_MAC_STATS_IN_LEN 16
+/* ??? */
+#define MC_CMD_MAC_STATS_IN_DMA_ADDR_OFST 0
+#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LEN 8
+#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_MAC_STATS_IN_DMA_ADDR_HI_OFST 4
+#define MC_CMD_MAC_STATS_IN_CMD_OFST 8
+#define MC_CMD_MAC_STATS_IN_DMA_LBN 0
+#define MC_CMD_MAC_STATS_IN_DMA_WIDTH 1
+#define MC_CMD_MAC_STATS_IN_CLEAR_LBN 1
+#define MC_CMD_MAC_STATS_IN_CLEAR_WIDTH 1
+#define MC_CMD_MAC_STATS_IN_PERIODIC_CHANGE_LBN 2
+#define MC_CMD_MAC_STATS_IN_PERIODIC_CHANGE_WIDTH 1
+#define MC_CMD_MAC_STATS_IN_PERIODIC_ENABLE_LBN 3
+#define MC_CMD_MAC_STATS_IN_PERIODIC_ENABLE_WIDTH 1
+#define MC_CMD_MAC_STATS_IN_PERIODIC_CLEAR_LBN 4
+#define MC_CMD_MAC_STATS_IN_PERIODIC_CLEAR_WIDTH 1
+#define MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT_LBN 5
+#define MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT_WIDTH 1
+#define MC_CMD_MAC_STATS_IN_PERIOD_MS_LBN 16
+#define MC_CMD_MAC_STATS_IN_PERIOD_MS_WIDTH 16
+#define MC_CMD_MAC_STATS_IN_DMA_LEN_OFST 12
+
+/* MC_CMD_MAC_STATS_OUT_DMA msgresponse */
+#define MC_CMD_MAC_STATS_OUT_DMA_LEN 0
+
+/* MC_CMD_MAC_STATS_OUT_NO_DMA msgresponse */
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS*64))>>3)
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_OFST 0
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_LEN 8
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_LO_OFST 0
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_HI_OFST 4
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS
+#define MC_CMD_MAC_GENERATION_START 0x0 /* enum */
+#define MC_CMD_MAC_TX_PKTS 0x1 /* enum */
+#define MC_CMD_MAC_TX_PAUSE_PKTS 0x2 /* enum */
+#define MC_CMD_MAC_TX_CONTROL_PKTS 0x3 /* enum */
+#define MC_CMD_MAC_TX_UNICAST_PKTS 0x4 /* enum */
+#define MC_CMD_MAC_TX_MULTICAST_PKTS 0x5 /* enum */
+#define MC_CMD_MAC_TX_BROADCAST_PKTS 0x6 /* enum */
+#define MC_CMD_MAC_TX_BYTES 0x7 /* enum */
+#define MC_CMD_MAC_TX_BAD_BYTES 0x8 /* enum */
+#define MC_CMD_MAC_TX_LT64_PKTS 0x9 /* enum */
+#define MC_CMD_MAC_TX_64_PKTS 0xa /* enum */
+#define MC_CMD_MAC_TX_65_TO_127_PKTS 0xb /* enum */
+#define MC_CMD_MAC_TX_128_TO_255_PKTS 0xc /* enum */
+#define MC_CMD_MAC_TX_256_TO_511_PKTS 0xd /* enum */
+#define MC_CMD_MAC_TX_512_TO_1023_PKTS 0xe /* enum */
+#define MC_CMD_MAC_TX_1024_TO_15XX_PKTS 0xf /* enum */
+#define MC_CMD_MAC_TX_15XX_TO_JUMBO_PKTS 0x10 /* enum */
+#define MC_CMD_MAC_TX_GTJUMBO_PKTS 0x11 /* enum */
+#define MC_CMD_MAC_TX_BAD_FCS_PKTS 0x12 /* enum */
+#define MC_CMD_MAC_TX_SINGLE_COLLISION_PKTS 0x13 /* enum */
+#define MC_CMD_MAC_TX_MULTIPLE_COLLISION_PKTS 0x14 /* enum */
+#define MC_CMD_MAC_TX_EXCESSIVE_COLLISION_PKTS 0x15 /* enum */
+#define MC_CMD_MAC_TX_LATE_COLLISION_PKTS 0x16 /* enum */
+#define MC_CMD_MAC_TX_DEFERRED_PKTS 0x17 /* enum */
+#define MC_CMD_MAC_TX_EXCESSIVE_DEFERRED_PKTS 0x18 /* enum */
+#define MC_CMD_MAC_TX_NON_TCPUDP_PKTS 0x19 /* enum */
+#define MC_CMD_MAC_TX_MAC_SRC_ERR_PKTS 0x1a /* enum */
+#define MC_CMD_MAC_TX_IP_SRC_ERR_PKTS 0x1b /* enum */
+#define MC_CMD_MAC_RX_PKTS 0x1c /* enum */
+#define MC_CMD_MAC_RX_PAUSE_PKTS 0x1d /* enum */
+#define MC_CMD_MAC_RX_GOOD_PKTS 0x1e /* enum */
+#define MC_CMD_MAC_RX_CONTROL_PKTS 0x1f /* enum */
+#define MC_CMD_MAC_RX_UNICAST_PKTS 0x20 /* enum */
+#define MC_CMD_MAC_RX_MULTICAST_PKTS 0x21 /* enum */
+#define MC_CMD_MAC_RX_BROADCAST_PKTS 0x22 /* enum */
+#define MC_CMD_MAC_RX_BYTES 0x23 /* enum */
+#define MC_CMD_MAC_RX_BAD_BYTES 0x24 /* enum */
+#define MC_CMD_MAC_RX_64_PKTS 0x25 /* enum */
+#define MC_CMD_MAC_RX_65_TO_127_PKTS 0x26 /* enum */
+#define MC_CMD_MAC_RX_128_TO_255_PKTS 0x27 /* enum */
+#define MC_CMD_MAC_RX_256_TO_511_PKTS 0x28 /* enum */
+#define MC_CMD_MAC_RX_512_TO_1023_PKTS 0x29 /* enum */
+#define MC_CMD_MAC_RX_1024_TO_15XX_PKTS 0x2a /* enum */
+#define MC_CMD_MAC_RX_15XX_TO_JUMBO_PKTS 0x2b /* enum */
+#define MC_CMD_MAC_RX_GTJUMBO_PKTS 0x2c /* enum */
+#define MC_CMD_MAC_RX_UNDERSIZE_PKTS 0x2d /* enum */
+#define MC_CMD_MAC_RX_BAD_FCS_PKTS 0x2e /* enum */
+#define MC_CMD_MAC_RX_OVERFLOW_PKTS 0x2f /* enum */
+#define MC_CMD_MAC_RX_FALSE_CARRIER_PKTS 0x30 /* enum */
+#define MC_CMD_MAC_RX_SYMBOL_ERROR_PKTS 0x31 /* enum */
+#define MC_CMD_MAC_RX_ALIGN_ERROR_PKTS 0x32 /* enum */
+#define MC_CMD_MAC_RX_LENGTH_ERROR_PKTS 0x33 /* enum */
+#define MC_CMD_MAC_RX_INTERNAL_ERROR_PKTS 0x34 /* enum */
+#define MC_CMD_MAC_RX_JABBER_PKTS 0x35 /* enum */
+#define MC_CMD_MAC_RX_NODESC_DROPS 0x36 /* enum */
+#define MC_CMD_MAC_RX_LANES01_CHAR_ERR 0x37 /* enum */
+#define MC_CMD_MAC_RX_LANES23_CHAR_ERR 0x38 /* enum */
+#define MC_CMD_MAC_RX_LANES01_DISP_ERR 0x39 /* enum */
+#define MC_CMD_MAC_RX_LANES23_DISP_ERR 0x3a /* enum */
+#define MC_CMD_MAC_RX_MATCH_FAULT 0x3b /* enum */
+/* enum: PM trunc_bb_overflow counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define MC_CMD_MAC_PM_TRUNC_BB_OVERFLOW 0x3c
+/* enum: PM discard_bb_overflow counter. Valid for EF10 with
+ * PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_PM_DISCARD_BB_OVERFLOW 0x3d
+/* enum: PM trunc_vfifo_full counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define MC_CMD_MAC_PM_TRUNC_VFIFO_FULL 0x3e
+/* enum: PM discard_vfifo_full counter. Valid for EF10 with
+ * PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_PM_DISCARD_VFIFO_FULL 0x3f
+/* enum: PM trunc_qbb counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define MC_CMD_MAC_PM_TRUNC_QBB 0x40
+/* enum: PM discard_qbb counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define MC_CMD_MAC_PM_DISCARD_QBB 0x41
+/* enum: PM discard_mapping counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define MC_CMD_MAC_PM_DISCARD_MAPPING 0x42
+/* enum: RXDP counter: Number of packets dropped due to the queue being
+ * disabled. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_RXDP_Q_DISABLED_PKTS 0x43
+/* enum: RXDP counter: Number of packets dropped by the DICPU. Valid for EF10
+ * with PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_RXDP_DI_DROPPED_PKTS 0x45
+/* enum: RXDP counter: Number of non-host packets. Valid for EF10 with
+ * PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_RXDP_STREAMING_PKTS 0x46
+/* enum: RXDP counter: Number of times an emergency descriptor fetch was
+ * performed. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_RXDP_EMERGENCY_FETCH_CONDITIONS 0x47
+/* enum: RXDP counter: Number of times the DPCPU waited for an existing
+ * descriptor fetch. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_RXDP_EMERGENCY_WAIT_CONDITIONS 0x48
+/* enum: Start of GMAC stats buffer space, for Siena only. */
+#define MC_CMD_GMAC_DMABUF_START 0x40
+/* enum: End of GMAC stats buffer space, for Siena only. */
+#define MC_CMD_GMAC_DMABUF_END 0x5f
+#define MC_CMD_MAC_GENERATION_END 0x60 /* enum */
+#define MC_CMD_MAC_NSTATS 0x61 /* enum */
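+
+/* A minimal sketch of enabling periodic MAC statistics DMA, assuming the MCDI
+ * helper macros from mcdi.h (MCDI_DECLARE_BUF, MCDI_SET_QWORD, MCDI_SET_DWORD,
+ * MCDI_POPULATE_DWORD_4, efx_mcdi_rpc); "efx", "rc", "dma_addr" and "dma_len"
+ * are illustrative, and the 1000ms period is only an example value:
+ *
+ *   MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN);
+ *
+ *   MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, dma_addr);
+ *   MCDI_POPULATE_DWORD_4(inbuf, MAC_STATS_IN_CMD,
+ *                         MAC_STATS_IN_DMA, 1,
+ *                         MAC_STATS_IN_PERIODIC_CHANGE, 1,
+ *                         MAC_STATS_IN_PERIODIC_ENABLE, 1,
+ *                         MAC_STATS_IN_PERIOD_MS, 1000);
+ *   MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
+ *   rc = efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
+ *                     NULL, 0, NULL);
+ */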
+
+
+/***********************************/
+/* MC_CMD_SRIOV
+ * to be documented
+ */
+#define MC_CMD_SRIOV 0x30
+
+/* MC_CMD_SRIOV_IN msgrequest */
+#define MC_CMD_SRIOV_IN_LEN 12
+#define MC_CMD_SRIOV_IN_ENABLE_OFST 0
+#define MC_CMD_SRIOV_IN_VI_BASE_OFST 4
+#define MC_CMD_SRIOV_IN_VF_COUNT_OFST 8
+
+/* MC_CMD_SRIOV_OUT msgresponse */
+#define MC_CMD_SRIOV_OUT_LEN 8
+#define MC_CMD_SRIOV_OUT_VI_SCALE_OFST 0
+#define MC_CMD_SRIOV_OUT_VF_TOTAL_OFST 4
+
+/* MC_CMD_MEMCPY_RECORD_TYPEDEF structuredef */
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LEN 32
+/* this is only used for the first record */
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_OFST 0
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LBN 0
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_WIDTH 32
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_OFST 4
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_LBN 32
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_WIDTH 32
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_OFST 8
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LEN 8
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO_OFST 8
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI_OFST 12
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LBN 64
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_WIDTH 64
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_OFST 16
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE 0x100 /* enum */
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_LBN 128
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_WIDTH 32
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_OFST 20
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LEN 8
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO_OFST 20
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI_OFST 24
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LBN 160
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_WIDTH 64
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_OFST 28
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_LBN 224
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_MEMCPY
+ * DMA write data into (Rid,Addr), either by dma reading (Rid,Addr), or by data
+ * embedded directly in the command.
+ *
+ * A common pattern is for a client to use generation counts to signal a DMA
+ * update of a data structure. To facilitate this, this MCDI operation can
+ * contain multiple requests, which are executed in strict order. Requests take
+ * the form of duplicating the entire MCDI request continuously (including the
+ * record count, which is ignored in all but the first record).
+ *
+ * The source data can either come from a DMA read from the host, or it can be
+ * embedded within the request directly, thereby eliminating a DMA read. To
+ * indicate this, the client sets FROM_RID=%RID_INLINE, ADDR_HI=0, and
+ * ADDR_LO=offset, and inserts the data at %offset from the start of the
+ * payload. It is the caller's responsibility to ensure that the embedded data
+ * does not overlap the records.
+ *
+ * Returns: 0, EINVAL (invalid RID)
+ */
+#define MC_CMD_MEMCPY 0x31
+
+/* MC_CMD_MEMCPY_IN msgrequest */
+#define MC_CMD_MEMCPY_IN_LENMIN 32
+#define MC_CMD_MEMCPY_IN_LENMAX 224
+#define MC_CMD_MEMCPY_IN_LEN(num) (0+32*(num))
+/* see MC_CMD_MEMCPY_RECORD_TYPEDEF */
+#define MC_CMD_MEMCPY_IN_RECORD_OFST 0
+#define MC_CMD_MEMCPY_IN_RECORD_LEN 32
+#define MC_CMD_MEMCPY_IN_RECORD_MINNUM 1
+#define MC_CMD_MEMCPY_IN_RECORD_MAXNUM 7
+
+/* MC_CMD_MEMCPY_OUT msgresponse */
+#define MC_CMD_MEMCPY_OUT_LEN 0
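+
+/* A minimal sketch of a single-record MC_CMD_MEMCPY using inline source data,
+ * assuming the MCDI helper macros from mcdi.h; the inline payload is placed
+ * immediately after the 32-byte record, so FROM_ADDR carries offset 32 from
+ * the start of the payload. "efx", "rc", "to_rid", "to_addr" and "src" are
+ * illustrative:
+ *
+ *   MCDI_DECLARE_BUF(inbuf, MC_CMD_MEMCPY_IN_LEN(1) + 8);
+ *
+ *   MCDI_SET_DWORD(inbuf, MEMCPY_RECORD_TYPEDEF_NUM_RECORDS, 1);
+ *   MCDI_SET_DWORD(inbuf, MEMCPY_RECORD_TYPEDEF_TO_RID, to_rid);
+ *   MCDI_SET_QWORD(inbuf, MEMCPY_RECORD_TYPEDEF_TO_ADDR, to_addr);
+ *   MCDI_SET_DWORD(inbuf, MEMCPY_RECORD_TYPEDEF_FROM_RID,
+ *                  MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE);
+ *   MCDI_SET_QWORD(inbuf, MEMCPY_RECORD_TYPEDEF_FROM_ADDR, 32);
+ *   MCDI_SET_DWORD(inbuf, MEMCPY_RECORD_TYPEDEF_LENGTH, 8);
+ *   memcpy(MCDI_PTR(inbuf, MEMCPY_IN_RECORD) + 32, src, 8);
+ *   rc = efx_mcdi_rpc(efx, MC_CMD_MEMCPY, inbuf, sizeof(inbuf), NULL, 0, NULL);
+ */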
+
+
+/***********************************/
+/* MC_CMD_WOL_FILTER_SET
+ * Set a WoL filter.
+ */
+#define MC_CMD_WOL_FILTER_SET 0x32
+
+/* MC_CMD_WOL_FILTER_SET_IN msgrequest */
+#define MC_CMD_WOL_FILTER_SET_IN_LEN 192
+#define MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0
+#define MC_CMD_FILTER_MODE_SIMPLE 0x0 /* enum */
+#define MC_CMD_FILTER_MODE_STRUCTURED 0xffffffff /* enum */
+/* A type value of 1 is unused. */
+#define MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4
+/* enum: Magic */
+#define MC_CMD_WOL_TYPE_MAGIC 0x0
+/* enum: MS Windows Magic */
+#define MC_CMD_WOL_TYPE_WIN_MAGIC 0x2
+/* enum: IPv4 Syn */
+#define MC_CMD_WOL_TYPE_IPV4_SYN 0x3
+/* enum: IPv6 Syn */
+#define MC_CMD_WOL_TYPE_IPV6_SYN 0x4
+/* enum: Bitmap */
+#define MC_CMD_WOL_TYPE_BITMAP 0x5
+/* enum: Link */
+#define MC_CMD_WOL_TYPE_LINK 0x6
+/* enum: (Above this for future use) */
+#define MC_CMD_WOL_TYPE_MAX 0x7
+#define MC_CMD_WOL_FILTER_SET_IN_DATA_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_DATA_LEN 4
+#define MC_CMD_WOL_FILTER_SET_IN_DATA_NUM 46
+
+/* MC_CMD_WOL_FILTER_SET_IN_MAGIC msgrequest */
+#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_LEN 16
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LEN 8
+#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LO_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_HI_OFST 12
+
+/* MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN msgrequest */
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_LEN 20
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_IP_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_IP_OFST 12
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_PORT_OFST 16
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_PORT_LEN 2
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_PORT_OFST 18
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_PORT_LEN 2
+
+/* MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN msgrequest */
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_LEN 44
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_IP_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_IP_LEN 16
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_IP_OFST 24
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_IP_LEN 16
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_PORT_OFST 40
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_PORT_LEN 2
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_PORT_OFST 42
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_PORT_LEN 2
+
+/* MC_CMD_WOL_FILTER_SET_IN_BITMAP msgrequest */
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN 187
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_MASK_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_MASK_LEN 48
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_BITMAP_OFST 56
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_BITMAP_LEN 128
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN_OFST 184
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN_LEN 1
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER3_OFST 185
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER3_LEN 1
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER4_OFST 186
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER4_LEN 1
+
+/* MC_CMD_WOL_FILTER_SET_IN_LINK msgrequest */
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_LEN 12
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_MASK_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_LBN 0
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_WIDTH 1
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_LBN 1
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_WIDTH 1
+
+/* MC_CMD_WOL_FILTER_SET_OUT msgresponse */
+#define MC_CMD_WOL_FILTER_SET_OUT_LEN 4
+#define MC_CMD_WOL_FILTER_SET_OUT_FILTER_ID_OFST 0
+
+
+/***********************************/
+/* MC_CMD_WOL_FILTER_REMOVE
+ * Remove a WoL filter. Locks required: None. Returns: 0, EINVAL, ENOSYS
+ */
+#define MC_CMD_WOL_FILTER_REMOVE 0x33
+
+/* MC_CMD_WOL_FILTER_REMOVE_IN msgrequest */
+#define MC_CMD_WOL_FILTER_REMOVE_IN_LEN 4
+#define MC_CMD_WOL_FILTER_REMOVE_IN_FILTER_ID_OFST 0
+
+/* MC_CMD_WOL_FILTER_REMOVE_OUT msgresponse */
+#define MC_CMD_WOL_FILTER_REMOVE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_WOL_FILTER_RESET
+ * Reset (i.e. remove all) WoL filters. Locks required: None. Returns: 0,
+ * ENOSYS
+ */
+#define MC_CMD_WOL_FILTER_RESET 0x34
+
+/* MC_CMD_WOL_FILTER_RESET_IN msgrequest */
+#define MC_CMD_WOL_FILTER_RESET_IN_LEN 4
+#define MC_CMD_WOL_FILTER_RESET_IN_MASK_OFST 0
+#define MC_CMD_WOL_FILTER_RESET_IN_WAKE_FILTERS 0x1 /* enum */
+#define MC_CMD_WOL_FILTER_RESET_IN_LIGHTSOUT_OFFLOADS 0x2 /* enum */
+
+/* MC_CMD_WOL_FILTER_RESET_OUT msgresponse */
+#define MC_CMD_WOL_FILTER_RESET_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SET_MCAST_HASH
+ * Set the MCAST hash value without otherwise reconfiguring the MAC
+ */
+#define MC_CMD_SET_MCAST_HASH 0x35
+
+/* MC_CMD_SET_MCAST_HASH_IN msgrequest */
+#define MC_CMD_SET_MCAST_HASH_IN_LEN 32
+#define MC_CMD_SET_MCAST_HASH_IN_HASH0_OFST 0
+#define MC_CMD_SET_MCAST_HASH_IN_HASH0_LEN 16
+#define MC_CMD_SET_MCAST_HASH_IN_HASH1_OFST 16
+#define MC_CMD_SET_MCAST_HASH_IN_HASH1_LEN 16
+
+/* MC_CMD_SET_MCAST_HASH_OUT msgresponse */
+#define MC_CMD_SET_MCAST_HASH_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_NVRAM_TYPES
+ * Return bitfield indicating available types of virtual NVRAM partitions.
+ * Locks required: none. Returns: 0
+ */
+#define MC_CMD_NVRAM_TYPES 0x36
+
+/* MC_CMD_NVRAM_TYPES_IN msgrequest */
+#define MC_CMD_NVRAM_TYPES_IN_LEN 0
+
+/* MC_CMD_NVRAM_TYPES_OUT msgresponse */
+#define MC_CMD_NVRAM_TYPES_OUT_LEN 4
+/* Bit mask of supported types. */
+#define MC_CMD_NVRAM_TYPES_OUT_TYPES_OFST 0
+/* enum: Disabled callisto. */
+#define MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO 0x0
+/* enum: MC firmware. */
+#define MC_CMD_NVRAM_TYPE_MC_FW 0x1
+/* enum: MC backup firmware. */
+#define MC_CMD_NVRAM_TYPE_MC_FW_BACKUP 0x2
+/* enum: Static configuration Port0. */
+#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0 0x3
+/* enum: Static configuration Port1. */
+#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1 0x4
+/* enum: Dynamic configuration Port0. */
+#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0 0x5
+/* enum: Dynamic configuration Port1. */
+#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1 0x6
+/* enum: Expansion Rom. */
+#define MC_CMD_NVRAM_TYPE_EXP_ROM 0x7
+/* enum: Expansion Rom Configuration Port0. */
+#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0 0x8
+/* enum: Expansion Rom Configuration Port1. */
+#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1 0x9
+/* enum: Phy Configuration Port0. */
+#define MC_CMD_NVRAM_TYPE_PHY_PORT0 0xa
+/* enum: Phy Configuration Port1. */
+#define MC_CMD_NVRAM_TYPE_PHY_PORT1 0xb
+/* enum: Log. */
+#define MC_CMD_NVRAM_TYPE_LOG 0xc
+/* enum: FPGA image. */
+#define MC_CMD_NVRAM_TYPE_FPGA 0xd
+/* enum: FPGA backup image */
+#define MC_CMD_NVRAM_TYPE_FPGA_BACKUP 0xe
+/* enum: FC firmware. */
+#define MC_CMD_NVRAM_TYPE_FC_FW 0xf
+/* enum: FC backup firmware. */
+#define MC_CMD_NVRAM_TYPE_FC_FW_BACKUP 0x10
+/* enum: CPLD image. */
+#define MC_CMD_NVRAM_TYPE_CPLD 0x11
+/* enum: Licensing information. */
+#define MC_CMD_NVRAM_TYPE_LICENSE 0x12
+/* enum: FC Log. */
+#define MC_CMD_NVRAM_TYPE_FC_LOG 0x13
+
+
+/***********************************/
+/* MC_CMD_NVRAM_INFO
+ * Read info about a virtual NVRAM partition. Locks required: none. Returns: 0,
+ * EINVAL (bad type).
+ */
+#define MC_CMD_NVRAM_INFO 0x37
+
+/* MC_CMD_NVRAM_INFO_IN msgrequest */
+#define MC_CMD_NVRAM_INFO_IN_LEN 4
+#define MC_CMD_NVRAM_INFO_IN_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+
+/* MC_CMD_NVRAM_INFO_OUT msgresponse */
+#define MC_CMD_NVRAM_INFO_OUT_LEN 24
+#define MC_CMD_NVRAM_INFO_OUT_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_INFO_OUT_SIZE_OFST 4
+#define MC_CMD_NVRAM_INFO_OUT_ERASESIZE_OFST 8
+#define MC_CMD_NVRAM_INFO_OUT_FLAGS_OFST 12
+#define MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN 0
+#define MC_CMD_NVRAM_INFO_OUT_PROTECTED_WIDTH 1
+#define MC_CMD_NVRAM_INFO_OUT_TLV_LBN 1
+#define MC_CMD_NVRAM_INFO_OUT_TLV_WIDTH 1
+#define MC_CMD_NVRAM_INFO_OUT_A_B_LBN 7
+#define MC_CMD_NVRAM_INFO_OUT_A_B_WIDTH 1
+#define MC_CMD_NVRAM_INFO_OUT_PHYSDEV_OFST 16
+#define MC_CMD_NVRAM_INFO_OUT_PHYSADDR_OFST 20
+
+
+/***********************************/
+/* MC_CMD_NVRAM_UPDATE_START
+ * Start a group of update operations on a virtual NVRAM partition. Locks
+ * required: PHY_LOCK if type==*PHY*. Returns: 0, EINVAL (bad type), EACCES (if
+ * PHY_LOCK required and not held).
+ */
+#define MC_CMD_NVRAM_UPDATE_START 0x38
+
+/* MC_CMD_NVRAM_UPDATE_START_IN msgrequest */
+#define MC_CMD_NVRAM_UPDATE_START_IN_LEN 4
+#define MC_CMD_NVRAM_UPDATE_START_IN_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+
+/* MC_CMD_NVRAM_UPDATE_START_OUT msgresponse */
+#define MC_CMD_NVRAM_UPDATE_START_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_NVRAM_READ
+ * Read data from a virtual NVRAM partition. Locks required: PHY_LOCK if
+ * type==*PHY*. Returns: 0, EINVAL (bad type/offset/length), EACCES (if
+ * PHY_LOCK required and not held)
+ */
+#define MC_CMD_NVRAM_READ 0x39
+
+/* MC_CMD_NVRAM_READ_IN msgrequest */
+#define MC_CMD_NVRAM_READ_IN_LEN 12
+#define MC_CMD_NVRAM_READ_IN_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_READ_IN_OFFSET_OFST 4
+/* amount to read in bytes */
+#define MC_CMD_NVRAM_READ_IN_LENGTH_OFST 8
+
+/* MC_CMD_NVRAM_READ_OUT msgresponse */
+#define MC_CMD_NVRAM_READ_OUT_LENMIN 1
+#define MC_CMD_NVRAM_READ_OUT_LENMAX 252
+#define MC_CMD_NVRAM_READ_OUT_LEN(num) (0+1*(num))
+#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_OFST 0
+#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_LEN 1
+#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MINNUM 1
+#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MAXNUM 252
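+
+/* A minimal sketch of a single MC_CMD_NVRAM_READ, assuming the MCDI helper
+ * macros from mcdi.h; "efx", "rc", "type", "offset", "length" (at most
+ * MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MAXNUM bytes per call) and "buffer" are
+ * illustrative:
+ *
+ *   MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_READ_IN_LEN);
+ *   MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_READ_OUT_LENMAX);
+ *   size_t outlen;
+ *
+ *   MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type);
+ *   MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, offset);
+ *   MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length);
+ *   rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf),
+ *                     outbuf, sizeof(outbuf), &outlen);
+ *   if (rc == 0)
+ *           memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length);
+ */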
+
+
+/***********************************/
+/* MC_CMD_NVRAM_WRITE
+ * Write data to a virtual NVRAM partition. Locks required: PHY_LOCK if
+ * type==*PHY*. Returns: 0, EINVAL (bad type/offset/length), EACCES (if
+ * PHY_LOCK required and not held)
+ */
+#define MC_CMD_NVRAM_WRITE 0x3a
+
+/* MC_CMD_NVRAM_WRITE_IN msgrequest */
+#define MC_CMD_NVRAM_WRITE_IN_LENMIN 13
+#define MC_CMD_NVRAM_WRITE_IN_LENMAX 252
+#define MC_CMD_NVRAM_WRITE_IN_LEN(num) (12+1*(num))
+#define MC_CMD_NVRAM_WRITE_IN_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_WRITE_IN_OFFSET_OFST 4
+#define MC_CMD_NVRAM_WRITE_IN_LENGTH_OFST 8
+#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_OFST 12
+#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_LEN 1
+#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MINNUM 1
+#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MAXNUM 240
+
+/* MC_CMD_NVRAM_WRITE_OUT msgresponse */
+#define MC_CMD_NVRAM_WRITE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_NVRAM_ERASE
+ * Erase sector(s) from a virtual NVRAM partition. Locks required: PHY_LOCK if
+ * type==*PHY*. Returns: 0, EINVAL (bad type/offset/length), EACCES (if
+ * PHY_LOCK required and not held)
+ */
+#define MC_CMD_NVRAM_ERASE 0x3b
+
+/* MC_CMD_NVRAM_ERASE_IN msgrequest */
+#define MC_CMD_NVRAM_ERASE_IN_LEN 12
+#define MC_CMD_NVRAM_ERASE_IN_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_ERASE_IN_OFFSET_OFST 4
+#define MC_CMD_NVRAM_ERASE_IN_LENGTH_OFST 8
+
+/* MC_CMD_NVRAM_ERASE_OUT msgresponse */
+#define MC_CMD_NVRAM_ERASE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_NVRAM_UPDATE_FINISH
+ * Finish a group of update operations on a virtual NVRAM partition. Locks
+ * required: PHY_LOCK if type==*PHY*. Returns: 0, EINVAL (bad
+ * type/offset/length), EACCES (if PHY_LOCK required and not held)
+ */
+#define MC_CMD_NVRAM_UPDATE_FINISH 0x3c
+
+/* MC_CMD_NVRAM_UPDATE_FINISH_IN msgrequest */
+#define MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN 8
+#define MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_UPDATE_FINISH_IN_REBOOT_OFST 4
+
+/* MC_CMD_NVRAM_UPDATE_FINISH_OUT msgresponse */
+#define MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN 0
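+
+/* The update commands are intended to be used as a sequence; a sketch of that
+ * flow, assuming NVRAM helper wrappers along the lines of those in mcdi.c
+ * (efx_mcdi_nvram_update_start/erase/write/update_finish), with the erase
+ * length respecting the erase size reported by MC_CMD_NVRAM_INFO:
+ *
+ *   efx_mcdi_nvram_update_start(efx, type);
+ *   efx_mcdi_nvram_erase(efx, type, offset, erase_len);
+ *   efx_mcdi_nvram_write(efx, type, offset, data, data_len);
+ *   efx_mcdi_nvram_update_finish(efx, type);
+ */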
+
+
+/***********************************/
+/* MC_CMD_REBOOT
+ * Reboot the MC.
+ *
+ * The AFTER_ASSERTION flag is intended to be used when the driver notices an
+ * assertion failure (at which point it is expected to perform a complete tear
+ * down and reinitialise), to allow both ports to reset the MC once in an
+ * atomic fashion.
+ *
+ * Production MC firmware is generally compiled with REBOOT_ON_ASSERT=1, which
+ * means that it will automatically reboot out of the assertion handler, so in
+ * practice this is an optional operation. It is still recommended that drivers
+ * execute it to support custom firmware built with REBOOT_ON_ASSERT=0.
+ *
+ * Locks required: NONE Returns: Nothing. You get back a response with ERR=1,
+ * DATALEN=0
+ */
+#define MC_CMD_REBOOT 0x3d
+
+/* MC_CMD_REBOOT_IN msgrequest */
+#define MC_CMD_REBOOT_IN_LEN 4
+#define MC_CMD_REBOOT_IN_FLAGS_OFST 0
+#define MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION 0x1 /* enum */
+
+/* MC_CMD_REBOOT_OUT msgresponse */
+#define MC_CMD_REBOOT_OUT_LEN 0
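+
+/* A minimal sketch of requesting an MC reboot after an assertion failure,
+ * assuming the MCDI helper macros from mcdi.h; because the MC reboots rather
+ * than completing the command, the response surfaces as an error (typically
+ * -EIO), which is the expected outcome here. "efx" and "rc" are illustrative:
+ *
+ *   MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);
+ *
+ *   MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS,
+ *                  MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION);
+ *   rc = efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, sizeof(inbuf), NULL, 0, NULL);
+ *   if (rc == -EIO)
+ *           rc = 0;
+ */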
+
+
+/***********************************/
+/* MC_CMD_SCHEDINFO
+ * Request scheduler info. Locks required: NONE. Returns: An array of
+ * (timeslice, maximum overrun), one for each thread, in ascending order of
+ * thread address.
+ */
+#define MC_CMD_SCHEDINFO 0x3e
+
+/* MC_CMD_SCHEDINFO_IN msgrequest */
+#define MC_CMD_SCHEDINFO_IN_LEN 0
+
+/* MC_CMD_SCHEDINFO_OUT msgresponse */
+#define MC_CMD_SCHEDINFO_OUT_LENMIN 4
+#define MC_CMD_SCHEDINFO_OUT_LENMAX 252
+#define MC_CMD_SCHEDINFO_OUT_LEN(num) (0+4*(num))
+#define MC_CMD_SCHEDINFO_OUT_DATA_OFST 0
+#define MC_CMD_SCHEDINFO_OUT_DATA_LEN 4
+#define MC_CMD_SCHEDINFO_OUT_DATA_MINNUM 1
+#define MC_CMD_SCHEDINFO_OUT_DATA_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_REBOOT_MODE
+ * Set the mode for the next MC reboot. Locks required: NONE. Sets the reboot
+ * mode to the specified value. Returns the old mode.
+ */
+#define MC_CMD_REBOOT_MODE 0x3f
+
+/* MC_CMD_REBOOT_MODE_IN msgrequest */
+#define MC_CMD_REBOOT_MODE_IN_LEN 4
+#define MC_CMD_REBOOT_MODE_IN_VALUE_OFST 0
+/* enum: Normal. */
+#define MC_CMD_REBOOT_MODE_NORMAL 0x0
+/* enum: Power-on Reset. */
+#define MC_CMD_REBOOT_MODE_POR 0x2
+/* enum: Snapper. */
+#define MC_CMD_REBOOT_MODE_SNAPPER 0x3
+/* enum: snapper fake POR */
+#define MC_CMD_REBOOT_MODE_SNAPPER_POR 0x4
+#define MC_CMD_REBOOT_MODE_IN_FAKE_LBN 7
+#define MC_CMD_REBOOT_MODE_IN_FAKE_WIDTH 1
+
+/* MC_CMD_REBOOT_MODE_OUT msgresponse */
+#define MC_CMD_REBOOT_MODE_OUT_LEN 4
+#define MC_CMD_REBOOT_MODE_OUT_VALUE_OFST 0
+
+
+/***********************************/
+/* MC_CMD_SENSOR_INFO
+ * Returns information about every available sensor.
+ *
+ * Each sensor has a single (16bit) value, and a corresponding state. The
+ * mapping between value and state is nominally determined by the MC, but may
+ * be implemented using up to 2 ranges per sensor.
+ *
+ * This call returns a mask (32bit) of the sensors that are supported by this
+ * platform, then an array of sensor information structures, in order of sensor
+ * type (but without gaps for unimplemented sensors). Each structure defines
+ * the ranges for the corresponding sensor. An unused range is indicated by
+ * equal limit values. If one range is used, a value outside that range results
+ * in STATE_FATAL. If two ranges are used, a value outside the second range
+ * results in STATE_FATAL while a value outside the first and inside the second
+ * range results in STATE_WARNING.
+ *
+ * Sensor masks and sensor information arrays are organised into pages. For
+ * backward compatibility, older host software can only use sensors in page 0.
+ * Bit 31 in the sensor mask was previously unused, and is now reserved for use
+ * as the next page flag.
+ *
+ * If the request does not contain a PAGE value then firmware will only return
+ * page 0 of sensor information, with bit 31 in the sensor mask cleared.
+ *
+ * If the request contains a PAGE value then firmware responds with the sensor
+ * mask and sensor information array for that page of sensors. In this case bit
+ * 31 in the mask is set if another page exists.
+ *
+ * Locks required: None Returns: 0
+ */
+#define MC_CMD_SENSOR_INFO 0x41
+
+/* MC_CMD_SENSOR_INFO_IN msgrequest */
+#define MC_CMD_SENSOR_INFO_IN_LEN 0
+
+/* MC_CMD_SENSOR_INFO_EXT_IN msgrequest */
+#define MC_CMD_SENSOR_INFO_EXT_IN_LEN 4
+/* Which page of sensors to report.
+ *
+ * Page 0 contains sensors 0 to 30 (sensor 31 is the next page bit).
+ *
+ * Page 1 contains sensors 32 to 62 (sensor 63 is the next page bit). etc.
+ */
+#define MC_CMD_SENSOR_INFO_EXT_IN_PAGE_OFST 0
+
+/* MC_CMD_SENSOR_INFO_OUT msgresponse */
+#define MC_CMD_SENSOR_INFO_OUT_LENMIN 4
+#define MC_CMD_SENSOR_INFO_OUT_LENMAX 252
+#define MC_CMD_SENSOR_INFO_OUT_LEN(num) (4+8*(num))
+#define MC_CMD_SENSOR_INFO_OUT_MASK_OFST 0
+/* enum: Controller temperature: degC */
+#define MC_CMD_SENSOR_CONTROLLER_TEMP 0x0
+/* enum: Phy common temperature: degC */
+#define MC_CMD_SENSOR_PHY_COMMON_TEMP 0x1
+/* enum: Controller cooling: bool */
+#define MC_CMD_SENSOR_CONTROLLER_COOLING 0x2
+/* enum: Phy 0 temperature: degC */
+#define MC_CMD_SENSOR_PHY0_TEMP 0x3
+/* enum: Phy 0 cooling: bool */
+#define MC_CMD_SENSOR_PHY0_COOLING 0x4
+/* enum: Phy 1 temperature: degC */
+#define MC_CMD_SENSOR_PHY1_TEMP 0x5
+/* enum: Phy 1 cooling: bool */
+#define MC_CMD_SENSOR_PHY1_COOLING 0x6
+/* enum: 1.0v power: mV */
+#define MC_CMD_SENSOR_IN_1V0 0x7
+/* enum: 1.2v power: mV */
+#define MC_CMD_SENSOR_IN_1V2 0x8
+/* enum: 1.8v power: mV */
+#define MC_CMD_SENSOR_IN_1V8 0x9
+/* enum: 2.5v power: mV */
+#define MC_CMD_SENSOR_IN_2V5 0xa
+/* enum: 3.3v power: mV */
+#define MC_CMD_SENSOR_IN_3V3 0xb
+/* enum: 12v power: mV */
+#define MC_CMD_SENSOR_IN_12V0 0xc
+/* enum: 1.2v analogue power: mV */
+#define MC_CMD_SENSOR_IN_1V2A 0xd
+/* enum: reference voltage: mV */
+#define MC_CMD_SENSOR_IN_VREF 0xe
+/* enum: AOE FPGA power: mV */
+#define MC_CMD_SENSOR_OUT_VAOE 0xf
+/* enum: AOE FPGA temperature: degC */
+#define MC_CMD_SENSOR_AOE_TEMP 0x10
+/* enum: AOE FPGA PSU temperature: degC */
+#define MC_CMD_SENSOR_PSU_AOE_TEMP 0x11
+/* enum: AOE PSU temperature: degC */
+#define MC_CMD_SENSOR_PSU_TEMP 0x12
+/* enum: Fan 0 speed: RPM */
+#define MC_CMD_SENSOR_FAN_0 0x13
+/* enum: Fan 1 speed: RPM */
+#define MC_CMD_SENSOR_FAN_1 0x14
+/* enum: Fan 2 speed: RPM */
+#define MC_CMD_SENSOR_FAN_2 0x15
+/* enum: Fan 3 speed: RPM */
+#define MC_CMD_SENSOR_FAN_3 0x16
+/* enum: Fan 4 speed: RPM */
+#define MC_CMD_SENSOR_FAN_4 0x17
+/* enum: AOE FPGA input power: mV */
+#define MC_CMD_SENSOR_IN_VAOE 0x18
+/* enum: AOE FPGA current: mA */
+#define MC_CMD_SENSOR_OUT_IAOE 0x19
+/* enum: AOE FPGA input current: mA */
+#define MC_CMD_SENSOR_IN_IAOE 0x1a
+/* enum: NIC power consumption: W */
+#define MC_CMD_SENSOR_NIC_POWER 0x1b
+/* enum: 0.9v power voltage: mV */
+#define MC_CMD_SENSOR_IN_0V9 0x1c
+/* enum: 0.9v power current: mA */
+#define MC_CMD_SENSOR_IN_I0V9 0x1d
+/* enum: 1.2v power current: mA */
+#define MC_CMD_SENSOR_IN_I1V2 0x1e
+/* enum: Not a sensor: reserved for the next page flag */
+#define MC_CMD_SENSOR_PAGE0_NEXT 0x1f
+/* enum: 0.9v power voltage (at ADC): mV */
+#define MC_CMD_SENSOR_IN_0V9_ADC 0x20
+/* enum: Controller temperature 2: degC */
+#define MC_CMD_SENSOR_CONTROLLER_2_TEMP 0x21
+/* enum: Voltage regulator internal temperature: degC */
+#define MC_CMD_SENSOR_VREG_INTERNAL_TEMP 0x22
+/* enum: 0.9V voltage regulator temperature: degC */
+#define MC_CMD_SENSOR_VREG_0V9_TEMP 0x23
+/* enum: 1.2V voltage regulator temperature: degC */
+#define MC_CMD_SENSOR_VREG_1V2_TEMP 0x24
+/* enum: controller internal temperature sensor voltage (internal ADC): mV */
+#define MC_CMD_SENSOR_CONTROLLER_VPTAT 0x25
+/* enum: controller internal temperature (internal ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP 0x26
+/* enum: controller internal temperature sensor voltage (external ADC): mV */
+#define MC_CMD_SENSOR_CONTROLLER_VPTAT_EXTADC 0x27
+/* enum: controller internal temperature (external ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP_EXTADC 0x28
+/* enum: ambient temperature: degC */
+#define MC_CMD_SENSOR_AMBIENT_TEMP 0x29
+/* enum: air flow: bool */
+#define MC_CMD_SENSOR_AIRFLOW 0x2a
+/* enum: voltage between VDD08D and VSS08D at CSR: mV */
+#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR 0x2b
+/* enum: voltage between VDD08D and VSS08D at CSR (external ADC): mV */
+#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR_EXTADC 0x2c
+/* enum: Hotpoint temperature: degC */
+#define MC_CMD_SENSOR_HOTPOINT_TEMP 0x2d
+/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */
+#define MC_CMD_SENSOR_ENTRY_OFST 4
+#define MC_CMD_SENSOR_ENTRY_LEN 8
+#define MC_CMD_SENSOR_ENTRY_LO_OFST 4
+#define MC_CMD_SENSOR_ENTRY_HI_OFST 8
+#define MC_CMD_SENSOR_ENTRY_MINNUM 0
+#define MC_CMD_SENSOR_ENTRY_MAXNUM 31
+
+/* MC_CMD_SENSOR_INFO_EXT_OUT msgresponse */
+#define MC_CMD_SENSOR_INFO_EXT_OUT_LENMIN 4
+#define MC_CMD_SENSOR_INFO_EXT_OUT_LENMAX 252
+#define MC_CMD_SENSOR_INFO_EXT_OUT_LEN(num) (4+8*(num))
+#define MC_CMD_SENSOR_INFO_EXT_OUT_MASK_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_SENSOR_INFO_OUT */
+#define MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_LBN 31
+#define MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_WIDTH 1
+/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */
+/* MC_CMD_SENSOR_ENTRY_OFST 4 */
+/* MC_CMD_SENSOR_ENTRY_LEN 8 */
+/* MC_CMD_SENSOR_ENTRY_LO_OFST 4 */
+/* MC_CMD_SENSOR_ENTRY_HI_OFST 8 */
+/* MC_CMD_SENSOR_ENTRY_MINNUM 0 */
+/* MC_CMD_SENSOR_ENTRY_MAXNUM 31 */
+
+/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF structuredef */
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_LEN 8
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_OFST 0
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_LEN 2
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_LBN 0
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_WIDTH 16
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_OFST 2
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_LEN 2
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_LBN 16
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_WIDTH 16
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_OFST 4
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_LEN 2
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_LBN 32
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_WIDTH 16
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_OFST 6
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_LEN 2
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_LBN 48
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_WIDTH 16
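+
+/* A minimal sketch of walking every page of sensor information, assuming the
+ * MCDI helper macros from mcdi.h; the loop continues while bit 31 of the
+ * returned mask (the next-page flag, MC_CMD_SENSOR_PAGE0_NEXT for page 0) is
+ * set. "efx" and "rc" are illustrative, and parsing of the per-sensor range
+ * entries is elided:
+ *
+ *   MCDI_DECLARE_BUF(inbuf, MC_CMD_SENSOR_INFO_EXT_IN_LEN);
+ *   MCDI_DECLARE_BUF(outbuf, MC_CMD_SENSOR_INFO_OUT_LENMAX);
+ *   unsigned int page = 0;
+ *   size_t outlen;
+ *   u32 mask;
+ *
+ *   do {
+ *           MCDI_SET_DWORD(inbuf, SENSOR_INFO_EXT_IN_PAGE, page++);
+ *           rc = efx_mcdi_rpc(efx, MC_CMD_SENSOR_INFO, inbuf, sizeof(inbuf),
+ *                             outbuf, sizeof(outbuf), &outlen);
+ *           if (rc)
+ *                   break;
+ *           mask = MCDI_DWORD(outbuf, SENSOR_INFO_OUT_MASK);
+ *   } while (mask & (1 << MC_CMD_SENSOR_PAGE0_NEXT));
+ */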
+
+
+/***********************************/
+/* MC_CMD_READ_SENSORS
+ * Returns the current reading from each sensor. DMAs an array of sensor
+ * readings, in order of sensor type (but without gaps for unimplemented
+ * sensors), into host memory. Each array element is a
+ * MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF dword.
+ *
+ * If the request does not contain the LENGTH field then only sensors 0 to 30
+ * are reported, to avoid DMA buffer overflow in older host software. If the
+ * sensor readings require more space than LENGTH allows, EINVAL is returned.
+ *
+ * The MC will send a SENSOREVT event every time any sensor changes state. The
+ * driver is responsible for ensuring that it doesn't miss any events. The
+ * board will function normally if all sensors are in STATE_OK or
+ * STATE_WARNING. Otherwise the board should not be expected to function.
+ */
+#define MC_CMD_READ_SENSORS 0x42
+
+/* MC_CMD_READ_SENSORS_IN msgrequest */
+#define MC_CMD_READ_SENSORS_IN_LEN 8
+/* DMA address of host buffer for sensor readings (must be 4Kbyte aligned). */
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_OFST 0
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LEN 8
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_HI_OFST 4
+
+/* MC_CMD_READ_SENSORS_EXT_IN msgrequest */
+#define MC_CMD_READ_SENSORS_EXT_IN_LEN 12
+/* DMA address of host buffer for sensor readings */
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_OFST 0
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LEN 8
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_HI_OFST 4
+/* Size in bytes of host buffer. */
+#define MC_CMD_READ_SENSORS_EXT_IN_LENGTH_OFST 8
+
+/* MC_CMD_READ_SENSORS_OUT msgresponse */
+#define MC_CMD_READ_SENSORS_OUT_LEN 0
+
+/* MC_CMD_READ_SENSORS_EXT_OUT msgresponse */
+#define MC_CMD_READ_SENSORS_EXT_OUT_LEN 0
+
+/* MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF structuredef */
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_LEN 4
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_OFST 0
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_LEN 2
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_LBN 0
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_WIDTH 16
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_OFST 2
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LEN 1
+/* enum: Ok. */
+#define MC_CMD_SENSOR_STATE_OK 0x0
+/* enum: Breached warning threshold. */
+#define MC_CMD_SENSOR_STATE_WARNING 0x1
+/* enum: Breached fatal threshold. */
+#define MC_CMD_SENSOR_STATE_FATAL 0x2
+/* enum: Fault with sensor. */
+#define MC_CMD_SENSOR_STATE_BROKEN 0x3
+/* enum: Sensor is working but does not currently have a reading. */
+#define MC_CMD_SENSOR_STATE_NO_READING 0x4
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LBN 16
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_WIDTH 8
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_OFST 3
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_LEN 1
+/* Enum values, see field(s): */
+/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_LBN 24
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_WIDTH 8
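+
+/* A minimal sketch of triggering a sensor readout with the extended request,
+ * assuming the MCDI helper macros from mcdi.h; "efx", "rc", "dma_addr" and
+ * "dma_len" describe an illustrative 4Kbyte-aligned host buffer that receives
+ * one MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF dword per sensor:
+ *
+ *   MCDI_DECLARE_BUF(inbuf, MC_CMD_READ_SENSORS_EXT_IN_LEN);
+ *
+ *   MCDI_SET_QWORD(inbuf, READ_SENSORS_EXT_IN_DMA_ADDR, dma_addr);
+ *   MCDI_SET_DWORD(inbuf, READ_SENSORS_EXT_IN_LENGTH, dma_len);
+ *   rc = efx_mcdi_rpc(efx, MC_CMD_READ_SENSORS, inbuf, sizeof(inbuf),
+ *                     NULL, 0, NULL);
+ */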
+
+
+/***********************************/
+/* MC_CMD_GET_PHY_STATE
+ * Report current state of PHY. A 'zombie' PHY is a PHY that has failed to boot
+ * (e.g. due to missing or corrupted firmware). Locks required: None. Return
+ * code: 0
+ */
+#define MC_CMD_GET_PHY_STATE 0x43
+
+/* MC_CMD_GET_PHY_STATE_IN msgrequest */
+#define MC_CMD_GET_PHY_STATE_IN_LEN 0
+
+/* MC_CMD_GET_PHY_STATE_OUT msgresponse */
+#define MC_CMD_GET_PHY_STATE_OUT_LEN 4
+#define MC_CMD_GET_PHY_STATE_OUT_STATE_OFST 0
+/* enum: Ok. */
+#define MC_CMD_PHY_STATE_OK 0x1
+/* enum: Faulty. */
+#define MC_CMD_PHY_STATE_ZOMBIE 0x2
+
+
+/***********************************/
+/* MC_CMD_SETUP_8021QBB
+ * 802.1Qbb control. 8 Tx queues that map to priorities 0 - 7. Use all 1s to
+ * disable 802.1Qbb for a given priority.
+ */
+#define MC_CMD_SETUP_8021QBB 0x44
+
+/* MC_CMD_SETUP_8021QBB_IN msgrequest */
+#define MC_CMD_SETUP_8021QBB_IN_LEN 32
+#define MC_CMD_SETUP_8021QBB_IN_TXQS_OFST 0
+#define MC_CMD_SETUP_8021QBB_IN_TXQS_LEN 32
+
+/* MC_CMD_SETUP_8021QBB_OUT msgresponse */
+#define MC_CMD_SETUP_8021QBB_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_WOL_FILTER_GET
+ * Retrieve ID of any WoL filters. Locks required: None. Returns: 0, ENOSYS
+ */
+#define MC_CMD_WOL_FILTER_GET 0x45
+
+/* MC_CMD_WOL_FILTER_GET_IN msgrequest */
+#define MC_CMD_WOL_FILTER_GET_IN_LEN 0
+
+/* MC_CMD_WOL_FILTER_GET_OUT msgresponse */
+#define MC_CMD_WOL_FILTER_GET_OUT_LEN 4
+#define MC_CMD_WOL_FILTER_GET_OUT_FILTER_ID_OFST 0
+
+
+/***********************************/
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD
+ * Add a protocol offload to NIC for lights-out state. Locks required: None.
+ * Returns: 0, ENOSYS
+ */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD 0x46
+
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN msgrequest */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMIN 8
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMAX 252
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LEN(num) (4+4*(num))
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
+#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_ARP 0x1 /* enum */
+#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_NS 0x2 /* enum */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_OFST 4
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_LEN 4
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MINNUM 1
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MAXNUM 62
+
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP msgrequest */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_LEN 14
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_OFST 4
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_LEN 6
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP_OFST 10
+
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS msgrequest */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_LEN 42
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_OFST 4
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_LEN 6
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_SNIPV6_OFST 10
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_SNIPV6_LEN 16
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_IPV6_OFST 26
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_IPV6_LEN 16
+
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT msgresponse */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_LEN 4
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_OFST 0
+
+
+/***********************************/
+/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD
+ * Remove a protocol offload from NIC for lights-out state. Locks required:
+ * None. Returns: 0, ENOSYS
+ */
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD 0x47
+
+/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN msgrequest */
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_LEN 8
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_OFST 4
+
+/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT msgresponse */
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_MAC_RESET_RESTORE
+ * Restore MAC after block reset. Locks required: None. Returns: 0.
+ */
+#define MC_CMD_MAC_RESET_RESTORE 0x48
+
+/* MC_CMD_MAC_RESET_RESTORE_IN msgrequest */
+#define MC_CMD_MAC_RESET_RESTORE_IN_LEN 0
+
+/* MC_CMD_MAC_RESET_RESTORE_OUT msgresponse */
+#define MC_CMD_MAC_RESET_RESTORE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_TESTASSERT
+ * Deliberately trigger an assert-detonation in the firmware for testing
+ * purposes (i.e. to allow testing that the driver copes gracefully). Locks
+ * required: None. Returns: 0
+ */
+#define MC_CMD_TESTASSERT 0x49
+
+/* MC_CMD_TESTASSERT_IN msgrequest */
+#define MC_CMD_TESTASSERT_IN_LEN 0
+
+/* MC_CMD_TESTASSERT_OUT msgresponse */
+#define MC_CMD_TESTASSERT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_WORKAROUND
+ * Enable/Disable a given workaround. The mcfw will return EINVAL if it doesn't
+ * understand the given workaround number - which should not be treated as a
+ * hard error by client code. This op does not imply any semantics about each
+ * workaround, that's between the driver and the mcfw on a per-workaround
+ * basis. Locks required: None. Returns: 0, EINVAL.
+ */
+#define MC_CMD_WORKAROUND 0x4a
+
+/* MC_CMD_WORKAROUND_IN msgrequest */
+#define MC_CMD_WORKAROUND_IN_LEN 8
+#define MC_CMD_WORKAROUND_IN_TYPE_OFST 0
+/* enum: Bug 17230 work around. */
+#define MC_CMD_WORKAROUND_BUG17230 0x1
+/* enum: Bug 35388 work around (unsafe EVQ writes). */
+#define MC_CMD_WORKAROUND_BUG35388 0x2
+/* enum: Bug35017 workaround (A64 tables must be identity map) */
+#define MC_CMD_WORKAROUND_BUG35017 0x3
+#define MC_CMD_WORKAROUND_IN_ENABLED_OFST 4
+
+/* MC_CMD_WORKAROUND_OUT msgresponse */
+#define MC_CMD_WORKAROUND_OUT_LEN 0
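+
+/* A minimal sketch of enabling a workaround, assuming the MCDI helper macros
+ * from mcdi.h; as noted above, -EINVAL only means the firmware does not know
+ * this workaround number and is not treated as a hard failure. "efx" and "rc"
+ * are illustrative:
+ *
+ *   MCDI_DECLARE_BUF(inbuf, MC_CMD_WORKAROUND_IN_LEN);
+ *
+ *   MCDI_SET_DWORD(inbuf, WORKAROUND_IN_TYPE, MC_CMD_WORKAROUND_BUG17230);
+ *   MCDI_SET_DWORD(inbuf, WORKAROUND_IN_ENABLED, 1);
+ *   rc = efx_mcdi_rpc(efx, MC_CMD_WORKAROUND, inbuf, sizeof(inbuf),
+ *                     NULL, 0, NULL);
+ *   if (rc == -EINVAL)
+ *           rc = 0;
+ */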
+
+
+/***********************************/
+/* MC_CMD_GET_PHY_MEDIA_INFO
+ * Read media-specific data from PHY (e.g. SFP/SFP+ module ID information for
+ * SFP+ PHYs). The 'media type' can be found via GET_PHY_CFG
+ * (GET_PHY_CFG_OUT_MEDIA_TYPE); the valid 'page number' input values, and the
+ * output data, are interpreted on a per-type basis. For SFP+: PAGE=0 or 1
+ * returns a 128-byte block read from module I2C address 0xA0 offset 0 or 0x80.
+ * Anything else: currently undefined. Locks required: None. Return code: 0.
+ */
+#define MC_CMD_GET_PHY_MEDIA_INFO 0x4b
+
+/* MC_CMD_GET_PHY_MEDIA_INFO_IN msgrequest */
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_OFST 0
+
+/* MC_CMD_GET_PHY_MEDIA_INFO_OUT msgresponse */
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMIN 5
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX 252
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(num) (4+1*(num))
+/* in bytes */
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_OFST 0
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_LEN 1
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MINNUM 1
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MAXNUM 248
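+
+/* A minimal sketch of reading one 128-byte SFP+ module page (PAGE=0 maps to
+ * I2C address 0xA0, offset 0), assuming the MCDI helper macros from mcdi.h;
+ * "efx", "rc" and "buf" are illustrative, and DATALEN should be checked in
+ * real code before copying:
+ *
+ *   MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN);
+ *   MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(128));
+ *   size_t outlen;
+ *
+ *   MCDI_SET_DWORD(inbuf, GET_PHY_MEDIA_INFO_IN_PAGE, 0);
+ *   rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_MEDIA_INFO, inbuf, sizeof(inbuf),
+ *                     outbuf, sizeof(outbuf), &outlen);
+ *   if (rc == 0)
+ *           memcpy(buf, MCDI_PTR(outbuf, GET_PHY_MEDIA_INFO_OUT_DATA), 128);
+ */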
+
+
+/***********************************/
+/* MC_CMD_NVRAM_TEST
+ * Test a particular NVRAM partition for valid contents (where "valid" depends
+ * on the type of partition).
+ */
+#define MC_CMD_NVRAM_TEST 0x4c
+
+/* MC_CMD_NVRAM_TEST_IN msgrequest */
+#define MC_CMD_NVRAM_TEST_IN_LEN 4
+#define MC_CMD_NVRAM_TEST_IN_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+
+/* MC_CMD_NVRAM_TEST_OUT msgresponse */
+#define MC_CMD_NVRAM_TEST_OUT_LEN 4
+#define MC_CMD_NVRAM_TEST_OUT_RESULT_OFST 0
+/* enum: Passed. */
+#define MC_CMD_NVRAM_TEST_PASS 0x0
+/* enum: Failed. */
+#define MC_CMD_NVRAM_TEST_FAIL 0x1
+/* enum: Not supported. */
+#define MC_CMD_NVRAM_TEST_NOTSUPP 0x2
+
+
+/***********************************/
+/* MC_CMD_MRSFP_TWEAK
+ * Read status and/or set parameters for the 'mrsfp' driver in mr_rusty builds.
+ * I2C I/O expander bits are always read; if equaliser parameters are supplied,
+ * they are configured first. Locks required: None. Return code: 0, EINVAL.
+ */
+#define MC_CMD_MRSFP_TWEAK 0x4d
+
+/* MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG msgrequest */
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_LEN 16
+/* 0-6 low->high de-emph. */
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_OFST 0
+/* 0-8 low->high ref.V */
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_OFST 4
+/* 0-8 0-8 low->high boost */
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_OFST 8
+/* 0-8 low->high ref.V */
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_OFST 12
+
+/* MC_CMD_MRSFP_TWEAK_IN_READ_ONLY msgrequest */
+#define MC_CMD_MRSFP_TWEAK_IN_READ_ONLY_LEN 0
+
+/* MC_CMD_MRSFP_TWEAK_OUT msgresponse */
+#define MC_CMD_MRSFP_TWEAK_OUT_LEN 12
+/* input bits */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_OFST 0
+/* output bits */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_OFST 4
+/* direction */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OFST 8
+/* enum: Out. */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OUT 0x0
+/* enum: In. */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_IN 0x1
+
+
+/***********************************/
+/* MC_CMD_SENSOR_SET_LIMS
+ * Adjusts the sensor limits. This is a warranty-voiding operation. Returns:
+ * ENOENT if the sensor specified does not exist, EINVAL if the limits are out
+ * of range.
+ */
+#define MC_CMD_SENSOR_SET_LIMS 0x4e
+
+/* MC_CMD_SENSOR_SET_LIMS_IN msgrequest */
+#define MC_CMD_SENSOR_SET_LIMS_IN_LEN 20
+#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
+/* interpretation is sensor-specific. */
+#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_OFST 4
+/* interpretation is sensor-specific. */
+#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_OFST 8
+/* interpretation is sensor-specific. */
+#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_OFST 12
+/* interpretation is sensor-specific. */
+#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_OFST 16
+
+/* MC_CMD_SENSOR_SET_LIMS_OUT msgresponse */
+#define MC_CMD_SENSOR_SET_LIMS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_RESOURCE_LIMITS
+ */
+#define MC_CMD_GET_RESOURCE_LIMITS 0x4f
+
+/* MC_CMD_GET_RESOURCE_LIMITS_IN msgrequest */
+#define MC_CMD_GET_RESOURCE_LIMITS_IN_LEN 0
+
+/* MC_CMD_GET_RESOURCE_LIMITS_OUT msgresponse */
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN 16
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_BUFTBL_OFST 0
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_EVQ_OFST 4
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_OFST 8
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_OFST 12
+
+
+/***********************************/
+/* MC_CMD_NVRAM_PARTITIONS
+ * Reads the list of available virtual NVRAM partition types. Locks required:
+ * none. Returns: 0, EINVAL (bad type).
+ */
+#define MC_CMD_NVRAM_PARTITIONS 0x51
+
+/* MC_CMD_NVRAM_PARTITIONS_IN msgrequest */
+#define MC_CMD_NVRAM_PARTITIONS_IN_LEN 0
+
+/* MC_CMD_NVRAM_PARTITIONS_OUT msgresponse */
+#define MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN 4
+#define MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX 252
+#define MC_CMD_NVRAM_PARTITIONS_OUT_LEN(num) (4+4*(num))
+/* total number of partitions */
+#define MC_CMD_NVRAM_PARTITIONS_OUT_NUM_PARTITIONS_OFST 0
+/* type ID code for each of NUM_PARTITIONS partitions */
+#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_OFST 4
+#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_LEN 4
+#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_MINNUM 0
+#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_MAXNUM 62
+
+
+/***********************************/
+/* MC_CMD_NVRAM_METADATA
+ * Reads soft metadata for a virtual NVRAM partition type. Locks required:
+ * none. Returns: 0, EINVAL (bad type).
+ */
+#define MC_CMD_NVRAM_METADATA 0x52
+
+/* MC_CMD_NVRAM_METADATA_IN msgrequest */
+#define MC_CMD_NVRAM_METADATA_IN_LEN 4
+/* Partition type ID code */
+#define MC_CMD_NVRAM_METADATA_IN_TYPE_OFST 0
+
+/* MC_CMD_NVRAM_METADATA_OUT msgresponse */
+#define MC_CMD_NVRAM_METADATA_OUT_LENMIN 20
+#define MC_CMD_NVRAM_METADATA_OUT_LENMAX 252
+#define MC_CMD_NVRAM_METADATA_OUT_LEN(num) (20+1*(num))
+/* Partition type ID code */
+#define MC_CMD_NVRAM_METADATA_OUT_TYPE_OFST 0
+#define MC_CMD_NVRAM_METADATA_OUT_FLAGS_OFST 4
+#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN 0
+#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_WIDTH 1
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_LBN 1
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_WIDTH 1
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_LBN 2
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_WIDTH 1
+/* Subtype ID code for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_OFST 8
+/* 1st component of W.X.Y.Z version number for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_W_OFST 12
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_W_LEN 2
+/* 2nd component of W.X.Y.Z version number for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_X_OFST 14
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_X_LEN 2
+/* 3rd component of W.X.Y.Z version number for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Y_OFST 16
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Y_LEN 2
+/* 4th component of W.X.Y.Z version number for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Z_OFST 18
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Z_LEN 2
+/* Zero-terminated string describing the content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_OFST 20
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_LEN 1
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_MINNUM 0
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_MAXNUM 232
+
+
+/***********************************/
+/* MC_CMD_GET_MAC_ADDRESSES
+ * Returns the base MAC, count and stride for the requesting function.
+ */
+#define MC_CMD_GET_MAC_ADDRESSES 0x55
+
+/* MC_CMD_GET_MAC_ADDRESSES_IN msgrequest */
+#define MC_CMD_GET_MAC_ADDRESSES_IN_LEN 0
+
+/* MC_CMD_GET_MAC_ADDRESSES_OUT msgresponse */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_LEN 16
+/* Base MAC address */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE_OFST 0
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE_LEN 6
+/* Padding */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_RESERVED_OFST 6
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_RESERVED_LEN 2
+/* Number of allocated MAC addresses */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_COUNT_OFST 8
+/* Spacing of allocated MAC addresses */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_OFST 12
+
+/* MC_CMD_RESOURCE_SPECIFIER enum */
+/* enum: Any */
+#define MC_CMD_RESOURCE_INSTANCE_ANY 0xffffffff
+/* enum: None */
+#define MC_CMD_RESOURCE_INSTANCE_NONE 0xfffffffe
+
+/* EVB_PORT_ID structuredef */
+#define EVB_PORT_ID_LEN 4
+#define EVB_PORT_ID_PORT_ID_OFST 0
+/* enum: An invalid port handle. */
+#define EVB_PORT_ID_NULL 0x0
+/* enum: The port assigned to this function. */
+#define EVB_PORT_ID_ASSIGNED 0x1000000
+/* enum: External network port 0 */
+#define EVB_PORT_ID_MAC0 0x2000000
+/* enum: External network port 1 */
+#define EVB_PORT_ID_MAC1 0x2000001
+/* enum: External network port 2 */
+#define EVB_PORT_ID_MAC2 0x2000002
+/* enum: External network port 3 */
+#define EVB_PORT_ID_MAC3 0x2000003
+#define EVB_PORT_ID_PORT_ID_LBN 0
+#define EVB_PORT_ID_PORT_ID_WIDTH 32
+
+/* EVB_VLAN_TAG structuredef */
+#define EVB_VLAN_TAG_LEN 2
+/* The VLAN tag value */
+#define EVB_VLAN_TAG_VLAN_ID_LBN 0
+#define EVB_VLAN_TAG_VLAN_ID_WIDTH 12
+#define EVB_VLAN_TAG_MODE_LBN 12
+#define EVB_VLAN_TAG_MODE_WIDTH 4
+/* enum: Insert the VLAN. */
+#define EVB_VLAN_TAG_INSERT 0x0
+/* enum: Replace the VLAN if already present. */
+#define EVB_VLAN_TAG_REPLACE 0x1
+
+/* BUFTBL_ENTRY structuredef */
+#define BUFTBL_ENTRY_LEN 12
+/* the owner ID */
+#define BUFTBL_ENTRY_OID_OFST 0
+#define BUFTBL_ENTRY_OID_LEN 2
+#define BUFTBL_ENTRY_OID_LBN 0
+#define BUFTBL_ENTRY_OID_WIDTH 16
+/* the page parameter as one of ESE_DZ_SMC_PAGE_SIZE_ */
+#define BUFTBL_ENTRY_PGSZ_OFST 2
+#define BUFTBL_ENTRY_PGSZ_LEN 2
+#define BUFTBL_ENTRY_PGSZ_LBN 16
+#define BUFTBL_ENTRY_PGSZ_WIDTH 16
+/* the raw 64-bit address field from the SMC, not adjusted for page size */
+#define BUFTBL_ENTRY_RAWADDR_OFST 4
+#define BUFTBL_ENTRY_RAWADDR_LEN 8
+#define BUFTBL_ENTRY_RAWADDR_LO_OFST 4
+#define BUFTBL_ENTRY_RAWADDR_HI_OFST 8
+#define BUFTBL_ENTRY_RAWADDR_LBN 32
+#define BUFTBL_ENTRY_RAWADDR_WIDTH 64
+
+/* NVRAM_PARTITION_TYPE structuredef */
+#define NVRAM_PARTITION_TYPE_LEN 2
+#define NVRAM_PARTITION_TYPE_ID_OFST 0
+#define NVRAM_PARTITION_TYPE_ID_LEN 2
+/* enum: Primary MC firmware partition */
+#define NVRAM_PARTITION_TYPE_MC_FIRMWARE 0x100
+/* enum: Secondary MC firmware partition */
+#define NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP 0x200
+/* enum: Expansion ROM partition */
+#define NVRAM_PARTITION_TYPE_EXPANSION_ROM 0x300
+/* enum: Static configuration TLV partition */
+#define NVRAM_PARTITION_TYPE_STATIC_CONFIG 0x400
+/* enum: Dynamic configuration TLV partition */
+#define NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG 0x500
+/* enum: Expansion ROM configuration data for port 0 */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0 0x600
+/* enum: Expansion ROM configuration data for port 1 */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1 0x601
+/* enum: Expansion ROM configuration data for port 2 */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2 0x602
+/* enum: Expansion ROM configuration data for port 3 */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3 0x603
+/* enum: Non-volatile log output partition */
+#define NVRAM_PARTITION_TYPE_LOG 0x700
+/* enum: Device state dump output partition */
+#define NVRAM_PARTITION_TYPE_DUMP 0x800
+/* enum: Application license key storage partition */
+#define NVRAM_PARTITION_TYPE_LICENSE 0x900
+/* enum: Start of range used for PHY partitions (low 8 bits are the PHY ID) */
+#define NVRAM_PARTITION_TYPE_PHY_MIN 0xa00
+/* enum: End of range used for PHY partitions (low 8 bits are the PHY ID) */
+#define NVRAM_PARTITION_TYPE_PHY_MAX 0xaff
+/* enum: Start of reserved value range (firmware may use for any purpose) */
+#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN 0xff00
+/* enum: End of reserved value range (firmware may use for any purpose) */
+#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MAX 0xfffd
+/* enum: Recovery partition map (provided if real map is missing or corrupt) */
+#define NVRAM_PARTITION_TYPE_RECOVERY_MAP 0xfffe
+/* enum: Partition map (real map as stored in flash) */
+#define NVRAM_PARTITION_TYPE_PARTITION_MAP 0xffff
+#define NVRAM_PARTITION_TYPE_ID_LBN 0
+#define NVRAM_PARTITION_TYPE_ID_WIDTH 16
+
+/* LICENSED_APP_ID structuredef */
+#define LICENSED_APP_ID_LEN 4
+#define LICENSED_APP_ID_ID_OFST 0
+/* enum: OpenOnload */
+#define LICENSED_APP_ID_ONLOAD 0x1
+/* enum: PTP timestamping */
+#define LICENSED_APP_ID_PTP 0x2
+/* enum: SolarCapture Pro */
+#define LICENSED_APP_ID_SOLARCAPTURE_PRO 0x4
+#define LICENSED_APP_ID_ID_LBN 0
+#define LICENSED_APP_ID_ID_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_READ_REGS
+ * Get a dump of the MCPU registers
+ */
+#define MC_CMD_READ_REGS 0x50
+
+/* MC_CMD_READ_REGS_IN msgrequest */
+#define MC_CMD_READ_REGS_IN_LEN 0
+
+/* MC_CMD_READ_REGS_OUT msgresponse */
+#define MC_CMD_READ_REGS_OUT_LEN 308
+/* Whether the corresponding register entry contains a valid value */
+#define MC_CMD_READ_REGS_OUT_MASK_OFST 0
+#define MC_CMD_READ_REGS_OUT_MASK_LEN 16
+/* Same order as MIPS GDB (r0-r31, sr, lo, hi, bad, cause, 32 x float, fsr,
+ * fir, fp)
+ */
+#define MC_CMD_READ_REGS_OUT_REGS_OFST 16
+#define MC_CMD_READ_REGS_OUT_REGS_LEN 4
+#define MC_CMD_READ_REGS_OUT_REGS_NUM 73
+
+
+/***********************************/
+/* MC_CMD_INIT_EVQ
+ * Set up an event queue according to the supplied parameters. The IN arguments
+ * end with an address for each 4k of host memory required to back the EVQ.
+ */
+#define MC_CMD_INIT_EVQ 0x80
+
+/* MC_CMD_INIT_EVQ_IN msgrequest */
+#define MC_CMD_INIT_EVQ_IN_LENMIN 44
+#define MC_CMD_INIT_EVQ_IN_LENMAX 548
+#define MC_CMD_INIT_EVQ_IN_LEN(num) (36+8*(num))
+/* Size, in entries */
+#define MC_CMD_INIT_EVQ_IN_SIZE_OFST 0
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_EVQ_IN_INSTANCE_OFST 4
+/* The initial timer value. The load value is ignored if the timer mode is DIS.
+ */
+#define MC_CMD_INIT_EVQ_IN_TMR_LOAD_OFST 8
+/* The reload value is ignored in one-shot modes */
+#define MC_CMD_INIT_EVQ_IN_TMR_RELOAD_OFST 12
+/* Flags; see the individual FLAG_* bitfields below. */
+#define MC_CMD_INIT_EVQ_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_LBN 0
+#define MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_RPTR_DOS_LBN 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_RPTR_DOS_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_INT_ARMD_LBN 2
+#define MC_CMD_INIT_EVQ_IN_FLAG_INT_ARMD_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_CUT_THRU_LBN 3
+#define MC_CMD_INIT_EVQ_IN_FLAG_CUT_THRU_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_RX_MERGE_LBN 4
+#define MC_CMD_INIT_EVQ_IN_FLAG_RX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_LBN 5
+#define MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_TMR_MODE_OFST 20
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS 0x0
+/* enum: Immediate */
+#define MC_CMD_INIT_EVQ_IN_TMR_IMMED_START 0x1
+/* enum: Triggered */
+#define MC_CMD_INIT_EVQ_IN_TMR_TRIG_START 0x2
+/* enum: Hold-off */
+#define MC_CMD_INIT_EVQ_IN_TMR_INT_HLDOFF 0x3
+/* Target EVQ for wakeups if in wakeup mode. */
+#define MC_CMD_INIT_EVQ_IN_TARGET_EVQ_OFST 24
+/* Target interrupt if in interrupting mode (note union with target EVQ). Use
+ * MC_CMD_RESOURCE_INSTANCE_ANY unless a specific one is required for test
+ * purposes.
+ */
+#define MC_CMD_INIT_EVQ_IN_IRQ_NUM_OFST 24
+/* Event Counter Mode. */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_OFST 28
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS 0x0
+/* enum: Count RX packets only */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_RX 0x1
+/* enum: Count TX packets only */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_TX 0x2
+/* enum: Count both RX and TX packets */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_RXTX 0x3
+/* Event queue packet count threshold. */
+#define MC_CMD_INIT_EVQ_IN_COUNT_THRSHLD_OFST 32
+/* 64-bit address of each 4k-aligned 4k chunk of the host memory buffer */
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_OFST 36
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_LO_OFST 36
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_HI_OFST 40
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_MINNUM 1
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_MAXNUM 64
+
+/* MC_CMD_INIT_EVQ_OUT msgresponse */
+#define MC_CMD_INIT_EVQ_OUT_LEN 4
+/* Only valid if the INTERRUPTING flag was set */
+#define MC_CMD_INIT_EVQ_OUT_IRQ_OFST 0
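+
+/* Illustrative sketch (not part of the generated definitions): building a
+ * minimal INIT_EVQ request for an EVQ backed by a single 4k buffer, assuming
+ * the MCDI helpers from mcdi.h (MCDI_DECLARE_BUF, MCDI_SET_DWORD,
+ * MCDI_SET_QWORD, efx_mcdi_rpc); efx, evq_index and dma_addr are placeholders.
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_EVQ_IN_LEN(1));
+ *	MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_OUT_LEN);
+ *	size_t outlen;
+ *	int rc;
+ *
+ *	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_SIZE, 512);
+ *	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_INSTANCE, evq_index);
+ *	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_MODE,
+ *		       MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
+ *	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_MODE,
+ *		       MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
+ *	MCDI_SET_QWORD(inbuf, INIT_EVQ_IN_DMA_ADDR, dma_addr);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, sizeof(inbuf),
+ *			  outbuf, sizeof(outbuf), &outlen);
+ */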
+
+/* QUEUE_CRC_MODE structuredef */
+#define QUEUE_CRC_MODE_LEN 1
+#define QUEUE_CRC_MODE_MODE_LBN 0
+#define QUEUE_CRC_MODE_MODE_WIDTH 4
+/* enum: No CRC. */
+#define QUEUE_CRC_MODE_NONE 0x0
+/* enum: CRC Fibre Channel over Ethernet (FCoE). */
+#define QUEUE_CRC_MODE_FCOE 0x1
+/* enum: CRC (digest) iSCSI header only. */
+#define QUEUE_CRC_MODE_ISCSI_HDR 0x2
+/* enum: CRC (digest) iSCSI header and payload. */
+#define QUEUE_CRC_MODE_ISCSI 0x3
+/* enum: CRC Fibre Channel over IP over Ethernet (FCoIPoE). */
+#define QUEUE_CRC_MODE_FCOIPOE 0x4
+/* enum: CRC MPA. */
+#define QUEUE_CRC_MODE_MPA 0x5
+#define QUEUE_CRC_MODE_SPARE_LBN 4
+#define QUEUE_CRC_MODE_SPARE_WIDTH 4
+
+
+/***********************************/
+/* MC_CMD_INIT_RXQ
+ * Set up a receive queue according to the supplied parameters. The IN
+ * arguments end with an address for each 4k of host memory required to back
+ * the RXQ.
+ */
+#define MC_CMD_INIT_RXQ 0x81
+
+/* MC_CMD_INIT_RXQ_IN msgrequest */
+#define MC_CMD_INIT_RXQ_IN_LENMIN 36
+#define MC_CMD_INIT_RXQ_IN_LENMAX 252
+#define MC_CMD_INIT_RXQ_IN_LEN(num) (28+8*(num))
+/* Size, in entries */
+#define MC_CMD_INIT_RXQ_IN_SIZE_OFST 0
+/* The EVQ to send events to. This is an index originally specified to INIT_EVQ
+ */
+#define MC_CMD_INIT_RXQ_IN_TARGET_EVQ_OFST 4
+/* The value to put in the event data. Check hardware spec. for valid range. */
+#define MC_CMD_INIT_RXQ_IN_LABEL_OFST 8
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_RXQ_IN_INSTANCE_OFST 12
+/* There will be more flags here. */
+#define MC_CMD_INIT_RXQ_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_LBN 0
+#define MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_LBN 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_TIMESTAMP_LBN 2
+#define MC_CMD_INIT_RXQ_IN_FLAG_TIMESTAMP_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_CRC_MODE_LBN 3
+#define MC_CMD_INIT_RXQ_IN_CRC_MODE_WIDTH 4
+#define MC_CMD_INIT_RXQ_IN_FLAG_CHAIN_LBN 7
+#define MC_CMD_INIT_RXQ_IN_FLAG_CHAIN_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_LBN 8
+#define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_LBN 9
+#define MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define MC_CMD_INIT_RXQ_IN_OWNER_ID_OFST 20
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define MC_CMD_INIT_RXQ_IN_PORT_ID_OFST 24
+/* 64-bit address of each 4k-aligned 4k chunk of the host memory buffer */
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_OFST 28
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_HI_OFST 32
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MINNUM 1
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MAXNUM 28
+
+/* MC_CMD_INIT_RXQ_OUT msgresponse */
+#define MC_CMD_INIT_RXQ_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_INIT_TXQ
+ */
+#define MC_CMD_INIT_TXQ 0x82
+
+/* MC_CMD_INIT_TXQ_IN msgrequest */
+#define MC_CMD_INIT_TXQ_IN_LENMIN 36
+#define MC_CMD_INIT_TXQ_IN_LENMAX 252
+#define MC_CMD_INIT_TXQ_IN_LEN(num) (28+8*(num))
+/* Size, in entries */
+#define MC_CMD_INIT_TXQ_IN_SIZE_OFST 0
+/* The EVQ to send events to. This is an index originally specified to
+ * INIT_EVQ.
+ */
+#define MC_CMD_INIT_TXQ_IN_TARGET_EVQ_OFST 4
+/* The value to put in the event data. Check hardware spec. for valid range. */
+#define MC_CMD_INIT_TXQ_IN_LABEL_OFST 8
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_TXQ_IN_INSTANCE_OFST 12
+/* There will be more flags here. */
+#define MC_CMD_INIT_TXQ_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_LBN 0
+#define MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_IP_CSUM_DIS_LBN 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_IP_CSUM_DIS_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_CSUM_DIS_LBN 2
+#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_CSUM_DIS_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_UDP_ONLY_LBN 3
+#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_UDP_ONLY_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_CRC_MODE_LBN 4
+#define MC_CMD_INIT_TXQ_IN_CRC_MODE_WIDTH 4
+#define MC_CMD_INIT_TXQ_IN_FLAG_TIMESTAMP_LBN 8
+#define MC_CMD_INIT_TXQ_IN_FLAG_TIMESTAMP_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_LBN 9
+#define MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define MC_CMD_INIT_TXQ_IN_OWNER_ID_OFST 20
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define MC_CMD_INIT_TXQ_IN_PORT_ID_OFST 24
+/* 64-bit address of each 4k-aligned 4k chunk of the host memory buffer */
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_OFST 28
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_HI_OFST 32
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MINNUM 1
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MAXNUM 28
+
+/* MC_CMD_INIT_TXQ_OUT msgresponse */
+#define MC_CMD_INIT_TXQ_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_FINI_EVQ
+ * Teardown an EVQ.
+ *
+ * All DMAQs or EVQs that point to the EVQ to tear down must be torn down first
+ * or the operation will fail with EBUSY.
+ */
+#define MC_CMD_FINI_EVQ 0x83
+
+/* MC_CMD_FINI_EVQ_IN msgrequest */
+#define MC_CMD_FINI_EVQ_IN_LEN 4
+/* Instance of EVQ to destroy. Should be the same instance as that previously
+ * passed to INIT_EVQ
+ */
+#define MC_CMD_FINI_EVQ_IN_INSTANCE_OFST 0
+
+/* MC_CMD_FINI_EVQ_OUT msgresponse */
+#define MC_CMD_FINI_EVQ_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_FINI_RXQ
+ * Teardown a RXQ.
+ */
+#define MC_CMD_FINI_RXQ 0x84
+
+/* MC_CMD_FINI_RXQ_IN msgrequest */
+#define MC_CMD_FINI_RXQ_IN_LEN 4
+/* Instance of RXQ to destroy */
+#define MC_CMD_FINI_RXQ_IN_INSTANCE_OFST 0
+
+/* MC_CMD_FINI_RXQ_OUT msgresponse */
+#define MC_CMD_FINI_RXQ_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_FINI_TXQ
+ * Teardown a TXQ.
+ */
+#define MC_CMD_FINI_TXQ 0x85
+
+/* MC_CMD_FINI_TXQ_IN msgrequest */
+#define MC_CMD_FINI_TXQ_IN_LEN 4
+/* Instance of TXQ to destroy */
+#define MC_CMD_FINI_TXQ_IN_INSTANCE_OFST 0
+
+/* MC_CMD_FINI_TXQ_OUT msgresponse */
+#define MC_CMD_FINI_TXQ_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_DRIVER_EVENT
+ * Generate an event on an EVQ belonging to the function issuing the command.
+ */
+#define MC_CMD_DRIVER_EVENT 0x86
+
+/* MC_CMD_DRIVER_EVENT_IN msgrequest */
+#define MC_CMD_DRIVER_EVENT_IN_LEN 12
+/* Handle of target EVQ */
+#define MC_CMD_DRIVER_EVENT_IN_EVQ_OFST 0
+/* Bits 0 - 63 of event */
+#define MC_CMD_DRIVER_EVENT_IN_DATA_OFST 4
+#define MC_CMD_DRIVER_EVENT_IN_DATA_LEN 8
+#define MC_CMD_DRIVER_EVENT_IN_DATA_LO_OFST 4
+#define MC_CMD_DRIVER_EVENT_IN_DATA_HI_OFST 8
+
+/* MC_CMD_DRIVER_EVENT_OUT msgresponse */
+#define MC_CMD_DRIVER_EVENT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_PROXY_CMD
+ * Execute an arbitrary MCDI command on behalf of a different function, subject
+ * to security restrictions. The command to be proxied follows immediately
+ * afterward in the host buffer (or on the UART). This command supersedes
+ * MC_CMD_SET_FUNC, which remains available for Siena but is now deprecated.
+ */
+#define MC_CMD_PROXY_CMD 0x5b
+
+/* MC_CMD_PROXY_CMD_IN msgrequest */
+#define MC_CMD_PROXY_CMD_IN_LEN 4
+/* The handle of the target function. */
+#define MC_CMD_PROXY_CMD_IN_TARGET_OFST 0
+#define MC_CMD_PROXY_CMD_IN_TARGET_PF_LBN 0
+#define MC_CMD_PROXY_CMD_IN_TARGET_PF_WIDTH 16
+#define MC_CMD_PROXY_CMD_IN_TARGET_VF_LBN 16
+#define MC_CMD_PROXY_CMD_IN_TARGET_VF_WIDTH 16
+#define MC_CMD_PROXY_CMD_IN_VF_NULL 0xffff /* enum */
+
+/* MC_CMD_PROXY_CMD_OUT msgresponse */
+#define MC_CMD_PROXY_CMD_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_ALLOC_BUFTBL_CHUNK
+ * Allocate a set of buffer table entries using the specified owner ID. This
+ * operation allocates the required buffer table entries (and fails if it
+ * cannot do so). The buffer table entries will initially be zeroed.
+ */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK 0x87
+
+/* MC_CMD_ALLOC_BUFTBL_CHUNK_IN msgrequest */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_LEN 8
+/* Owner ID to use */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_OWNER_OFST 0
+/* Size of buffer table pages to use, in bytes (note that only a few values are
+ * legal on any specific hardware).
+ */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE_OFST 4
+
+/* MC_CMD_ALLOC_BUFTBL_CHUNK_OUT msgresponse */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_LEN 12
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_HANDLE_OFST 0
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_NUMENTRIES_OFST 4
+/* Buffer table IDs for use in DMA descriptors. */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_ID_OFST 8
+
+
+/***********************************/
+/* MC_CMD_PROGRAM_BUFTBL_ENTRIES
+ * Reprogram a set of buffer table entries in the specified chunk.
+ */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES 0x88
+
+/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN msgrequest */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMIN 20
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX 268
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LEN(num) (12+8*(num))
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_OFST 0
+/* ID */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_OFST 4
+/* Num entries */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 8
+/* Buffer table entry address */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_OFST 12
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LEN 8
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_OFST 12
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_OFST 16
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MINNUM 1
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM 32
+
+/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT msgresponse */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT_LEN 0
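+
+/* Illustrative sketch: the usual buffer-table flow is ALLOC_BUFTBL_CHUNK to
+ * obtain a handle, one or more PROGRAM_BUFTBL_ENTRIES calls against that
+ * handle, and FREE_BUFTBL_CHUNK on teardown. Assuming the mcdi.h helpers,
+ * a placeholder owner_id, and a 4096-byte page size (legal page sizes are
+ * hardware-specific):
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_ALLOC_BUFTBL_CHUNK_IN_LEN);
+ *	MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_LEN);
+ *	size_t outlen;
+ *	u32 handle;
+ *	int rc;
+ *
+ *	MCDI_SET_DWORD(inbuf, ALLOC_BUFTBL_CHUNK_IN_OWNER, owner_id);
+ *	MCDI_SET_DWORD(inbuf, ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE, 4096);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_BUFTBL_CHUNK, inbuf, sizeof(inbuf),
+ *			  outbuf, sizeof(outbuf), &outlen);
+ *	handle = MCDI_DWORD(outbuf, ALLOC_BUFTBL_CHUNK_OUT_HANDLE);
+ */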
+
+
+/***********************************/
+/* MC_CMD_FREE_BUFTBL_CHUNK
+ */
+#define MC_CMD_FREE_BUFTBL_CHUNK 0x89
+
+/* MC_CMD_FREE_BUFTBL_CHUNK_IN msgrequest */
+#define MC_CMD_FREE_BUFTBL_CHUNK_IN_LEN 4
+#define MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_OFST 0
+
+/* MC_CMD_FREE_BUFTBL_CHUNK_OUT msgresponse */
+#define MC_CMD_FREE_BUFTBL_CHUNK_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_FILTER_OP
+ * Multiplexed MCDI call for filter operations
+ */
+#define MC_CMD_FILTER_OP 0x8a
+
+/* MC_CMD_FILTER_OP_IN msgrequest */
+#define MC_CMD_FILTER_OP_IN_LEN 108
+/* identifies the type of operation requested */
+#define MC_CMD_FILTER_OP_IN_OP_OFST 0
+/* enum: single-recipient filter insert */
+#define MC_CMD_FILTER_OP_IN_OP_INSERT 0x0
+/* enum: single-recipient filter remove */
+#define MC_CMD_FILTER_OP_IN_OP_REMOVE 0x1
+/* enum: multi-recipient filter subscribe */
+#define MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE 0x2
+/* enum: multi-recipient filter unsubscribe */
+#define MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE 0x3
+/* enum: replace one recipient with another (warning - the filter handle may
+ * change)
+ */
+#define MC_CMD_FILTER_OP_IN_OP_REPLACE 0x4
+/* filter handle (for remove / unsubscribe operations) */
+#define MC_CMD_FILTER_OP_IN_HANDLE_OFST 4
+#define MC_CMD_FILTER_OP_IN_HANDLE_LEN 8
+#define MC_CMD_FILTER_OP_IN_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_IN_HANDLE_HI_OFST 8
+/* The port ID associated with the v-adaptor which should contain this filter.
+ */
+#define MC_CMD_FILTER_OP_IN_PORT_ID_OFST 12
+/* fields to include in match criteria */
+#define MC_CMD_FILTER_OP_IN_MATCH_FIELDS_OFST 16
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_LBN 0
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_IP_LBN 1
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_MAC_LBN 2
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_PORT_LBN 3
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_MAC_LBN 4
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_PORT_LBN 5
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_ETHER_TYPE_LBN 6
+#define MC_CMD_FILTER_OP_IN_MATCH_ETHER_TYPE_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_INNER_VLAN_LBN 7
+#define MC_CMD_FILTER_OP_IN_MATCH_INNER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN_LBN 8
+#define MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_IP_PROTO_LBN 9
+#define MC_CMD_FILTER_OP_IN_MATCH_IP_PROTO_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF0_LBN 10
+#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF0_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF1_LBN 11
+#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF1_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN 31
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1
+/* receive destination */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_OFST 20
+/* enum: drop packets */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_DROP 0x0
+/* enum: receive to host */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_HOST 0x1
+/* enum: receive to MC */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_MC 0x2
+/* enum: loop back to port 0 TX MAC */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_TX0 0x3
+/* enum: loop back to port 1 TX MAC */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_TX1 0x4
+/* receive queue handle (for multiple queue modes, this is the base queue) */
+#define MC_CMD_FILTER_OP_IN_RX_QUEUE_OFST 24
+/* receive mode */
+#define MC_CMD_FILTER_OP_IN_RX_MODE_OFST 28
+/* enum: receive to just the specified queue */
+#define MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE 0x0
+/* enum: receive to multiple queues using RSS context */
+#define MC_CMD_FILTER_OP_IN_RX_MODE_RSS 0x1
+/* enum: receive to multiple queues using .1p mapping */
+#define MC_CMD_FILTER_OP_IN_RX_MODE_DOT1P_MAPPING 0x2
+/* enum: install a filter entry that will never match; for test purposes only
+ */
+#define MC_CMD_FILTER_OP_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000
+/* RSS context (for RX_MODE_RSS) or .1p mapping handle (for
+ * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or
+ * MC_CMD_DOT1P_MAPPING_ALLOC. Note that these handles should be considered
+ * opaque to the host, although a value of 0xFFFFFFFF is guaranteed never to be
+ * a valid handle.
+ */
+#define MC_CMD_FILTER_OP_IN_RX_CONTEXT_OFST 32
+/* transmit domain (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_IN_TX_DOMAIN_OFST 36
+/* transmit destination (either set the MAC and/or PM bits for explicit
+ * control, or set this field to TX_DEST_DEFAULT for sensible default
+ * behaviour)
+ */
+#define MC_CMD_FILTER_OP_IN_TX_DEST_OFST 40
+/* enum: request default behaviour (based on filter type) */
+#define MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT 0xffffffff
+#define MC_CMD_FILTER_OP_IN_TX_DEST_MAC_LBN 0
+#define MC_CMD_FILTER_OP_IN_TX_DEST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_TX_DEST_PM_LBN 1
+#define MC_CMD_FILTER_OP_IN_TX_DEST_PM_WIDTH 1
+/* source MAC address to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_SRC_MAC_OFST 44
+#define MC_CMD_FILTER_OP_IN_SRC_MAC_LEN 6
+/* source port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_SRC_PORT_OFST 50
+#define MC_CMD_FILTER_OP_IN_SRC_PORT_LEN 2
+/* destination MAC address to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_DST_MAC_OFST 52
+#define MC_CMD_FILTER_OP_IN_DST_MAC_LEN 6
+/* destination port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_DST_PORT_OFST 58
+#define MC_CMD_FILTER_OP_IN_DST_PORT_LEN 2
+/* Ethernet type to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_ETHER_TYPE_OFST 60
+#define MC_CMD_FILTER_OP_IN_ETHER_TYPE_LEN 2
+/* Inner VLAN tag to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_INNER_VLAN_OFST 62
+#define MC_CMD_FILTER_OP_IN_INNER_VLAN_LEN 2
+/* Outer VLAN tag to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_OUTER_VLAN_OFST 64
+#define MC_CMD_FILTER_OP_IN_OUTER_VLAN_LEN 2
+/* IP protocol to match (in low byte; set high byte to 0) */
+#define MC_CMD_FILTER_OP_IN_IP_PROTO_OFST 66
+#define MC_CMD_FILTER_OP_IN_IP_PROTO_LEN 2
+/* Firmware defined register 0 to match (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_IN_FWDEF0_OFST 68
+/* Firmware defined register 1 to match (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_IN_FWDEF1_OFST 72
+/* source IP address to match (as bytes in network order; set last 12 bytes to
+ * 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_IN_SRC_IP_OFST 76
+#define MC_CMD_FILTER_OP_IN_SRC_IP_LEN 16
+/* destination IP address to match (as bytes in network order; set last 12
+ * bytes to 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_IN_DST_IP_OFST 92
+#define MC_CMD_FILTER_OP_IN_DST_IP_LEN 16
+
+/* MC_CMD_FILTER_OP_OUT msgresponse */
+#define MC_CMD_FILTER_OP_OUT_LEN 12
+/* identifies the type of operation requested */
+#define MC_CMD_FILTER_OP_OUT_OP_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_FILTER_OP_IN/OP */
+/* Returned filter handle (for insert / subscribe operations). Note that these
+ * handles should be considered opaque to the host, although a value of
+ * 0xFFFFFFFF_FFFFFFFF is guaranteed never to be a valid handle.
+ */
+#define MC_CMD_FILTER_OP_OUT_HANDLE_OFST 4
+#define MC_CMD_FILTER_OP_OUT_HANDLE_LEN 8
+#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_OFST 8
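+
+/* Illustrative sketch: inserting a single-recipient filter that matches on
+ * destination MAC and delivers to one RX queue. The constants are the ones
+ * defined above; the mcdi.h helpers (MCDI_SET_DWORD, MCDI_PTR, MCDI_QWORD,
+ * efx_mcdi_rpc) and the efx/vport_id/rxq_index/mac_addr variables are
+ * assumptions:
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
+ *	MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_OUT_LEN);
+ *	size_t outlen;
+ *	u64 handle;
+ *	int rc;
+ *
+ *	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, MC_CMD_FILTER_OP_IN_OP_INSERT);
+ *	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, vport_id);
+ *	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_MATCH_FIELDS,
+ *		       1 << MC_CMD_FILTER_OP_IN_MATCH_DST_MAC_LBN);
+ *	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST,
+ *		       MC_CMD_FILTER_OP_IN_RX_DEST_HOST);
+ *	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE, rxq_index);
+ *	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE,
+ *		       MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE);
+ *	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DEST,
+ *		       MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT);
+ *	memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_DST_MAC), mac_addr, ETH_ALEN);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
+ *			  outbuf, sizeof(outbuf), &outlen);
+ *	handle = MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE);
+ */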
+
+
+/***********************************/
+/* MC_CMD_GET_PARSER_DISP_INFO
+ * Get information related to the parser-dispatcher subsystem
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO 0xe4
+
+/* MC_CMD_GET_PARSER_DISP_INFO_IN msgrequest */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_LEN 4
+/* identifies the type of operation requested */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_OFST 0
+/* enum: read the list of supported RX filter matches */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES 0x1
+
+/* MC_CMD_GET_PARSER_DISP_INFO_OUT msgresponse */
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMIN 8
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX 252
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LEN(num) (8+4*(num))
+/* identifies the type of operation requested */
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_OP_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
+/* number of supported match types */
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES_OFST 4
+/* array of supported match types (valid MATCH_FIELDS values for
+ * MC_CMD_FILTER_OP) sorted in decreasing priority order
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_OFST 8
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_LEN 4
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MINNUM 0
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM 61
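+
+/* Illustrative sketch: reading the supported RX match list (the mcdi.h
+ * helpers, including MCDI_ARRAY_DWORD, and the efx pointer are assumptions):
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PARSER_DISP_INFO_IN_LEN);
+ *	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX);
+ *	size_t outlen;
+ *	unsigned int count, i, match;
+ *	int rc;
+ *
+ *	MCDI_SET_DWORD(inbuf, GET_PARSER_DISP_INFO_IN_OP,
+ *		       MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_GET_PARSER_DISP_INFO, inbuf, sizeof(inbuf),
+ *			  outbuf, sizeof(outbuf), &outlen);
+ *	count = MCDI_DWORD(outbuf,
+ *			   GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES);
+ *	for (i = 0; i < count; i++)
+ *		match = MCDI_ARRAY_DWORD(outbuf,
+ *				GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES, i);
+ */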
+
+
+/***********************************/
+/* MC_CMD_PARSER_DISP_RW
+ * Direct read/write of parser-dispatcher state (DICPUs and LUE) for debugging
+ */
+#define MC_CMD_PARSER_DISP_RW 0xe5
+
+/* MC_CMD_PARSER_DISP_RW_IN msgrequest */
+#define MC_CMD_PARSER_DISP_RW_IN_LEN 32
+/* identifies the target of the operation */
+#define MC_CMD_PARSER_DISP_RW_IN_TARGET_OFST 0
+/* enum: RX dispatcher CPU */
+#define MC_CMD_PARSER_DISP_RW_IN_RX_DICPU 0x0
+/* enum: TX dispatcher CPU */
+#define MC_CMD_PARSER_DISP_RW_IN_TX_DICPU 0x1
+/* enum: Lookup engine */
+#define MC_CMD_PARSER_DISP_RW_IN_LUE 0x2
+/* identifies the type of operation requested */
+#define MC_CMD_PARSER_DISP_RW_IN_OP_OFST 4
+/* enum: read a word of DICPU DMEM or a LUE entry */
+#define MC_CMD_PARSER_DISP_RW_IN_READ 0x0
+/* enum: write a word of DICPU DMEM or a LUE entry */
+#define MC_CMD_PARSER_DISP_RW_IN_WRITE 0x1
+/* enum: read-modify-write a word of DICPU DMEM (not valid for LUE) */
+#define MC_CMD_PARSER_DISP_RW_IN_RMW 0x2
+/* data memory address or LUE index */
+#define MC_CMD_PARSER_DISP_RW_IN_ADDRESS_OFST 8
+/* value to write (for DMEM writes) */
+#define MC_CMD_PARSER_DISP_RW_IN_DMEM_WRITE_VALUE_OFST 12
+/* XOR value (for DMEM read-modify-writes: new = (old & mask) ^ value) */
+#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_OFST 12
+/* AND mask (for DMEM read-modify-writes: new = (old & mask) ^ value) */
+#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_OFST 16
+/* value to write (for LUE writes) */
+#define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_OFST 12
+#define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_LEN 20
+
+/* MC_CMD_PARSER_DISP_RW_OUT msgresponse */
+#define MC_CMD_PARSER_DISP_RW_OUT_LEN 52
+/* value read (for DMEM reads) */
+#define MC_CMD_PARSER_DISP_RW_OUT_DMEM_READ_VALUE_OFST 0
+/* value read (for LUE reads) */
+#define MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_OFST 0
+#define MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_LEN 20
+/* up to 8 32-bit words of additional soft state from the LUE manager (the
+ * exact content is firmware-dependent and intended only for debug use)
+ */
+#define MC_CMD_PARSER_DISP_RW_OUT_LUE_MGR_STATE_OFST 20
+#define MC_CMD_PARSER_DISP_RW_OUT_LUE_MGR_STATE_LEN 32
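+
+/* A worked note on the DMEM read-modify-write above, new = (old & mask) ^ value:
+ * to force a bit to 1, clear it in AND_MASK and set it in XOR_VALUE; to force
+ * it to 0, clear it in AND_MASK and leave it clear in XOR_VALUE; to toggle it,
+ * keep AND_MASK at 0xffffffff and set the bit in XOR_VALUE. For example,
+ * setting bit 5 of a DMEM word uses AND_MASK = ~(1 << 5) and
+ * XOR_VALUE = (1 << 5).
+ */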
+
+
+/***********************************/
+/* MC_CMD_GET_PF_COUNT
+ * Get number of PFs on the device.
+ */
+#define MC_CMD_GET_PF_COUNT 0xb6
+
+/* MC_CMD_GET_PF_COUNT_IN msgrequest */
+#define MC_CMD_GET_PF_COUNT_IN_LEN 0
+
+/* MC_CMD_GET_PF_COUNT_OUT msgresponse */
+#define MC_CMD_GET_PF_COUNT_OUT_LEN 1
+/* Identifies the number of PFs on the device. */
+#define MC_CMD_GET_PF_COUNT_OUT_PF_COUNT_OFST 0
+#define MC_CMD_GET_PF_COUNT_OUT_PF_COUNT_LEN 1
+
+
+/***********************************/
+/* MC_CMD_SET_PF_COUNT
+ * Set number of PFs on the device.
+ */
+#define MC_CMD_SET_PF_COUNT 0xb7
+
+/* MC_CMD_SET_PF_COUNT_IN msgrequest */
+#define MC_CMD_SET_PF_COUNT_IN_LEN 4
+/* New number of PFs on the device. */
+#define MC_CMD_SET_PF_COUNT_IN_PF_COUNT_OFST 0
+
+/* MC_CMD_SET_PF_COUNT_OUT msgresponse */
+#define MC_CMD_SET_PF_COUNT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_PORT_ASSIGNMENT
+ * Get port assignment for current PCI function.
+ */
+#define MC_CMD_GET_PORT_ASSIGNMENT 0xb8
+
+/* MC_CMD_GET_PORT_ASSIGNMENT_IN msgrequest */
+#define MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN 0
+
+/* MC_CMD_GET_PORT_ASSIGNMENT_OUT msgresponse */
+#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN 4
+/* Identifies the port assignment for this function. */
+#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_PORT_OFST 0
+
+
+/***********************************/
+/* MC_CMD_SET_PORT_ASSIGNMENT
+ * Set port assignment for current PCI function.
+ */
+#define MC_CMD_SET_PORT_ASSIGNMENT 0xb9
+
+/* MC_CMD_SET_PORT_ASSIGNMENT_IN msgrequest */
+#define MC_CMD_SET_PORT_ASSIGNMENT_IN_LEN 4
+/* Identifies the port assignment for this function. */
+#define MC_CMD_SET_PORT_ASSIGNMENT_IN_PORT_OFST 0
+
+/* MC_CMD_SET_PORT_ASSIGNMENT_OUT msgresponse */
+#define MC_CMD_SET_PORT_ASSIGNMENT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_ALLOC_VIS
+ * Allocate VIs for current PCI function.
+ */
+#define MC_CMD_ALLOC_VIS 0x8b
+
+/* MC_CMD_ALLOC_VIS_IN msgrequest */
+#define MC_CMD_ALLOC_VIS_IN_LEN 8
+/* The minimum number of VIs that is acceptable */
+#define MC_CMD_ALLOC_VIS_IN_MIN_VI_COUNT_OFST 0
+/* The maximum number of VIs that would be useful */
+#define MC_CMD_ALLOC_VIS_IN_MAX_VI_COUNT_OFST 4
+
+/* MC_CMD_ALLOC_VIS_OUT msgresponse */
+#define MC_CMD_ALLOC_VIS_OUT_LEN 8
+/* The number of VIs allocated on this function */
+#define MC_CMD_ALLOC_VIS_OUT_VI_COUNT_OFST 0
+/* The base absolute VI number allocated to this function. Required to
+ * correctly interpret wakeup events.
+ */
+#define MC_CMD_ALLOC_VIS_OUT_VI_BASE_OFST 4
+
+
+/***********************************/
+/* MC_CMD_FREE_VIS
+ * Free VIs for current PCI function. Any linked PIO buffers will be unlinked,
+ * but not freed.
+ */
+#define MC_CMD_FREE_VIS 0x8c
+
+/* MC_CMD_FREE_VIS_IN msgrequest */
+#define MC_CMD_FREE_VIS_IN_LEN 0
+
+/* MC_CMD_FREE_VIS_OUT msgresponse */
+#define MC_CMD_FREE_VIS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_SRIOV_CFG
+ * Get SRIOV config for this PF.
+ */
+#define MC_CMD_GET_SRIOV_CFG 0xba
+
+/* MC_CMD_GET_SRIOV_CFG_IN msgrequest */
+#define MC_CMD_GET_SRIOV_CFG_IN_LEN 0
+
+/* MC_CMD_GET_SRIOV_CFG_OUT msgresponse */
+#define MC_CMD_GET_SRIOV_CFG_OUT_LEN 20
+/* Number of VFs currently enabled. */
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_CURRENT_OFST 0
+/* Max number of VFs before the SR-IOV stride and offset may need changing. */
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_MAX_OFST 4
+#define MC_CMD_GET_SRIOV_CFG_OUT_FLAGS_OFST 8
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_LBN 0
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_WIDTH 1
+/* RID offset of first VF from PF. */
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_OFFSET_OFST 12
+/* RID offset of each subsequent VF from the previous. */
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_STRIDE_OFST 16
+
+
+/***********************************/
+/* MC_CMD_SET_SRIOV_CFG
+ * Set SRIOV config for this PF.
+ */
+#define MC_CMD_SET_SRIOV_CFG 0xbb
+
+/* MC_CMD_SET_SRIOV_CFG_IN msgrequest */
+#define MC_CMD_SET_SRIOV_CFG_IN_LEN 20
+/* Number of VFs currently enabled. */
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_CURRENT_OFST 0
+/* Max number of VFs before the SR-IOV stride and offset may need changing. */
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_OFST 4
+#define MC_CMD_SET_SRIOV_CFG_IN_FLAGS_OFST 8
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_LBN 0
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_WIDTH 1
+/* RID offset of first VF from PF, or 0 for no change, or
+ * MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate an offset.
+ */
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_OFFSET_OFST 12
+/* RID offset of each subsequent VF from the previous, 0 for no change, or
+ * MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate a stride.
+ */
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_STRIDE_OFST 16
+
+/* MC_CMD_SET_SRIOV_CFG_OUT msgresponse */
+#define MC_CMD_SET_SRIOV_CFG_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_VI_ALLOC_INFO
+ * Get information about the number of VIs and the base VI number allocated to
+ * this function.
+ */
+#define MC_CMD_GET_VI_ALLOC_INFO 0x8d
+
+/* MC_CMD_GET_VI_ALLOC_INFO_IN msgrequest */
+#define MC_CMD_GET_VI_ALLOC_INFO_IN_LEN 0
+
+/* MC_CMD_GET_VI_ALLOC_INFO_OUT msgresponse */
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_LEN 8
+/* The number of VIs allocated on this function */
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_OFST 0
+/* The base absolute VI number allocated to this function. Required to
+ * correctly interpret wakeup events.
+ */
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_OFST 4
+
+
+/***********************************/
+/* MC_CMD_DUMP_VI_STATE
+ * For CmdClient use. Dump pertinent information on a specific absolute VI.
+ */
+#define MC_CMD_DUMP_VI_STATE 0x8e
+
+/* MC_CMD_DUMP_VI_STATE_IN msgrequest */
+#define MC_CMD_DUMP_VI_STATE_IN_LEN 4
+/* The VI number to query. */
+#define MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_OFST 0
+
+/* MC_CMD_DUMP_VI_STATE_OUT msgresponse */
+#define MC_CMD_DUMP_VI_STATE_OUT_LEN 96
+/* The PF part of the function owning this VI. */
+#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_PF_OFST 0
+#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_PF_LEN 2
+/* The VF part of the function owning this VI. */
+#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_VF_OFST 2
+#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_VF_LEN 2
+/* Base of VIs allocated to this function. */
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_BASE_OFST 4
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_BASE_LEN 2
+/* Count of VIs allocated to the owner function. */
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_COUNT_OFST 6
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_COUNT_LEN 2
+/* Base interrupt vector allocated to this function. */
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_BASE_OFST 8
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_BASE_LEN 2
+/* Number of interrupt vectors allocated to this function. */
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_COUNT_OFST 10
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_COUNT_LEN 2
+/* Raw evq ptr table data. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_OFST 12
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LO_OFST 12
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_HI_OFST 16
+/* Raw evq timer table data. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_OFST 20
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LO_OFST 20
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_OFST 24
+/* Combined metadata field. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_OFST 28
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_LBN 0
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_WIDTH 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_LBN 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_LBN 24
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_WIDTH 8
+/* TXDPCPU raw table data for queue. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_OFST 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LO_OFST 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_HI_OFST 36
+/* TXDPCPU raw table data for queue. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_OFST 40
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LO_OFST 40
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_HI_OFST 44
+/* TXDPCPU raw table data for queue. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_OFST 48
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LO_OFST 48
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_HI_OFST 52
+/* Combined metadata field. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_OFST 56
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LO_OFST 56
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_HI_OFST 60
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_LBN 0
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_WIDTH 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_LBN 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_LBN 24
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_LBN 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_LBN 40
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_WIDTH 24
+/* RXDPCPU raw table data for queue. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_OFST 64
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LO_OFST 64
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_HI_OFST 68
+/* RXDPCPU raw table data for queue. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_OFST 72
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LO_OFST 72
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_HI_OFST 76
+/* Reserved, currently 0. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_OFST 80
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LO_OFST 80
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_HI_OFST 84
+/* Combined metadata field. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_OFST 88
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LO_OFST 88
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_HI_OFST 92
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_LBN 0
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_WIDTH 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_LBN 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_LBN 24
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_LBN 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_WIDTH 8
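+
+/* Illustrative sketch: the LBN/WIDTH pairs above describe sub-fields of the
+ * 64-bit *_META values; for example, extracting QSTATE from a raw rx_meta
+ * value taken from this response (rx_meta is a placeholder u64):
+ *
+ *	u8 qstate = (rx_meta >> MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_LBN) &
+ *		    ((1 << MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_WIDTH) - 1);
+ */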
+
+
+/***********************************/
+/* MC_CMD_ALLOC_PIOBUF
+ * Allocate a push I/O buffer for later use with a tx queue.
+ */
+#define MC_CMD_ALLOC_PIOBUF 0x8f
+
+/* MC_CMD_ALLOC_PIOBUF_IN msgrequest */
+#define MC_CMD_ALLOC_PIOBUF_IN_LEN 0
+
+/* MC_CMD_ALLOC_PIOBUF_OUT msgresponse */
+#define MC_CMD_ALLOC_PIOBUF_OUT_LEN 4
+/* Handle for allocated push I/O buffer. */
+#define MC_CMD_ALLOC_PIOBUF_OUT_PIOBUF_HANDLE_OFST 0
+
+
+/***********************************/
+/* MC_CMD_FREE_PIOBUF
+ * Free a push I/O buffer.
+ */
+#define MC_CMD_FREE_PIOBUF 0x90
+
+/* MC_CMD_FREE_PIOBUF_IN msgrequest */
+#define MC_CMD_FREE_PIOBUF_IN_LEN 4
+/* Handle for allocated push I/O buffer. */
+#define MC_CMD_FREE_PIOBUF_IN_PIOBUF_HANDLE_OFST 0
+
+/* MC_CMD_FREE_PIOBUF_OUT msgresponse */
+#define MC_CMD_FREE_PIOBUF_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_VI_TLP_PROCESSING
+ * Get TLP steering and ordering information for a VI.
+ */
+#define MC_CMD_GET_VI_TLP_PROCESSING 0xb0
+
+/* MC_CMD_GET_VI_TLP_PROCESSING_IN msgrequest */
+#define MC_CMD_GET_VI_TLP_PROCESSING_IN_LEN 4
+/* VI number to get information for. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0
+
+/* MC_CMD_GET_VI_TLP_PROCESSING_OUT msgresponse */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_LEN 4
+/* Transaction processing steering hint 1 for use with the Rx Queue. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG1_RX_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG1_RX_LEN 1
+/* Transaction processing steering hint 2 for use with the Ev Queue. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG2_EV_OFST 1
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG2_EV_LEN 1
+/* Use Relaxed ordering model for TLPs on this VI. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_RELAXED_ORDERING_LBN 16
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_RELAXED_ORDERING_WIDTH 1
+/* Use ID based ordering for TLPs on this VI. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_ID_BASED_ORDERING_LBN 17
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_ID_BASED_ORDERING_WIDTH 1
+/* Set no snoop bit for TLPs on this VI. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_NO_SNOOP_LBN 18
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_NO_SNOOP_WIDTH 1
+/* Enable TPH for TLPs on this VI. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_LBN 19
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_WIDTH 1
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_DATA_OFST 0
+
+
+/***********************************/
+/* MC_CMD_SET_VI_TLP_PROCESSING
+ * Set TLP steering and ordering information for a VI.
+ */
+#define MC_CMD_SET_VI_TLP_PROCESSING 0xb1
+
+/* MC_CMD_SET_VI_TLP_PROCESSING_IN msgrequest */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_LEN 8
+/* VI number to set information for. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0
+/* Transaction processing steering hint 1 for use with the Rx Queue. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_LEN 1
+/* Transaction processing steering hint 2 for use with the Ev Queue. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG2_EV_OFST 5
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG2_EV_LEN 1
+/* Use Relaxed ordering model for TLPs on this VI. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_RELAXED_ORDERING_LBN 48
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_RELAXED_ORDERING_WIDTH 1
+/* Use ID based ordering for TLPs on this VI. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_ID_BASED_ORDERING_LBN 49
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_ID_BASED_ORDERING_WIDTH 1
+/* Set the no snoop bit for TLPs on this VI. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_NO_SNOOP_LBN 50
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_NO_SNOOP_WIDTH 1
+/* Enable TPH for TLPs on this VI. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_LBN 51
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_WIDTH 1
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_DATA_OFST 4
+
+/* MC_CMD_SET_VI_TLP_PROCESSING_OUT msgresponse */
+#define MC_CMD_SET_VI_TLP_PROCESSING_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_TLP_PROCESSING_GLOBALS
+ * Get global PCIe steering and transaction processing configuration.
+ */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS 0xbc
+
+/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN msgrequest */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_LEN 4
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0
+/* enum: MISC. */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_MISC 0x0
+/* enum: IDO. */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_IDO 0x1
+/* enum: RO. */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_RO 0x2
+/* enum: TPH Type. */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_TPH_TYPE 0x3
+
+/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT msgresponse */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_LEN 8
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_GLOBAL_CATEGORY_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */
+/* Amalgamated TLP info word. */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_WORD_OFST 4
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_LBN 0
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_LBN 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_WIDTH 31
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_DL_EN_LBN 0
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_DL_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_TX_EN_LBN 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_TX_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_EV_EN_LBN 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_EV_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_RX_EN_LBN 3
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_RX_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_SPARE_LBN 4
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_SPARE_WIDTH 28
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_RXDMA_EN_LBN 0
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_RXDMA_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_TXDMA_EN_LBN 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_TXDMA_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_DL_EN_LBN 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_DL_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_SPARE_LBN 3
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_SPARE_WIDTH 29
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_MSIX_LBN 0
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_MSIX_WIDTH 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_DL_LBN 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_DL_WIDTH 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_TX_LBN 4
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_TX_WIDTH 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_EV_LBN 6
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_EV_WIDTH 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_RX_LBN 8
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_RX_WIDTH 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TLP_TYPE_SPARE_LBN 9
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TLP_TYPE_SPARE_WIDTH 23
+
+
+/***********************************/
+/* MC_CMD_SET_TLP_PROCESSING_GLOBALS
+ * Set global PCIe steering and transaction processing configuration.
+ */
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS 0xbd
+
+/* MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN msgrequest */
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_LEN 8
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_GET_TLP_PROCESSING_GLOBALS/MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */
+/* Amalgamated TLP info word. */
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_WORD_OFST 4
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_LBN 0
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_LBN 0
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_TX_EN_LBN 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_TX_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_EV_EN_LBN 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_EV_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_RX_EN_LBN 3
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_RX_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_RXDMA_EN_LBN 0
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_RXDMA_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_TXDMA_EN_LBN 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_TXDMA_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_DL_EN_LBN 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_DL_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_MSIX_LBN 0
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_MSIX_WIDTH 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_DL_LBN 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_DL_WIDTH 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_TX_LBN 4
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_TX_WIDTH 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_EV_LBN 6
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_EV_WIDTH 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_RX_LBN 8
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_RX_WIDTH 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_SPARE_LBN 10
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_SPARE_WIDTH 22
+
+/* MC_CMD_SET_TLP_PROCESSING_GLOBALS_OUT msgresponse */
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SATELLITE_DOWNLOAD
+ * Download a new set of images to the satellite CPUs from the host.
+ */
+#define MC_CMD_SATELLITE_DOWNLOAD 0x91
+
+/* MC_CMD_SATELLITE_DOWNLOAD_IN msgrequest: The reset requirements for the CPUs
+ * are subtle, and so downloads must proceed in a number of phases.
+ *
+ * 1) PHASE_RESET with a target of TARGET_ALL and chunk ID/length of 0.
+ *
+ * 2) PHASE_IMEMS for each of the IMEM targets (target IDs 0-11). Each download
+ * may consist of multiple chunks. The final chunk (with CHUNK_ID_LAST) should
+ * be a checksum (a simple 32-bit sum) of the transferred data. An individual
+ * download may be aborted using CHUNK_ID_ABORT.
+ *
+ * 3) PHASE_VECTORS for each of the vector table targets (target IDs 12-15),
+ * similar to PHASE_IMEMS.
+ *
+ * 4) PHASE_READY with a target of TARGET_ALL and chunk ID/length of 0.
+ *
+ * After any error (a requested abort is not considered to be an error) the
+ * sequence must be restarted from PHASE_RESET.
+ */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_LENMIN 20
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_LENMAX 252
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_LEN(num) (16+4*(num))
+/* Download phase. (Note: the IDLE phase is used internally and is never valid
+ * in a command from the host.)
+ */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_OFST 0
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IDLE 0x0 /* enum */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_RESET 0x1 /* enum */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IMEMS 0x2 /* enum */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_VECTORS 0x3 /* enum */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_READY 0x4 /* enum */
+/* Target for download. (These match the blob numbers defined in
+ * mc_flash_layout.h.)
+ */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_OFST 4
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_TEXT 0x0
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_TEXT 0x1
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDP_TEXT 0x2
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDP_TEXT 0x3
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_LUT 0x4
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_LUT_CFG 0x5
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_LUT 0x6
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_LUT_CFG 0x7
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_PGM 0x8
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_SL_PGM 0x9
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_PGM 0xa
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_SL_PGM 0xb
+/* enum: Valid in phase 3 (PHASE_VECTORS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_VTBL0 0xc
+/* enum: Valid in phase 3 (PHASE_VECTORS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_VTBL0 0xd
+/* enum: Valid in phase 3 (PHASE_VECTORS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_VTBL1 0xe
+/* enum: Valid in phase 3 (PHASE_VECTORS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_VTBL1 0xf
+/* enum: Valid in phases 1 (PHASE_RESET) and 4 (PHASE_READY) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_ALL 0xffffffff
+/* Chunk ID, or CHUNK_ID_LAST or CHUNK_ID_ABORT */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_OFST 8
+/* enum: Last chunk, containing checksum rather than data */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LAST 0xffffffff
+/* enum: Abort download of this item */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_ABORT 0xfffffffe
+/* Length of this chunk in bytes */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_LEN_OFST 12
+/* Data for this chunk */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_OFST 16
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_LEN 4
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_MINNUM 1
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_MAXNUM 59
+
+/* MC_CMD_SATELLITE_DOWNLOAD_OUT msgresponse */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_LEN 8
+/* Same as MC_CMD_ERR field, but included as 0 in success cases */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_RESULT_OFST 0
+/* Extra status information */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_INFO_OFST 4
+/* enum: Code download OK, completed. */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_COMPLETE 0x0
+/* enum: Code download aborted as requested. */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_ABORTED 0x1
+/* enum: Code download OK so far, send next chunk. */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_NEXT_CHUNK 0x2
+/* enum: Download phases out of sequence */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_PHASE 0x100
+/* enum: Bad target for this phase */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_TARGET 0x101
+/* enum: Chunk ID out of sequence */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHUNK_ID 0x200
+/* enum: Chunk length zero or too large */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHUNK_LEN 0x201
+/* enum: Checksum was incorrect */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHECKSUM 0x300
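+
+/* Illustrative sketch of one PHASE_IMEMS download as described above; the
+ * mcdi.h helpers and the efx, image, target, nchunks and chunk_len variables
+ * are assumptions, and error/abort handling is omitted:
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_SATELLITE_DOWNLOAD_IN_LENMAX);
+ *	MCDI_DECLARE_BUF(outbuf, MC_CMD_SATELLITE_DOWNLOAD_OUT_LEN);
+ *	size_t outlen;
+ *	unsigned int chunk;
+ *	int rc;
+ *
+ *	for (chunk = 0; chunk < nchunks; chunk++) {
+ *		MCDI_SET_DWORD(inbuf, SATELLITE_DOWNLOAD_IN_PHASE,
+ *			       MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IMEMS);
+ *		MCDI_SET_DWORD(inbuf, SATELLITE_DOWNLOAD_IN_TARGET, target);
+ *		MCDI_SET_DWORD(inbuf, SATELLITE_DOWNLOAD_IN_CHUNK_ID, chunk);
+ *		MCDI_SET_DWORD(inbuf, SATELLITE_DOWNLOAD_IN_CHUNK_LEN, chunk_len);
+ *		memcpy(MCDI_PTR(inbuf, SATELLITE_DOWNLOAD_IN_CHUNK_DATA),
+ *		       image + chunk * chunk_len, chunk_len);
+ *		rc = efx_mcdi_rpc(efx, MC_CMD_SATELLITE_DOWNLOAD, inbuf,
+ *				  MC_CMD_SATELLITE_DOWNLOAD_IN_LEN(chunk_len / 4),
+ *				  outbuf, sizeof(outbuf), &outlen);
+ *	}
+ *
+ * The whole sequence is bracketed by PHASE_RESET and PHASE_READY calls with
+ * TARGET_ALL, and each target's download is finished by a CHUNK_ID_LAST chunk
+ * carrying a simple 32-bit sum of the transferred data.
+ */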
+
+
+/***********************************/
+/* MC_CMD_GET_CAPABILITIES
+ * Get device capabilities.
+ *
+ * This is supplementary to the MC_CMD_GET_BOARD_CFG command, and intended to
+ * reference inherent device capabilities as opposed to current NVRAM config.
+ */
+#define MC_CMD_GET_CAPABILITIES 0xbe
+
+/* MC_CMD_GET_CAPABILITIES_IN msgrequest */
+#define MC_CMD_GET_CAPABILITIES_IN_LEN 0
+
+/* MC_CMD_GET_CAPABILITIES_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_OUT_LEN 20
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+/* RxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: RXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* TxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: TXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 /* enum */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 /* enum */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_VSWITCH 0x3 /* enum */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 /* enum */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 /* enum */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 /* enum */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_VSWITCH 0x3 /* enum */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 /* enum */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_OUT_HW_CAPABILITIES_OFST 12
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_OUT_LICENSE_CAPABILITIES_OFST 16
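+
+/* Illustrative sketch: testing an individual capability bit in FLAGS1 from a
+ * GET_CAPABILITIES response (MCDI_DWORD is the mcdi.h accessor; outbuf is a
+ * placeholder response buffer):
+ *
+ *	u32 flags = MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1);
+ *	bool tso_supported =
+ *		flags & (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN);
+ */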
+
+
+/***********************************/
+/* MC_CMD_V2_EXTN
+ * Encapsulation for a v2 extended command
+ */
+#define MC_CMD_V2_EXTN 0x7f
+
+/* MC_CMD_V2_EXTN_IN msgrequest */
+#define MC_CMD_V2_EXTN_IN_LEN 4
+/* the extended command number */
+#define MC_CMD_V2_EXTN_IN_EXTENDED_CMD_LBN 0
+#define MC_CMD_V2_EXTN_IN_EXTENDED_CMD_WIDTH 15
+#define MC_CMD_V2_EXTN_IN_UNUSED_LBN 15
+#define MC_CMD_V2_EXTN_IN_UNUSED_WIDTH 1
+/* the actual length of the encapsulated command (which is not in the v1
+ * header)
+ */
+#define MC_CMD_V2_EXTN_IN_ACTUAL_LEN_LBN 16
+#define MC_CMD_V2_EXTN_IN_ACTUAL_LEN_WIDTH 10
+#define MC_CMD_V2_EXTN_IN_UNUSED2_LBN 26
+#define MC_CMD_V2_EXTN_IN_UNUSED2_WIDTH 6
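+
+/* Illustrative sketch: the v2 extension header is a single dword packed from
+ * the fields above (extended_cmd and actual_len are placeholder values):
+ *
+ *	u32 v2_hdr = (extended_cmd << MC_CMD_V2_EXTN_IN_EXTENDED_CMD_LBN) |
+ *		     (actual_len << MC_CMD_V2_EXTN_IN_ACTUAL_LEN_LBN);
+ */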
+
+
+/***********************************/
+/* MC_CMD_TCM_BUCKET_ALLOC
+ * Allocate a pacer bucket (for an 802.1Qau reaction point or a snapper test)
+ */
+#define MC_CMD_TCM_BUCKET_ALLOC 0xb2
+
+/* MC_CMD_TCM_BUCKET_ALLOC_IN msgrequest */
+#define MC_CMD_TCM_BUCKET_ALLOC_IN_LEN 0
+
+/* MC_CMD_TCM_BUCKET_ALLOC_OUT msgresponse */
+#define MC_CMD_TCM_BUCKET_ALLOC_OUT_LEN 4
+/* the bucket id */
+#define MC_CMD_TCM_BUCKET_ALLOC_OUT_BUCKET_OFST 0
+
+
+/***********************************/
+/* MC_CMD_TCM_BUCKET_FREE
+ * Free a pacer bucket
+ */
+#define MC_CMD_TCM_BUCKET_FREE 0xb3
+
+/* MC_CMD_TCM_BUCKET_FREE_IN msgrequest */
+#define MC_CMD_TCM_BUCKET_FREE_IN_LEN 4
+/* the bucket id */
+#define MC_CMD_TCM_BUCKET_FREE_IN_BUCKET_OFST 0
+
+/* MC_CMD_TCM_BUCKET_FREE_OUT msgresponse */
+#define MC_CMD_TCM_BUCKET_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_TCM_BUCKET_INIT
+ * Initialise pacer bucket with a given rate
+ */
+#define MC_CMD_TCM_BUCKET_INIT 0xb4
+
+/* MC_CMD_TCM_BUCKET_INIT_IN msgrequest */
+#define MC_CMD_TCM_BUCKET_INIT_IN_LEN 8
+/* the bucket id */
+#define MC_CMD_TCM_BUCKET_INIT_IN_BUCKET_OFST 0
+/* the rate in mbps */
+#define MC_CMD_TCM_BUCKET_INIT_IN_RATE_OFST 4
+
+/* MC_CMD_TCM_BUCKET_INIT_OUT msgresponse */
+#define MC_CMD_TCM_BUCKET_INIT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_TCM_TXQ_INIT
+ * Initialise txq in pacer with given options or set options
+ */
+#define MC_CMD_TCM_TXQ_INIT 0xb5
+
+/* MC_CMD_TCM_TXQ_INIT_IN msgrequest */
+#define MC_CMD_TCM_TXQ_INIT_IN_LEN 28
+/* the txq id */
+#define MC_CMD_TCM_TXQ_INIT_IN_QID_OFST 0
+/* the static priority associated with the txq */
+#define MC_CMD_TCM_TXQ_INIT_IN_LABEL_OFST 4
+/* bitmask of the priority queues this txq is inserted into */
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_OFST 8
+/* the reaction point (RP) bucket */
+#define MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_OFST 12
+/* an already reserved bucket (typically set to bucket associated with outer
+ * vswitch)
+ */
+#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT1_OFST 16
+/* an already reserved bucket (typically set to bucket associated with inner
+ * vswitch)
+ */
+#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT2_OFST 20
+/* the min bucket (typically for ETS/minimum bandwidth) */
+#define MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_OFST 24
+
+/* MC_CMD_TCM_TXQ_INIT_OUT msgresponse */
+#define MC_CMD_TCM_TXQ_INIT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_LINK_PIOBUF
+ * Link a push I/O buffer to a TxQ
+ */
+#define MC_CMD_LINK_PIOBUF 0x92
+
+/* MC_CMD_LINK_PIOBUF_IN msgrequest */
+#define MC_CMD_LINK_PIOBUF_IN_LEN 8
+/* Handle for allocated push I/O buffer. */
+#define MC_CMD_LINK_PIOBUF_IN_PIOBUF_HANDLE_OFST 0
+/* Function Local Instance (VI) number. */
+#define MC_CMD_LINK_PIOBUF_IN_TXQ_INSTANCE_OFST 4
+
+/* MC_CMD_LINK_PIOBUF_OUT msgresponse */
+#define MC_CMD_LINK_PIOBUF_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_UNLINK_PIOBUF
+ * Unlink a push I/O buffer from a TxQ
+ */
+#define MC_CMD_UNLINK_PIOBUF 0x93
+
+/* MC_CMD_UNLINK_PIOBUF_IN msgrequest */
+#define MC_CMD_UNLINK_PIOBUF_IN_LEN 4
+/* Function Local Instance (VI) number. */
+#define MC_CMD_UNLINK_PIOBUF_IN_TXQ_INSTANCE_OFST 0
+
+/* MC_CMD_UNLINK_PIOBUF_OUT msgresponse */
+#define MC_CMD_UNLINK_PIOBUF_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VSWITCH_ALLOC
+ * allocate and initialise a v-switch.
+ */
+#define MC_CMD_VSWITCH_ALLOC 0x94
+
+/* MC_CMD_VSWITCH_ALLOC_IN msgrequest */
+#define MC_CMD_VSWITCH_ALLOC_IN_LEN 16
+/* The port to connect to the v-switch's upstream port. */
+#define MC_CMD_VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+/* The type of v-switch to create. */
+#define MC_CMD_VSWITCH_ALLOC_IN_TYPE_OFST 4
+/* enum: VLAN */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VLAN 0x1
+/* enum: VEB */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEB 0x2
+/* enum: VEPA */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEPA 0x3
+/* Flags controlling v-port creation */
+#define MC_CMD_VSWITCH_ALLOC_IN_FLAGS_OFST 8
+#define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
+#define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
+/* The number of VLAN tags to support. */
+#define MC_CMD_VSWITCH_ALLOC_IN_NUM_VLAN_TAGS_OFST 12
+
+/* MC_CMD_VSWITCH_ALLOC_OUT msgresponse */
+#define MC_CMD_VSWITCH_ALLOC_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VSWITCH_FREE
+ * de-allocate a v-switch.
+ */
+#define MC_CMD_VSWITCH_FREE 0x95
+
+/* MC_CMD_VSWITCH_FREE_IN msgrequest */
+#define MC_CMD_VSWITCH_FREE_IN_LEN 4
+/* The port to which the v-switch is connected. */
+#define MC_CMD_VSWITCH_FREE_IN_UPSTREAM_PORT_ID_OFST 0
+
+/* MC_CMD_VSWITCH_FREE_OUT msgresponse */
+#define MC_CMD_VSWITCH_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VPORT_ALLOC
+ * allocate a v-port.
+ */
+#define MC_CMD_VPORT_ALLOC 0x96
+
+/* MC_CMD_VPORT_ALLOC_IN msgrequest */
+#define MC_CMD_VPORT_ALLOC_IN_LEN 20
+/* The port to which the v-switch is connected. */
+#define MC_CMD_VPORT_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+/* The type of the new v-port. */
+#define MC_CMD_VPORT_ALLOC_IN_TYPE_OFST 4
+/* enum: VLAN (obsolete) */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VLAN 0x1
+/* enum: VEB (obsolete) */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VEB 0x2
+/* enum: VEPA (obsolete) */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VEPA 0x3
+/* enum: A normal v-port receives packets which match a specified MAC and/or
+ * VLAN.
+ */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL 0x4
+/* enum: An expansion v-port receives packets which don't match any other
+ * v-port.
+ */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_EXPANSION 0x5
+/* enum: A test v-port receives packets which match any filters installed by
+ * its downstream components.
+ */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_TEST 0x6
+/* Flags controlling v-port creation */
+#define MC_CMD_VPORT_ALLOC_IN_FLAGS_OFST 8
+#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
+#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
+/* The number of VLAN tags to insert/remove. */
+#define MC_CMD_VPORT_ALLOC_IN_NUM_VLAN_TAGS_OFST 12
+/* The actual VLAN tags to insert/remove */
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_OFST 16
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_LBN 0
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_WIDTH 16
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_LBN 16
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_WIDTH 16
+
+/* MC_CMD_VPORT_ALLOC_OUT msgresponse */
+#define MC_CMD_VPORT_ALLOC_OUT_LEN 4
+/* The handle of the new v-port */
+#define MC_CMD_VPORT_ALLOC_OUT_VPORT_ID_OFST 0
+
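+/* Illustrative usage sketch (not part of the protocol definition):
+ * allocating a NORMAL v-port and reading back its handle.  Assumes the
+ * MCDI_DECLARE_BUF()/MCDI_SET_DWORD()/MCDI_DWORD() helpers from mcdi.h
+ * and efx_mcdi_rpc(); port_id and vport_id are hypothetical caller
+ * variables.
+ *
+ *     MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ALLOC_IN_LEN);
+ *     MCDI_DECLARE_BUF(outbuf, MC_CMD_VPORT_ALLOC_OUT_LEN);
+ *     size_t outlen;
+ *     int rc;
+ *
+ *     MCDI_SET_DWORD(inbuf, VPORT_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
+ *     MCDI_SET_DWORD(inbuf, VPORT_ALLOC_IN_TYPE,
+ *                    MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL);
+ *     MCDI_SET_DWORD(inbuf, VPORT_ALLOC_IN_NUM_VLAN_TAGS, 0);
+ *     rc = efx_mcdi_rpc(efx, MC_CMD_VPORT_ALLOC, inbuf, sizeof(inbuf),
+ *                       outbuf, sizeof(outbuf), &outlen);
+ *     if (!rc && outlen >= MC_CMD_VPORT_ALLOC_OUT_LEN)
+ *         vport_id = MCDI_DWORD(outbuf, VPORT_ALLOC_OUT_VPORT_ID);
+ */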
+
+/***********************************/
+/* MC_CMD_VPORT_FREE
+ * de-allocate a v-port.
+ */
+#define MC_CMD_VPORT_FREE 0x97
+
+/* MC_CMD_VPORT_FREE_IN msgrequest */
+#define MC_CMD_VPORT_FREE_IN_LEN 4
+/* The handle of the v-port */
+#define MC_CMD_VPORT_FREE_IN_VPORT_ID_OFST 0
+
+/* MC_CMD_VPORT_FREE_OUT msgresponse */
+#define MC_CMD_VPORT_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VADAPTOR_ALLOC
+ * allocate a v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_ALLOC 0x98
+
+/* MC_CMD_VADAPTOR_ALLOC_IN msgrequest */
+#define MC_CMD_VADAPTOR_ALLOC_IN_LEN 16
+/* The port to connect to the v-adaptor's port. */
+#define MC_CMD_VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+/* Flags controlling v-adaptor creation */
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAGS_OFST 8
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_LBN 0
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_WIDTH 1
+/* The number of VLAN tags to strip on receive */
+#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLANS_OFST 12
+
+/* MC_CMD_VADAPTOR_ALLOC_OUT msgresponse */
+#define MC_CMD_VADAPTOR_ALLOC_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VADAPTOR_FREE
+ * de-allocate a v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_FREE 0x99
+
+/* MC_CMD_VADAPTOR_FREE_IN msgrequest */
+#define MC_CMD_VADAPTOR_FREE_IN_LEN 4
+/* The port to which the v-adaptor is connected. */
+#define MC_CMD_VADAPTOR_FREE_IN_UPSTREAM_PORT_ID_OFST 0
+
+/* MC_CMD_VADAPTOR_FREE_OUT msgresponse */
+#define MC_CMD_VADAPTOR_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_EVB_PORT_ASSIGN
+ * assign a port to a PCI function.
+ */
+#define MC_CMD_EVB_PORT_ASSIGN 0x9a
+
+/* MC_CMD_EVB_PORT_ASSIGN_IN msgrequest */
+#define MC_CMD_EVB_PORT_ASSIGN_IN_LEN 8
+/* The port to assign. */
+#define MC_CMD_EVB_PORT_ASSIGN_IN_PORT_ID_OFST 0
+/* The target function to modify. */
+#define MC_CMD_EVB_PORT_ASSIGN_IN_FUNCTION_OFST 4
+#define MC_CMD_EVB_PORT_ASSIGN_IN_PF_LBN 0
+#define MC_CMD_EVB_PORT_ASSIGN_IN_PF_WIDTH 16
+#define MC_CMD_EVB_PORT_ASSIGN_IN_VF_LBN 16
+#define MC_CMD_EVB_PORT_ASSIGN_IN_VF_WIDTH 16
+
+/* MC_CMD_EVB_PORT_ASSIGN_OUT msgresponse */
+#define MC_CMD_EVB_PORT_ASSIGN_OUT_LEN 0
+
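+/* Illustrative usage sketch (not part of the protocol definition): the
+ * FUNCTION field packs PF and VF sub-fields into a single dword, which
+ * would typically be filled with MCDI_POPULATE_DWORD_2() from mcdi.h
+ * (assumed here); port_id, pf and vf are hypothetical caller values.
+ *
+ *     MCDI_DECLARE_BUF(inbuf, MC_CMD_EVB_PORT_ASSIGN_IN_LEN);
+ *     int rc;
+ *
+ *     MCDI_SET_DWORD(inbuf, EVB_PORT_ASSIGN_IN_PORT_ID, port_id);
+ *     MCDI_POPULATE_DWORD_2(inbuf, EVB_PORT_ASSIGN_IN_FUNCTION,
+ *                           EVB_PORT_ASSIGN_IN_PF, pf,
+ *                           EVB_PORT_ASSIGN_IN_VF, vf);
+ *     rc = efx_mcdi_rpc(efx, MC_CMD_EVB_PORT_ASSIGN, inbuf, sizeof(inbuf),
+ *                       NULL, 0, NULL);
+ */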
+
+/***********************************/
+/* MC_CMD_RDWR_A64_REGIONS
+ * Assign the 64 bit region addresses.
+ */
+#define MC_CMD_RDWR_A64_REGIONS 0x9b
+
+/* MC_CMD_RDWR_A64_REGIONS_IN msgrequest */
+#define MC_CMD_RDWR_A64_REGIONS_IN_LEN 17
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION0_OFST 0
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION1_OFST 4
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION2_OFST 8
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION3_OFST 12
+/* Write enable bits 0-3, set to write, clear to read. */
+#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_LBN 128
+#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_WIDTH 4
+#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_BYTE_OFST 16
+#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_BYTE_LEN 1
+
+/* MC_CMD_RDWR_A64_REGIONS_OUT msgresponse: This data is always included
+ * regardless of the state of the write bits in the request.
+ */
+#define MC_CMD_RDWR_A64_REGIONS_OUT_LEN 16
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION0_OFST 0
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION1_OFST 4
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION2_OFST 8
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION3_OFST 12
+
+
+/***********************************/
+/* MC_CMD_ONLOAD_STACK_ALLOC
+ * Allocate an Onload stack ID.
+ */
+#define MC_CMD_ONLOAD_STACK_ALLOC 0x9c
+
+/* MC_CMD_ONLOAD_STACK_ALLOC_IN msgrequest */
+#define MC_CMD_ONLOAD_STACK_ALLOC_IN_LEN 4
+/* The handle of the owning upstream port */
+#define MC_CMD_ONLOAD_STACK_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+
+/* MC_CMD_ONLOAD_STACK_ALLOC_OUT msgresponse */
+#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_LEN 4
+/* The handle of the new Onload stack */
+#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_ONLOAD_STACK_ID_OFST 0
+
+
+/***********************************/
+/* MC_CMD_ONLOAD_STACK_FREE
+ * Free an Onload stack ID.
+ */
+#define MC_CMD_ONLOAD_STACK_FREE 0x9d
+
+/* MC_CMD_ONLOAD_STACK_FREE_IN msgrequest */
+#define MC_CMD_ONLOAD_STACK_FREE_IN_LEN 4
+/* The handle of the Onload stack */
+#define MC_CMD_ONLOAD_STACK_FREE_IN_ONLOAD_STACK_ID_OFST 0
+
+/* MC_CMD_ONLOAD_STACK_FREE_OUT msgresponse */
+#define MC_CMD_ONLOAD_STACK_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_ALLOC
+ * Allocate an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC 0x9e
+
+/* MC_CMD_RSS_CONTEXT_ALLOC_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN 12
+/* The handle of the owning upstream port */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+/* The type of context to allocate */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_OFST 4
+/* enum: Allocate a context for exclusive use. The key and indirection table
+ * must be explicitly configured.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE 0x0
+/* enum: Allocate a context for shared use; this will spread across a range of
+ * queues, but the key and indirection table are pre-configured and may not be
+ * changed. For this mode, NUM_QUEUES must be 2, 4, 8, 16, 32 or 64.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED 0x1
+/* Number of queues spanned by this context, in the range 1-64; valid offsets
+ * in the indirection table will be in the range 0 to NUM_QUEUES-1.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_NUM_QUEUES_OFST 8
+
+/* MC_CMD_RSS_CONTEXT_ALLOC_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN 4
+/* The handle of the new RSS context */
+#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_OFST 0
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_FREE
+ * Free an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_FREE 0x9f
+
+/* MC_CMD_RSS_CONTEXT_FREE_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_FREE_IN_LEN 4
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID_OFST 0
+
+/* MC_CMD_RSS_CONTEXT_FREE_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_SET_KEY
+ * Set the Toeplitz hash key for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_SET_KEY 0xa0
+
+/* MC_CMD_RSS_CONTEXT_SET_KEY_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN 44
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID_OFST 0
+/* The 40-byte Toeplitz hash key (TBD endianness issues?) */
+#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN 40
+
+/* MC_CMD_RSS_CONTEXT_SET_KEY_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_SET_KEY_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_GET_KEY
+ * Get the Toeplitz hash key for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_GET_KEY 0xa1
+
+/* MC_CMD_RSS_CONTEXT_GET_KEY_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN 4
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID_OFST 0
+
+/* MC_CMD_RSS_CONTEXT_GET_KEY_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN 44
+/* The 40-byte Toeplitz hash key (TBD endianness issues?) */
+#define MC_CMD_RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY_LEN 40
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_SET_TABLE
+ * Set the indirection table for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_SET_TABLE 0xa2
+
+/* MC_CMD_RSS_CONTEXT_SET_TABLE_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN 132
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID_OFST 0
+/* The 128-byte indirection table (1 byte per entry) */
+#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN 128
+
+/* MC_CMD_RSS_CONTEXT_SET_TABLE_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_SET_TABLE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_GET_TABLE
+ * Get the indirection table for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_GET_TABLE 0xa3
+
+/* MC_CMD_RSS_CONTEXT_GET_TABLE_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN 4
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID_OFST 0
+
+/* MC_CMD_RSS_CONTEXT_GET_TABLE_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN 132
+/* The 128-byte indirection table (1 byte per entry) */
+#define MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_LEN 128
+
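+/* Illustrative usage sketch (not part of the protocol definition):
+ * programming the hash key and indirection table of an EXCLUSIVE RSS
+ * context.  Assumes MCDI_DECLARE_BUF()/MCDI_SET_DWORD()/MCDI_PTR() from
+ * mcdi.h and efx_mcdi_rpc(); ctx, key and table are hypothetical caller
+ * data.
+ *
+ *     MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN);
+ *     MCDI_DECLARE_BUF(tblbuf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN);
+ *     int rc;
+ *
+ *     MCDI_SET_DWORD(keybuf, RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID, ctx);
+ *     memcpy(MCDI_PTR(keybuf, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY),
+ *            key, MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
+ *     rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_KEY, keybuf,
+ *                       sizeof(keybuf), NULL, 0, NULL);
+ *
+ *     MCDI_SET_DWORD(tblbuf, RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID, ctx);
+ *     memcpy(MCDI_PTR(tblbuf, RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE),
+ *            table, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN);
+ *     rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_TABLE, tblbuf,
+ *                       sizeof(tblbuf), NULL, 0, NULL);
+ */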
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_SET_FLAGS
+ * Set various control flags for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS 0xe1
+
+/* MC_CMD_RSS_CONTEXT_SET_FLAGS_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN 8
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0
+/* Hash control flags */
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_LBN 0
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_LBN 1
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_LBN 2
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_LBN 3
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_WIDTH 1
+
+/* MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_GET_FLAGS
+ * Get various control flags for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS 0xe2
+
+/* MC_CMD_RSS_CONTEXT_GET_FLAGS_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_LEN 4
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0
+
+/* MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN 8
+/* Hash control flags */
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN 0
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_LBN 1
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_LBN 2
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_LBN 3
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_WIDTH 1
+
+
+/***********************************/
+/* MC_CMD_DOT1P_MAPPING_ALLOC
+ * Allocate a .1p mapping.
+ */
+#define MC_CMD_DOT1P_MAPPING_ALLOC 0xa4
+
+/* MC_CMD_DOT1P_MAPPING_ALLOC_IN msgrequest */
+#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_LEN 8
+/* The handle of the owning upstream port */
+#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+/* Number of queues spanned by this mapping, in the range 1-64; valid fixed
+ * offsets in the mapping table will be in the range 0 to NUM_QUEUES-1, and
+ * referenced RSS contexts must span no more than this number.
+ */
+#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_NUM_QUEUES_OFST 4
+
+/* MC_CMD_DOT1P_MAPPING_ALLOC_OUT msgresponse */
+#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_LEN 4
+/* The handle of the new .1p mapping */
+#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_OFST 0
+
+
+/***********************************/
+/* MC_CMD_DOT1P_MAPPING_FREE
+ * Free a .1p mapping.
+ */
+#define MC_CMD_DOT1P_MAPPING_FREE 0xa5
+
+/* MC_CMD_DOT1P_MAPPING_FREE_IN msgrequest */
+#define MC_CMD_DOT1P_MAPPING_FREE_IN_LEN 4
+/* The handle of the .1p mapping */
+#define MC_CMD_DOT1P_MAPPING_FREE_IN_DOT1P_MAPPING_ID_OFST 0
+
+/* MC_CMD_DOT1P_MAPPING_FREE_OUT msgresponse */
+#define MC_CMD_DOT1P_MAPPING_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_DOT1P_MAPPING_SET_TABLE
+ * Set the mapping table for a .1p mapping.
+ */
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE 0xa6
+
+/* MC_CMD_DOT1P_MAPPING_SET_TABLE_IN msgrequest */
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_LEN 36
+/* The handle of the .1p mapping */
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0
+/* Per-priority mappings (1 32-bit word per entry - an offset or RSS context
+ * handle)
+ */
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_MAPPING_TABLE_OFST 4
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_MAPPING_TABLE_LEN 32
+
+/* MC_CMD_DOT1P_MAPPING_SET_TABLE_OUT msgresponse */
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_DOT1P_MAPPING_GET_TABLE
+ * Get the mapping table for a .1p mapping.
+ */
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE 0xa7
+
+/* MC_CMD_DOT1P_MAPPING_GET_TABLE_IN msgrequest */
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_LEN 4
+/* The handle of the .1p mapping */
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0
+
+/* MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT msgresponse */
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_LEN 36
+/* Per-priority mappings (1 32-bit word per entry - an offset or RSS context
+ * handle)
+ */
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_MAPPING_TABLE_OFST 4
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_MAPPING_TABLE_LEN 32
+
+
+/***********************************/
+/* MC_CMD_GET_VECTOR_CFG
+ * Get Interrupt Vector config for this PF.
+ */
+#define MC_CMD_GET_VECTOR_CFG 0xbf
+
+/* MC_CMD_GET_VECTOR_CFG_IN msgrequest */
+#define MC_CMD_GET_VECTOR_CFG_IN_LEN 0
+
+/* MC_CMD_GET_VECTOR_CFG_OUT msgresponse */
+#define MC_CMD_GET_VECTOR_CFG_OUT_LEN 12
+/* Base absolute interrupt vector number. */
+#define MC_CMD_GET_VECTOR_CFG_OUT_VEC_BASE_OFST 0
+/* Number of interrupt vectors allocated to this PF. */
+#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_PF_OFST 4
+/* Number of interrupt vectors to allocate per VF. */
+#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_VF_OFST 8
+
+
+/***********************************/
+/* MC_CMD_SET_VECTOR_CFG
+ * Set Interrupt Vector config for this PF.
+ */
+#define MC_CMD_SET_VECTOR_CFG 0xc0
+
+/* MC_CMD_SET_VECTOR_CFG_IN msgrequest */
+#define MC_CMD_SET_VECTOR_CFG_IN_LEN 12
+/* Base absolute interrupt vector number, or MC_CMD_RESOURCE_INSTANCE_ANY to
+ * let the system find a suitable base.
+ */
+#define MC_CMD_SET_VECTOR_CFG_IN_VEC_BASE_OFST 0
+/* Number of interrupt vectors to allocate to this PF. */
+#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_PF_OFST 4
+/* Number of interrupt vectors to allocate per VF. */
+#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_VF_OFST 8
+
+/* MC_CMD_SET_VECTOR_CFG_OUT msgresponse */
+#define MC_CMD_SET_VECTOR_CFG_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RMON_RX_CLASS_STATS
+ * Retrieve rmon rx class statistics
+ */
+#define MC_CMD_RMON_RX_CLASS_STATS 0xc3
+
+/* MC_CMD_RMON_RX_CLASS_STATS_IN msgrequest */
+#define MC_CMD_RMON_RX_CLASS_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_RX_CLASS_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_RX_CLASS_STATS_IN_CLASS_LBN 0
+#define MC_CMD_RMON_RX_CLASS_STATS_IN_CLASS_WIDTH 8
+#define MC_CMD_RMON_RX_CLASS_STATS_IN_RST_LBN 8
+#define MC_CMD_RMON_RX_CLASS_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_RX_CLASS_STATS_OUT msgresponse */
+#define MC_CMD_RMON_RX_CLASS_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_RX_CLASS_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_RX_CLASS_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_MAXNUM 63
+
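+/* Illustrative usage sketch (not part of the protocol definition): this
+ * response is variable-length, so the output buffer is sized for _LENMAX
+ * and the returned length bounds the stats array.  Assumes the mcdi.h
+ * helpers MCDI_POPULATE_DWORD_2() and MCDI_ARRAY_DWORD(); rx_class and
+ * stats are hypothetical caller data.
+ *
+ *     MCDI_DECLARE_BUF(inbuf, MC_CMD_RMON_RX_CLASS_STATS_IN_LEN);
+ *     MCDI_DECLARE_BUF(outbuf, MC_CMD_RMON_RX_CLASS_STATS_OUT_LENMAX);
+ *     size_t outlen;
+ *     int i, rc;
+ *
+ *     MCDI_POPULATE_DWORD_2(inbuf, RMON_RX_CLASS_STATS_IN_FLAGS,
+ *                           RMON_RX_CLASS_STATS_IN_CLASS, rx_class,
+ *                           RMON_RX_CLASS_STATS_IN_RST, 0);
+ *     rc = efx_mcdi_rpc(efx, MC_CMD_RMON_RX_CLASS_STATS, inbuf,
+ *                       sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
+ *     for (i = 0; !rc && i < outlen / 4; i++)
+ *         stats[i] = MCDI_ARRAY_DWORD(outbuf,
+ *                                     RMON_RX_CLASS_STATS_OUT_BUFFER, i);
+ */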
+
+/***********************************/
+/* MC_CMD_RMON_TX_CLASS_STATS
+ * Retrieve rmon tx class statistics
+ */
+#define MC_CMD_RMON_TX_CLASS_STATS 0xc4
+
+/* MC_CMD_RMON_TX_CLASS_STATS_IN msgrequest */
+#define MC_CMD_RMON_TX_CLASS_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_TX_CLASS_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_TX_CLASS_STATS_IN_CLASS_LBN 0
+#define MC_CMD_RMON_TX_CLASS_STATS_IN_CLASS_WIDTH 8
+#define MC_CMD_RMON_TX_CLASS_STATS_IN_RST_LBN 8
+#define MC_CMD_RMON_TX_CLASS_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_TX_CLASS_STATS_OUT msgresponse */
+#define MC_CMD_RMON_TX_CLASS_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_TX_CLASS_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_TX_CLASS_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_RX_SUPER_CLASS_STATS
+ * Retrieve rmon rx super_class statistics
+ */
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS 0xc5
+
+/* MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN msgrequest */
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_SUPER_CLASS_LBN 0
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_SUPER_CLASS_WIDTH 4
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_RST_LBN 4
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT msgresponse */
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_TX_SUPER_CLASS_STATS
+ * Retrieve rmon tx super_class statistics
+ */
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS 0xc6
+
+/* MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN msgrequest */
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_SUPER_CLASS_LBN 0
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_SUPER_CLASS_WIDTH 4
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_RST_LBN 4
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT msgresponse */
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_RX_ADD_QID_TO_CLASS
+ * Add qid to class for statistics collection
+ */
+#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS 0xc7
+
+/* MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN msgrequest */
+#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_LEN 12
+/* class */
+#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_CLASS_OFST 0
+/* qid */
+#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_QID_OFST 4
+/* flags */
+#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_FLAGS_OFST 8
+#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_LBN 0
+#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_WIDTH 4
+#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_PE_DELTA_LBN 4
+#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_PE_DELTA_WIDTH 4
+#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_MTU_LBN 8
+#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_MTU_WIDTH 14
+
+/* MC_CMD_RMON_RX_ADD_QID_TO_CLASS_OUT msgresponse */
+#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RMON_TX_ADD_QID_TO_CLASS
+ * Add qid to class for statistics collection
+ */
+#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS 0xc8
+
+/* MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN msgrequest */
+#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_LEN 12
+/* class */
+#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_CLASS_OFST 0
+/* qid */
+#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_QID_OFST 4
+/* flags */
+#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_FLAGS_OFST 8
+#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_LBN 0
+#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_WIDTH 4
+#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_PE_DELTA_LBN 4
+#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_PE_DELTA_WIDTH 4
+#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_MTU_LBN 8
+#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_MTU_WIDTH 14
+
+/* MC_CMD_RMON_TX_ADD_QID_TO_CLASS_OUT msgresponse */
+#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RMON_MC_ADD_QID_TO_CLASS
+ * Add qid to class for statistics collection
+ */
+#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS 0xc9
+
+/* MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN msgrequest */
+#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_LEN 12
+/* class */
+#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_CLASS_OFST 0
+/* qid */
+#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_QID_OFST 4
+/* flags */
+#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_FLAGS_OFST 8
+#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_SUPER_CLASS_LBN 0
+#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_SUPER_CLASS_WIDTH 4
+#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_PE_DELTA_LBN 4
+#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_PE_DELTA_WIDTH 4
+#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_MTU_LBN 8
+#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_MTU_WIDTH 14
+
+/* MC_CMD_RMON_MC_ADD_QID_TO_CLASS_OUT msgresponse */
+#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RMON_ALLOC_CLASS
+ * Allocate an rmon class
+ */
+#define MC_CMD_RMON_ALLOC_CLASS 0xca
+
+/* MC_CMD_RMON_ALLOC_CLASS_IN msgrequest */
+#define MC_CMD_RMON_ALLOC_CLASS_IN_LEN 0
+
+/* MC_CMD_RMON_ALLOC_CLASS_OUT msgresponse */
+#define MC_CMD_RMON_ALLOC_CLASS_OUT_LEN 4
+/* class */
+#define MC_CMD_RMON_ALLOC_CLASS_OUT_CLASS_OFST 0
+
+
+/***********************************/
+/* MC_CMD_RMON_DEALLOC_CLASS
+ * Deallocate an rmon class
+ */
+#define MC_CMD_RMON_DEALLOC_CLASS 0xcb
+
+/* MC_CMD_RMON_DEALLOC_CLASS_IN msgrequest */
+#define MC_CMD_RMON_DEALLOC_CLASS_IN_LEN 4
+/* class */
+#define MC_CMD_RMON_DEALLOC_CLASS_IN_CLASS_OFST 0
+
+/* MC_CMD_RMON_DEALLOC_CLASS_OUT msgresponse */
+#define MC_CMD_RMON_DEALLOC_CLASS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RMON_ALLOC_SUPER_CLASS
+ * Allocate an rmon super_class
+ */
+#define MC_CMD_RMON_ALLOC_SUPER_CLASS 0xcc
+
+/* MC_CMD_RMON_ALLOC_SUPER_CLASS_IN msgrequest */
+#define MC_CMD_RMON_ALLOC_SUPER_CLASS_IN_LEN 0
+
+/* MC_CMD_RMON_ALLOC_SUPER_CLASS_OUT msgresponse */
+#define MC_CMD_RMON_ALLOC_SUPER_CLASS_OUT_LEN 4
+/* super_class */
+#define MC_CMD_RMON_ALLOC_SUPER_CLASS_OUT_SUPER_CLASS_OFST 0
+
+
+/***********************************/
+/* MC_CMD_RMON_DEALLOC_SUPER_CLASS
+ * Deallocate an rmon tx super_class
+ */
+#define MC_CMD_RMON_DEALLOC_SUPER_CLASS 0xcd
+
+/* MC_CMD_RMON_DEALLOC_SUPER_CLASS_IN msgrequest */
+#define MC_CMD_RMON_DEALLOC_SUPER_CLASS_IN_LEN 4
+/* super_class */
+#define MC_CMD_RMON_DEALLOC_SUPER_CLASS_IN_SUPER_CLASS_OFST 0
+
+/* MC_CMD_RMON_DEALLOC_SUPER_CLASS_OUT msgresponse */
+#define MC_CMD_RMON_DEALLOC_SUPER_CLASS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RMON_RX_UP_CONV_STATS
+ * Retrieve up converter statistics
+ */
+#define MC_CMD_RMON_RX_UP_CONV_STATS 0xce
+
+/* MC_CMD_RMON_RX_UP_CONV_STATS_IN msgrequest */
+#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_PORT_LBN 0
+#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_PORT_WIDTH 2
+#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_RST_LBN 2
+#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_RX_UP_CONV_STATS_OUT msgresponse */
+#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_RX_IPI_STATS
+ * Retrieve rx ipi stats
+ */
+#define MC_CMD_RMON_RX_IPI_STATS 0xcf
+
+/* MC_CMD_RMON_RX_IPI_STATS_IN msgrequest */
+#define MC_CMD_RMON_RX_IPI_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_RX_IPI_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_RX_IPI_STATS_IN_VFIFO_LBN 0
+#define MC_CMD_RMON_RX_IPI_STATS_IN_VFIFO_WIDTH 5
+#define MC_CMD_RMON_RX_IPI_STATS_IN_RST_LBN 5
+#define MC_CMD_RMON_RX_IPI_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_RX_IPI_STATS_OUT msgresponse */
+#define MC_CMD_RMON_RX_IPI_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_RX_IPI_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_RX_IPI_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS
+ * Retrieve rx ipsec cntxt_ptr indexed stats
+ */
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS 0xd0
+
+/* MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN msgrequest */
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_LBN 0
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_WIDTH 9
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_RST_LBN 9
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT msgresponse */
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_RX_IPSEC_PORT_STATS
+ * Retrieve rx ipsec port indexed stats
+ */
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS 0xd1
+
+/* MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN msgrequest */
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_PORT_LBN 0
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_PORT_WIDTH 2
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_RST_LBN 2
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT msgresponse */
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_RX_IPSEC_OFLOW_STATS
+ * Retrieve rx ipsec overflow
+ */
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS 0xd2
+
+/* MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN msgrequest */
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_PORT_LBN 0
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_PORT_WIDTH 2
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_RST_LBN 2
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT msgresponse */
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_VPORT_ADD_MAC_ADDRESS
+ * Add a MAC address to a v-port
+ */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS 0xa8
+
+/* MC_CMD_VPORT_ADD_MAC_ADDRESS_IN msgrequest */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN 10
+/* The handle of the v-port */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID_OFST 0
+/* MAC address to add */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_OFST 4
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_LEN 6
+
+/* MC_CMD_VPORT_ADD_MAC_ADDRESS_OUT msgresponse */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_OUT_LEN 0
+
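+/* Illustrative usage sketch (not part of the protocol definition): the
+ * MACADDR field is a 6-byte array, filled via MCDI_PTR() rather than a
+ * dword setter.  Assumes the mcdi.h helpers and ether_addr_copy() from
+ * <linux/etherdevice.h>; vport_id and mac are hypothetical caller values.
+ *
+ *     MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN);
+ *     int rc;
+ *
+ *     MCDI_SET_DWORD(inbuf, VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID, vport_id);
+ *     ether_addr_copy(MCDI_PTR(inbuf, VPORT_ADD_MAC_ADDRESS_IN_MACADDR), mac);
+ *     rc = efx_mcdi_rpc(efx, MC_CMD_VPORT_ADD_MAC_ADDRESS, inbuf,
+ *                       sizeof(inbuf), NULL, 0, NULL);
+ */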
+
+/***********************************/
+/* MC_CMD_VPORT_DEL_MAC_ADDRESS
+ * Delete a MAC address from a v-port
+ */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS 0xa9
+
+/* MC_CMD_VPORT_DEL_MAC_ADDRESS_IN msgrequest */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN 10
+/* The handle of the v-port */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID_OFST 0
+/* MAC address to delete */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_MACADDR_OFST 4
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_MACADDR_LEN 6
+
+/* MC_CMD_VPORT_DEL_MAC_ADDRESS_OUT msgresponse */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VPORT_GET_MAC_ADDRESSES
+ * Get the MAC addresses on a v-port
+ */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES 0xaa
+
+/* MC_CMD_VPORT_GET_MAC_ADDRESSES_IN msgrequest */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN 4
+/* The handle of the v-port */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID_OFST 0
+
+/* MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT msgresponse */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN 4
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX 250
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LEN(num) (4+6*(num))
+/* The number of MAC addresses returned */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT_OFST 0
+/* Array of MAC addresses */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_OFST 4
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_LEN 6
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_MINNUM 0
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_MAXNUM 41
+
+
+/***********************************/
+/* MC_CMD_DUMP_BUFTBL_ENTRIES
+ * Dump buffer table entries, mainly for command client debug use. Dumps
+ * absolute entries and does not use chunk handles. All entries must be in
+ * range and used for q page mapping, although the latter restriction may
+ * be lifted in future.
+ */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES 0xab
+
+/* MC_CMD_DUMP_BUFTBL_ENTRIES_IN msgrequest */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_LEN 8
+/* Index of the first buffer table entry. */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_FIRSTID_OFST 0
+/* Number of buffer table entries to dump. */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 4
+
+/* MC_CMD_DUMP_BUFTBL_ENTRIES_OUT msgresponse */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMIN 12
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMAX 252
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LEN(num) (0+12*(num))
+/* Raw buffer table entries, laid out as BUFTBL_ENTRY. */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_OFST 0
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_LEN 12
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MINNUM 1
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MAXNUM 21
+
+
+/***********************************/
+/* MC_CMD_SET_RXDP_CONFIG
+ * Set global RXDP configuration settings
+ */
+#define MC_CMD_SET_RXDP_CONFIG 0xc1
+
+/* MC_CMD_SET_RXDP_CONFIG_IN msgrequest */
+#define MC_CMD_SET_RXDP_CONFIG_IN_LEN 4
+#define MC_CMD_SET_RXDP_CONFIG_IN_DATA_OFST 0
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_LBN 0
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_WIDTH 1
+
+/* MC_CMD_SET_RXDP_CONFIG_OUT msgresponse */
+#define MC_CMD_SET_RXDP_CONFIG_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_RXDP_CONFIG
+ * Get global RXDP configuration settings
+ */
+#define MC_CMD_GET_RXDP_CONFIG 0xc2
+
+/* MC_CMD_GET_RXDP_CONFIG_IN msgrequest */
+#define MC_CMD_GET_RXDP_CONFIG_IN_LEN 0
+
+/* MC_CMD_GET_RXDP_CONFIG_OUT msgresponse */
+#define MC_CMD_GET_RXDP_CONFIG_OUT_LEN 4
+#define MC_CMD_GET_RXDP_CONFIG_OUT_DATA_OFST 0
+#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_LBN 0
+#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_WIDTH 1
+
+
+/***********************************/
+/* MC_CMD_RMON_RX_CLASS_DROPS_STATS
+ * Retrieve rx class drop stats
+ */
+#define MC_CMD_RMON_RX_CLASS_DROPS_STATS 0xd3
+
+/* MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN msgrequest */
+#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_CLASS_LBN 0
+#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_CLASS_WIDTH 8
+#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_RST_LBN 8
+#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT msgresponse */
+#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS
+ * Retrieve rx super class drop stats
+ */
+#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS 0xd4
+
+/* MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN msgrequest */
+#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_SUPER_CLASS_LBN 0
+#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_SUPER_CLASS_WIDTH 4
+#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_RST_LBN 4
+#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT msgresponse */
+#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_RX_ERRORS_STATS
+ * Retrieve rxdp errors
+ */
+#define MC_CMD_RMON_RX_ERRORS_STATS 0xd5
+
+/* MC_CMD_RMON_RX_ERRORS_STATS_IN msgrequest */
+#define MC_CMD_RMON_RX_ERRORS_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_RX_ERRORS_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_RX_ERRORS_STATS_IN_QID_LBN 0
+#define MC_CMD_RMON_RX_ERRORS_STATS_IN_QID_WIDTH 11
+#define MC_CMD_RMON_RX_ERRORS_STATS_IN_RST_LBN 11
+#define MC_CMD_RMON_RX_ERRORS_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_RX_ERRORS_STATS_OUT msgresponse */
+#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_RX_OVERFLOW_STATS
+ * Retrieve rxdp overflow
+ */
+#define MC_CMD_RMON_RX_OVERFLOW_STATS 0xd6
+
+/* MC_CMD_RMON_RX_OVERFLOW_STATS_IN msgrequest */
+#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_CLASS_LBN 0
+#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_CLASS_WIDTH 8
+#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_RST_LBN 8
+#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_RX_OVERFLOW_STATS_OUT msgresponse */
+#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_TX_IPI_STATS
+ * Retrieve tx ipi stats
+ */
+#define MC_CMD_RMON_TX_IPI_STATS 0xd7
+
+/* MC_CMD_RMON_TX_IPI_STATS_IN msgrequest */
+#define MC_CMD_RMON_TX_IPI_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_TX_IPI_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_TX_IPI_STATS_IN_VFIFO_LBN 0
+#define MC_CMD_RMON_TX_IPI_STATS_IN_VFIFO_WIDTH 5
+#define MC_CMD_RMON_TX_IPI_STATS_IN_RST_LBN 5
+#define MC_CMD_RMON_TX_IPI_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_TX_IPI_STATS_OUT msgresponse */
+#define MC_CMD_RMON_TX_IPI_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_TX_IPI_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_TX_IPI_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS
+ * Retrieve tx ipsec counters by cntxt_ptr
+ */
+#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS 0xd8
+
+/* MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN msgrequest */
+#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_LBN 0
+#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_WIDTH 9
+#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_RST_LBN 9
+#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT msgresponse */
+#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_TX_IPSEC_PORT_STATS
+ * Retrieve tx ipsec counters by port
+ */
+#define MC_CMD_RMON_TX_IPSEC_PORT_STATS 0xd9
+
+/* MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN msgrequest */
+#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_PORT_LBN 0
+#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_PORT_WIDTH 2
+#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_RST_LBN 2
+#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT msgresponse */
+#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_TX_IPSEC_OFLOW_STATS
+ * Retrieve tx ipsec overflow
+ */
+#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS 0xda
+
+/* MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN msgrequest */
+#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_PORT_LBN 0
+#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_PORT_WIDTH 2
+#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_RST_LBN 2
+#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT msgresponse */
+#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_TX_NOWHERE_STATS
+ * Retrieve tx nowhere stats
+ */
+#define MC_CMD_RMON_TX_NOWHERE_STATS 0xdb
+
+/* MC_CMD_RMON_TX_NOWHERE_STATS_IN msgrequest */
+#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_CLASS_LBN 0
+#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_CLASS_WIDTH 8
+#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_RST_LBN 8
+#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_TX_NOWHERE_STATS_OUT msgresponse */
+#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_TX_NOWHERE_QBB_STATS
+ * Retrieve tx nowhere qbb stats
+ */
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS 0xdc
+
+/* MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN msgrequest */
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_PRIORITY_LBN 0
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_PRIORITY_WIDTH 3
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_RST_LBN 3
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT msgresponse */
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_TX_ERRORS_STATS
+ * Retrieve txdp errors
+ */
+#define MC_CMD_RMON_TX_ERRORS_STATS 0xdd
+
+/* MC_CMD_RMON_TX_ERRORS_STATS_IN msgrequest */
+#define MC_CMD_RMON_TX_ERRORS_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_TX_ERRORS_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_TX_ERRORS_STATS_IN_QID_LBN 0
+#define MC_CMD_RMON_TX_ERRORS_STATS_IN_QID_WIDTH 11
+#define MC_CMD_RMON_TX_ERRORS_STATS_IN_RST_LBN 11
+#define MC_CMD_RMON_TX_ERRORS_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_TX_ERRORS_STATS_OUT msgresponse */
+#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_TX_OVERFLOW_STATS
+ * Retrieve txdp overflow
+ */
+#define MC_CMD_RMON_TX_OVERFLOW_STATS 0xde
+
+/* MC_CMD_RMON_TX_OVERFLOW_STATS_IN msgrequest */
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_CLASS_LBN 0
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_CLASS_WIDTH 8
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_RST_LBN 8
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_TX_OVERFLOW_STATS_OUT msgresponse */
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_COLLECT_CLASS_STATS
+ * Explicitly collect class stats at the specified evb port
+ */
+#define MC_CMD_RMON_COLLECT_CLASS_STATS 0xdf
+
+/* MC_CMD_RMON_COLLECT_CLASS_STATS_IN msgrequest */
+#define MC_CMD_RMON_COLLECT_CLASS_STATS_IN_LEN 4
+/* The port id associated with the vport/pport at which to collect class stats
+ */
+#define MC_CMD_RMON_COLLECT_CLASS_STATS_IN_PORT_ID_OFST 0
+
+/* MC_CMD_RMON_COLLECT_CLASS_STATS_OUT msgresponse */
+#define MC_CMD_RMON_COLLECT_CLASS_STATS_OUT_LEN 4
+/* class */
+#define MC_CMD_RMON_COLLECT_CLASS_STATS_OUT_CLASS_OFST 0
+
+
+/***********************************/
+/* MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS
+ * Explicitly collect super class stats at the specified evb port
+ */
+#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS 0xe0
+
+/* MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_IN msgrequest */
+#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_IN_LEN 4
+/* The port id associated with the vport/pport at which to collect class stats
+ */
+#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_IN_PORT_ID_OFST 0
+
+/* MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_OUT msgresponse */
+#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_OUT_LEN 4
+/* super_class */
+#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_OUT_SUPER_CLASS_OFST 0
+
+
+/***********************************/
+/* MC_CMD_GET_CLOCK
+ * Return the system and DPCPU clock frequencies.
+ */
+#define MC_CMD_GET_CLOCK 0xac
+
+/* MC_CMD_GET_CLOCK_IN msgrequest */
+#define MC_CMD_GET_CLOCK_IN_LEN 0
+
+/* MC_CMD_GET_CLOCK_OUT msgresponse */
+#define MC_CMD_GET_CLOCK_OUT_LEN 8
+/* System frequency, MHz */
+#define MC_CMD_GET_CLOCK_OUT_SYS_FREQ_OFST 0
+/* DPCPU frequency, MHz */
+#define MC_CMD_GET_CLOCK_OUT_DPCPU_FREQ_OFST 4
+
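+/* Illustrative usage sketch (not part of the protocol definition): a
+ * fixed-size query with no request payload.  Assumes the mcdi.h helpers
+ * and efx_mcdi_rpc(); sys_mhz and dpcpu_mhz are hypothetical caller
+ * variables.
+ *
+ *     MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CLOCK_OUT_LEN);
+ *     size_t outlen;
+ *     int rc;
+ *
+ *     rc = efx_mcdi_rpc(efx, MC_CMD_GET_CLOCK, NULL, 0,
+ *                       outbuf, sizeof(outbuf), &outlen);
+ *     if (!rc && outlen >= MC_CMD_GET_CLOCK_OUT_LEN) {
+ *         sys_mhz   = MCDI_DWORD(outbuf, GET_CLOCK_OUT_SYS_FREQ);
+ *         dpcpu_mhz = MCDI_DWORD(outbuf, GET_CLOCK_OUT_DPCPU_FREQ);
+ *     }
+ */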
+
+/***********************************/
+/* MC_CMD_SET_CLOCK
+ * Control the system and DPCPU clock frequencies. Changes are lost on reboot.
+ */
+#define MC_CMD_SET_CLOCK 0xad
+
+/* MC_CMD_SET_CLOCK_IN msgrequest */
+#define MC_CMD_SET_CLOCK_IN_LEN 12
+/* Requested system frequency in MHz; 0 leaves unchanged. */
+#define MC_CMD_SET_CLOCK_IN_SYS_FREQ_OFST 0
+/* Requested inter-core frequency in MHz; 0 leaves unchanged. */
+#define MC_CMD_SET_CLOCK_IN_ICORE_FREQ_OFST 4
+/* Requested DPCPU frequency in MHz; 0 leaves unchanged. */
+#define MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_OFST 8
+
+/* MC_CMD_SET_CLOCK_OUT msgresponse */
+#define MC_CMD_SET_CLOCK_OUT_LEN 12
+/* Resulting system frequency in MHz */
+#define MC_CMD_SET_CLOCK_OUT_SYS_FREQ_OFST 0
+/* Resulting inter-core frequency in MHz */
+#define MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_OFST 4
+/* Resulting DPCPU frequency in MHz */
+#define MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_OFST 8
+
+
+/***********************************/
+/* MC_CMD_DPCPU_RPC
+ * Send an arbitrary DPCPU message.
+ */
+#define MC_CMD_DPCPU_RPC 0xae
+
+/* MC_CMD_DPCPU_RPC_IN msgrequest */
+#define MC_CMD_DPCPU_RPC_IN_LEN 36
+#define MC_CMD_DPCPU_RPC_IN_CPU_OFST 0
+/* enum: RxDPCPU */
+#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX 0x0
+/* enum: TxDPCPU0 */
+#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX0 0x1
+/* enum: TxDPCPU1 */
+#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX1 0x2
+/* First 8 bits [39:32] of DATA are consumed by MC-DPCPU protocol and must be
+ * initialised to zero
+ */
+#define MC_CMD_DPCPU_RPC_IN_DATA_OFST 4
+#define MC_CMD_DPCPU_RPC_IN_DATA_LEN 32
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_LBN 8
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_WIDTH 8
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_READ 0x6 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_WRITE 0x7 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_SELF_TEST 0xc /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_CSR_ACCESS 0xe /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_READ 0x46 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_WRITE 0x47 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SELF_TEST 0x4a /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_CSR_ACCESS 0x4c /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SET_MC_REPLAY_CNTXT 0x4d /* enum */
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_LBN 16
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_ADDR_LBN 16
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_ADDR_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_COUNT_LBN 48
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_COUNT_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_INFO_LBN 16
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_INFO_WIDTH 240
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_LBN 16
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_STOP_RETURN_RESULT 0x0 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_READ 0x1 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE 0x2 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE_READ 0x3 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_PIPELINED_READ 0x4 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_LBN 48
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_RPT_COUNT_LBN 64
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_RPT_COUNT_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_GAP_DELAY_LBN 80
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_GAP_DELAY_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_LBN 16
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_CUT_THROUGH 0x1 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD 0x2 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD_FIRST 0x3 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_LBN 64
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_WDATA_OFST 12
+#define MC_CMD_DPCPU_RPC_IN_WDATA_LEN 24
+/* Register data to write. Only valid in write/write-read. */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_DATA_OFST 16
+/* Register address. */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_ADDRESS_OFST 20
+
+/* MC_CMD_DPCPU_RPC_OUT msgresponse */
+#define MC_CMD_DPCPU_RPC_OUT_LEN 36
+#define MC_CMD_DPCPU_RPC_OUT_RC_OFST 0
+/* DATA */
+#define MC_CMD_DPCPU_RPC_OUT_DATA_OFST 4
+#define MC_CMD_DPCPU_RPC_OUT_DATA_LEN 32
+#define MC_CMD_DPCPU_RPC_OUT_HDR_CMD_RESP_ERRCODE_LBN 32
+#define MC_CMD_DPCPU_RPC_OUT_HDR_CMD_RESP_ERRCODE_WIDTH 16
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_COUNT_LBN 48
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_COUNT_WIDTH 16
+#define MC_CMD_DPCPU_RPC_OUT_RDATA_OFST 12
+#define MC_CMD_DPCPU_RPC_OUT_RDATA_LEN 24
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_1_OFST 12
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_2_OFST 16
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_3_OFST 20
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_4_OFST 24
+
+
+/***********************************/
+/* MC_CMD_TRIGGER_INTERRUPT
+ * Trigger an interrupt by prodding the BIU.
+ */
+#define MC_CMD_TRIGGER_INTERRUPT 0xe3
+
+/* MC_CMD_TRIGGER_INTERRUPT_IN msgrequest */
+#define MC_CMD_TRIGGER_INTERRUPT_IN_LEN 4
+/* Interrupt level relative to base for function. */
+#define MC_CMD_TRIGGER_INTERRUPT_IN_INTR_LEVEL_OFST 0
+
+/* MC_CMD_TRIGGER_INTERRUPT_OUT msgresponse */
+#define MC_CMD_TRIGGER_INTERRUPT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_CAP_BLK_READ
+ * Read multiple 64bit words from capture block memory
+ */
+#define MC_CMD_CAP_BLK_READ 0xe7
+
+/* MC_CMD_CAP_BLK_READ_IN msgrequest */
+#define MC_CMD_CAP_BLK_READ_IN_LEN 12
+#define MC_CMD_CAP_BLK_READ_IN_CAP_REG_OFST 0
+#define MC_CMD_CAP_BLK_READ_IN_ADDR_OFST 4
+#define MC_CMD_CAP_BLK_READ_IN_COUNT_OFST 8
+
+/* MC_CMD_CAP_BLK_READ_OUT msgresponse */
+#define MC_CMD_CAP_BLK_READ_OUT_LENMIN 8
+#define MC_CMD_CAP_BLK_READ_OUT_LENMAX 248
+#define MC_CMD_CAP_BLK_READ_OUT_LEN(num) (0+8*(num))
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_OFST 0
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LEN 8
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LO_OFST 0
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_HI_OFST 4
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_MINNUM 1
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_MAXNUM 31
+
+
+/***********************************/
+/* MC_CMD_DUMP_DO
+ * Take a dump of the DUT state
+ */
+#define MC_CMD_DUMP_DO 0xe8
+
+/* MC_CMD_DUMP_DO_IN msgrequest */
+#define MC_CMD_DUMP_DO_IN_LEN 52
+#define MC_CMD_DUMP_DO_IN_PADDING_OFST 0
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_OFST 4
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM 0x0 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_DEFAULT 0x1 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_TYPE_OFST 8
+#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_NVRAM 0x1 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY 0x2 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY_MLI 0x3 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_UART 0x4 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 12
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_OFST 16
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 12
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 16
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 12
+#define MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_PAGE_SIZE 0x1000 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 16
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 20
+#define MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_MAX_DEPTH 0x2 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_OFST 12
+/* enum: The uart port this command was received over (if using a uart
+ * transport)
+ */
+#define MC_CMD_DUMP_DO_IN_UART_PORT_SRC 0xff
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_SIZE_OFST 24
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_OFST 28
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM 0x0 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_NVRAM_DUMP_PARTITION 0x1 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_TYPE_OFST 32
+/* Enum values, see field(s): */
+/* MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 36
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_OFST 40
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 36
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 40
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 36
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 40
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 44
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_UART_PORT_OFST 36
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_SIZE_OFST 48
+
+/* MC_CMD_DUMP_DO_OUT msgresponse */
+#define MC_CMD_DUMP_DO_OUT_LEN 4
+#define MC_CMD_DUMP_DO_OUT_DUMPFILE_SIZE_OFST 0
+
+
+/***********************************/
+/* MC_CMD_DUMP_CONFIGURE_UNSOLICITED
+ * Configure unsolicited dumps
+ */
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED 0xe9
+
+/* MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN msgrequest */
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_LEN 52
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_ENABLE_OFST 0
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_OFST 4
+/* Enum values, see field(s): */
+/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC */
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_TYPE_OFST 8
+/* Enum values, see field(s): */
+/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 12
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_OFST 16
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 12
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 16
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 12
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 16
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 20
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_OFST 12
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_SIZE_OFST 24
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_OFST 28
+/* Enum values, see field(s): */
+/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPFILE_DST */
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_TYPE_OFST 32
+/* Enum values, see field(s): */
+/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 36
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_OFST 40
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 36
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 40
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 36
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 40
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 44
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_UART_PORT_OFST 36
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_SIZE_OFST 48
+
+
+/***********************************/
+/* MC_CMD_SET_PSU
+ * Adjusts power supply parameters. This is a warranty-voiding operation.
+ * Returns: ENOENT if the parameter or rail specified does not exist, EINVAL if
+ * the parameter is out of range.
+ */
+#define MC_CMD_SET_PSU 0xea
+
+/* MC_CMD_SET_PSU_IN msgrequest */
+#define MC_CMD_SET_PSU_IN_LEN 12
+#define MC_CMD_SET_PSU_IN_PARAM_OFST 0
+#define MC_CMD_SET_PSU_IN_PARAM_SUPPLY_VOLTAGE 0x0 /* enum */
+#define MC_CMD_SET_PSU_IN_RAIL_OFST 4
+#define MC_CMD_SET_PSU_IN_RAIL_0V9 0x0 /* enum */
+#define MC_CMD_SET_PSU_IN_RAIL_1V2 0x1 /* enum */
+/* desired value, e.g. voltage in mV */
+#define MC_CMD_SET_PSU_IN_VALUE_OFST 8
+
+/* MC_CMD_SET_PSU_OUT msgresponse */
+#define MC_CMD_SET_PSU_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_FUNCTION_INFO
+ * Get function information: PF and VF number.
+ */
+#define MC_CMD_GET_FUNCTION_INFO 0xec
+
+/* MC_CMD_GET_FUNCTION_INFO_IN msgrequest */
+#define MC_CMD_GET_FUNCTION_INFO_IN_LEN 0
+
+/* MC_CMD_GET_FUNCTION_INFO_OUT msgresponse */
+#define MC_CMD_GET_FUNCTION_INFO_OUT_LEN 8
+#define MC_CMD_GET_FUNCTION_INFO_OUT_PF_OFST 0
+#define MC_CMD_GET_FUNCTION_INFO_OUT_VF_OFST 4
+
+
+/***********************************/
+/* MC_CMD_ENABLE_OFFLINE_BIST
+ * Enters offline BIST mode. All queues are torn down, chip enters quiescent
+ * mode, calling function gets exclusive MCDI ownership. The only way out is
+ * reboot.
+ */
+#define MC_CMD_ENABLE_OFFLINE_BIST 0xed
+
+/* MC_CMD_ENABLE_OFFLINE_BIST_IN msgrequest */
+#define MC_CMD_ENABLE_OFFLINE_BIST_IN_LEN 0
+
+/* MC_CMD_ENABLE_OFFLINE_BIST_OUT msgresponse */
+#define MC_CMD_ENABLE_OFFLINE_BIST_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_UART_SEND_DATA
+ * Send checksummed[sic] block of data over the uart. Response is a placeholder
+ * should we wish to make this reliable; currently requests are fire-and-
+ * forget.
+ */
+#define MC_CMD_UART_SEND_DATA 0xee
+
+/* MC_CMD_UART_SEND_DATA_OUT msgrequest */
+#define MC_CMD_UART_SEND_DATA_OUT_LENMIN 16
+#define MC_CMD_UART_SEND_DATA_OUT_LENMAX 252
+#define MC_CMD_UART_SEND_DATA_OUT_LEN(num) (16+1*(num))
+/* CRC32 over OFFSET, LENGTH, RESERVED, DATA */
+#define MC_CMD_UART_SEND_DATA_OUT_CHECKSUM_OFST 0
+/* Offset at which to write the data */
+#define MC_CMD_UART_SEND_DATA_OUT_OFFSET_OFST 4
+/* Length of data */
+#define MC_CMD_UART_SEND_DATA_OUT_LENGTH_OFST 8
+/* Reserved for future use */
+#define MC_CMD_UART_SEND_DATA_OUT_RESERVED_OFST 12
+#define MC_CMD_UART_SEND_DATA_OUT_DATA_OFST 16
+#define MC_CMD_UART_SEND_DATA_OUT_DATA_LEN 1
+#define MC_CMD_UART_SEND_DATA_OUT_DATA_MINNUM 0
+#define MC_CMD_UART_SEND_DATA_OUT_DATA_MAXNUM 236
+
+/* MC_CMD_UART_SEND_DATA_IN msgresponse */
+#define MC_CMD_UART_SEND_DATA_IN_LEN 0
+
+
+/***********************************/
+/* MC_CMD_UART_RECV_DATA
+ * Request checksummed[sic] block of data over the uart. Only a placeholder,
+ * subject to change and not currently implemented.
+ */
+#define MC_CMD_UART_RECV_DATA 0xef
+
+/* MC_CMD_UART_RECV_DATA_OUT msgrequest */
+#define MC_CMD_UART_RECV_DATA_OUT_LEN 16
+/* CRC32 over OFFSET, LENGTH, RESERVED */
+#define MC_CMD_UART_RECV_DATA_OUT_CHECKSUM_OFST 0
+/* Offset from which to read the data */
+#define MC_CMD_UART_RECV_DATA_OUT_OFFSET_OFST 4
+/* Length of data */
+#define MC_CMD_UART_RECV_DATA_OUT_LENGTH_OFST 8
+/* Reserved for future use */
+#define MC_CMD_UART_RECV_DATA_OUT_RESERVED_OFST 12
+
+/* MC_CMD_UART_RECV_DATA_IN msgresponse */
+#define MC_CMD_UART_RECV_DATA_IN_LENMIN 16
+#define MC_CMD_UART_RECV_DATA_IN_LENMAX 252
+#define MC_CMD_UART_RECV_DATA_IN_LEN(num) (16+1*(num))
+/* CRC32 over RESERVED1, RESERVED2, RESERVED3, DATA */
+#define MC_CMD_UART_RECV_DATA_IN_CHECKSUM_OFST 0
+/* Offset at which to write the data */
+#define MC_CMD_UART_RECV_DATA_IN_RESERVED1_OFST 4
+/* Length of data */
+#define MC_CMD_UART_RECV_DATA_IN_RESERVED2_OFST 8
+/* Reserved for future use */
+#define MC_CMD_UART_RECV_DATA_IN_RESERVED3_OFST 12
+#define MC_CMD_UART_RECV_DATA_IN_DATA_OFST 16
+#define MC_CMD_UART_RECV_DATA_IN_DATA_LEN 1
+#define MC_CMD_UART_RECV_DATA_IN_DATA_MINNUM 0
+#define MC_CMD_UART_RECV_DATA_IN_DATA_MAXNUM 236
+
+
+/***********************************/
+/* MC_CMD_READ_FUSES
+ * Read data programmed into the device One-Time-Programmable (OTP) Fuses
+ */
+#define MC_CMD_READ_FUSES 0xf0
+
+/* MC_CMD_READ_FUSES_IN msgrequest */
+#define MC_CMD_READ_FUSES_IN_LEN 8
+/* Offset in OTP to read */
+#define MC_CMD_READ_FUSES_IN_OFFSET_OFST 0
+/* Length of data to read in bytes */
+#define MC_CMD_READ_FUSES_IN_LENGTH_OFST 4
+
+/* MC_CMD_READ_FUSES_OUT msgresponse */
+#define MC_CMD_READ_FUSES_OUT_LENMIN 4
+#define MC_CMD_READ_FUSES_OUT_LENMAX 252
+#define MC_CMD_READ_FUSES_OUT_LEN(num) (4+1*(num))
+/* Length of returned OTP data in bytes */
+#define MC_CMD_READ_FUSES_OUT_LENGTH_OFST 0
+/* Returned data */
+#define MC_CMD_READ_FUSES_OUT_DATA_OFST 4
+#define MC_CMD_READ_FUSES_OUT_DATA_LEN 1
+#define MC_CMD_READ_FUSES_OUT_DATA_MINNUM 0
+#define MC_CMD_READ_FUSES_OUT_DATA_MAXNUM 248
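
For illustration, a single OTP read that fits in one response might be issued
as below. The wrapper name is hypothetical, larger reads would need to be
split into multiple requests, and rejecting a short read with -EIO is an
assumption:

	static int efx_mcdi_read_fuses_example(struct efx_nic *efx, u32 offset,
					       u8 *data, size_t len)
	{
		MCDI_DECLARE_BUF(inbuf, MC_CMD_READ_FUSES_IN_LEN);
		MCDI_DECLARE_BUF(outbuf, MC_CMD_READ_FUSES_OUT_LENMAX);
		size_t outlen;
		int rc;

		if (len > MC_CMD_READ_FUSES_OUT_DATA_MAXNUM)
			return -EINVAL;	/* would not fit in one response */

		MCDI_SET_DWORD(inbuf, READ_FUSES_IN_OFFSET, offset);
		MCDI_SET_DWORD(inbuf, READ_FUSES_IN_LENGTH, len);

		rc = efx_mcdi_rpc(efx, MC_CMD_READ_FUSES, inbuf, sizeof(inbuf),
				  outbuf, sizeof(outbuf), &outlen);
		if (rc)
			return rc;
		if (MCDI_DWORD(outbuf, READ_FUSES_OUT_LENGTH) < len)
			return -EIO;	/* MC returned less data than requested */

		memcpy(data, MCDI_PTR(outbuf, READ_FUSES_OUT_DATA), len);
		return 0;
	}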
+
+
+/***********************************/
+/* MC_CMD_KR_TUNE
+ * Get or set KR Serdes RXEQ and TX Driver settings
+ */
+#define MC_CMD_KR_TUNE 0xf1
+
+/* MC_CMD_KR_TUNE_IN msgrequest */
+#define MC_CMD_KR_TUNE_IN_LENMIN 4
+#define MC_CMD_KR_TUNE_IN_LENMAX 252
+#define MC_CMD_KR_TUNE_IN_LEN(num) (4+4*(num))
+/* Requested operation */
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_OP_LEN 1
+/* enum: Get current RXEQ settings */
+#define MC_CMD_KR_TUNE_IN_RXEQ_GET 0x0
+/* enum: Override RXEQ settings */
+#define MC_CMD_KR_TUNE_IN_RXEQ_SET 0x1
+/* enum: Get current TX Driver settings */
+#define MC_CMD_KR_TUNE_IN_TXEQ_GET 0x2
+/* enum: Override TX Driver settings */
+#define MC_CMD_KR_TUNE_IN_TXEQ_SET 0x3
+/* enum: Force KR Serdes reset / recalibration */
+#define MC_CMD_KR_TUNE_IN_RECAL 0x4
+/* enum: Start KR Serdes Eye diagram plot on a given lane. Lane must have valid
+ * signal.
+ */
+#define MC_CMD_KR_TUNE_IN_START_EYE_PLOT 0x5
+/* enum: Poll KR Serdes Eye diagram plot. Returns one row of BER data. The
+ * caller should call this command repeatedly after starting eye plot, until no
+ * more data is returned.
+ */
+#define MC_CMD_KR_TUNE_IN_POLL_EYE_PLOT 0x6
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_LEN 3
+/* Arguments specific to the operation */
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_OFST 4
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_LEN 4
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_MINNUM 0
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_MAXNUM 62
+
+/* MC_CMD_KR_TUNE_OUT msgresponse */
+#define MC_CMD_KR_TUNE_OUT_LEN 0
+
+/* MC_CMD_KR_TUNE_RXEQ_GET_IN msgrequest */
+#define MC_CMD_KR_TUNE_RXEQ_GET_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_KR_TUNE_RXEQ_GET_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_RXEQ_GET_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_RXEQ_GET_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_RXEQ_GET_IN_KR_TUNE_RSVD_LEN 3
+
+/* MC_CMD_KR_TUNE_RXEQ_GET_OUT msgresponse */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LENMIN 4
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LENMAX 252
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LEN(num) (0+4*(num))
+/* RXEQ Parameter */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_OFST 0
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LEN 4
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_MINNUM 1
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM 63
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8
+/* enum: Attenuation (0-15) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_ATT 0x0
+/* enum: CTLE Boost (0-15) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_BOOST 0x1
+/* enum: Edge DFE Tap1 (0 - max negative, 64 - zero, 127 - max positive) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP1 0x2
+/* enum: Edge DFE Tap2 (0 - max negative, 32 - zero, 63 - max positive) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP2 0x3
+/* enum: Edge DFE Tap3 (0 - max negative, 32 - zero, 63 - max positive) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP3 0x4
+/* enum: Edge DFE Tap4 (0 - max negative, 32 - zero, 63 - max positive) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP4 0x5
+/* enum: Edge DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP5 0x6
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 3
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_1 0x1 /* enum */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_2 0x2 /* enum */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_3 0x3 /* enum */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_ALL 0x4 /* enum */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_LBN 11
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_WIDTH 1
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_RESERVED_LBN 12
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_RESERVED_WIDTH 4
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_INITIAL_LBN 16
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_INITIAL_WIDTH 8
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_LBN 24
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8
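
Each returned PARAM is a packed 32-bit word laid out by the LBN/WIDTH values
above. A minimal decode helper might look like the sketch below; the struct
and function are illustrative only and not part of the driver:

	struct kr_rxeq_param {
		u8 id;		/* ATT, BOOST, EDFE_TAP1..5 */
		u8 lane;	/* LANE_0..LANE_3 or LANE_ALL */
		bool autocal;
		u8 initial;
		u8 cur;
	};

	static void kr_rxeq_param_decode(u32 param, struct kr_rxeq_param *p)
	{
		p->id = (param >> MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN) & 0xff;
		p->lane = (param >> MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN) & 0x7;
		p->autocal = param &
			(1 << MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_LBN);
		p->initial =
			(param >> MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_INITIAL_LBN) & 0xff;
		p->cur =
			(param >> MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_LBN) & 0xff;
	}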
+
+/* MC_CMD_KR_TUNE_RXEQ_SET_IN msgrequest */
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_LENMIN 8
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_LENMAX 252
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_LEN(num) (4+4*(num))
+/* Requested operation */
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_RSVD_LEN 3
+/* RXEQ Parameter */
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_OFST 4
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_LEN 4
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_MINNUM 1
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_MAXNUM 62
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_ID_LBN 0
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_ID_WIDTH 8
+/* Enum values, see field(s): */
+/* MC_CMD_KR_TUNE_RXEQ_GET_OUT/PARAM_ID */
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_LANE_LBN 8
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_LANE_WIDTH 3
+/* Enum values, see field(s): */
+/* MC_CMD_KR_TUNE_RXEQ_GET_OUT/PARAM_LANE */
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_LBN 11
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_WIDTH 1
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED_LBN 12
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED_WIDTH 4
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_INITIAL_LBN 16
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_INITIAL_WIDTH 8
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED2_LBN 24
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED2_WIDTH 8
+
+/* MC_CMD_KR_TUNE_RXEQ_SET_OUT msgresponse */
+#define MC_CMD_KR_TUNE_RXEQ_SET_OUT_LEN 0
+
+/* MC_CMD_KR_TUNE_TXEQ_GET_IN msgrequest */
+#define MC_CMD_KR_TUNE_TXEQ_GET_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_KR_TUNE_TXEQ_GET_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_TXEQ_GET_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_TXEQ_GET_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_TXEQ_GET_IN_KR_TUNE_RSVD_LEN 3
+
+/* MC_CMD_KR_TUNE_TXEQ_GET_OUT msgresponse */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LENMIN 4
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LENMAX 252
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LEN(num) (0+4*(num))
+/* TXEQ Parameter */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_OFST 0
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LEN 4
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_MINNUM 1
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM 63
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN 0
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_WIDTH 8
+/* enum: TX Amplitude */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV 0x0
+/* enum: De-Emphasis Tap1 Magnitude (0-7) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_MODE 0x1
+/* enum: De-Emphasis Tap1 Fine */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_DTLEV 0x2
+/* enum: De-Emphasis Tap2 Magnitude (0-6) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_D2 0x3
+/* enum: De-Emphasis Tap2 Fine */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_D2TLEV 0x4
+/* enum: Pre-Emphasis Magnitude */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_E 0x5
+/* enum: Pre-Emphasis Fine */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_ETLEV 0x6
+/* enum: TX Slew Rate Coarse control */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_PREDRV_DLY 0x7
+/* enum: TX Slew Rate Fine control */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_SR_SET 0x8
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 3
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_0 0x0 /* enum */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_1 0x1 /* enum */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_2 0x2 /* enum */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_3 0x3 /* enum */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_ALL 0x4 /* enum */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED_LBN 11
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED_WIDTH 5
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_INITIAL_LBN 16
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_INITIAL_WIDTH 8
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED2_LBN 24
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED2_WIDTH 8
+
+/* MC_CMD_KR_TUNE_TXEQ_SET_IN msgrequest */
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_LENMIN 8
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_LENMAX 252
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_LEN(num) (4+4*(num))
+/* Requested operation */
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_RSVD_LEN 3
+/* TXEQ Parameter */
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_OFST 4
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_LEN 4
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_MINNUM 1
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_MAXNUM 62
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_ID_LBN 0
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_ID_WIDTH 8
+/* Enum values, see field(s): */
+/* MC_CMD_KR_TUNE_TXEQ_GET_OUT/PARAM_ID */
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_LANE_LBN 8
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_LANE_WIDTH 3
+/* Enum values, see field(s): */
+/* MC_CMD_KR_TUNE_TXEQ_GET_OUT/PARAM_LANE */
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED_LBN 11
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED_WIDTH 5
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_INITIAL_LBN 16
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_INITIAL_WIDTH 8
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED2_LBN 24
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED2_WIDTH 8
+
+/* MC_CMD_KR_TUNE_TXEQ_SET_OUT msgresponse */
+#define MC_CMD_KR_TUNE_TXEQ_SET_OUT_LEN 0
+
+/* MC_CMD_KR_TUNE_RECAL_IN msgrequest */
+#define MC_CMD_KR_TUNE_RECAL_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_KR_TUNE_RECAL_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_RECAL_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_RECAL_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_RECAL_IN_KR_TUNE_RSVD_LEN 3
+
+/* MC_CMD_KR_TUNE_RECAL_OUT msgresponse */
+#define MC_CMD_KR_TUNE_RECAL_OUT_LEN 0
+
+/* MC_CMD_KR_TUNE_START_EYE_PLOT_IN msgrequest */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_LEN 8
+/* Requested operation */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_RSVD_LEN 3
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_LANE_OFST 4
+
+/* MC_CMD_KR_TUNE_START_EYE_PLOT_OUT msgresponse */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_OUT_LEN 0
+
+/* MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN msgrequest */
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_RSVD_LEN 3
+
+/* MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT msgresponse */
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_LENMIN 0
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_LENMAX 252
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_LEN(num) (0+2*(num))
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_OFST 0
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_LEN 2
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MINNUM 0
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM 126
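
The eye-plot flow described above (START_EYE_PLOT, then POLL_EYE_PLOT until an
empty response) might be driven as in this sketch. The wrapper is hypothetical;
note that writing the whole first dword with MCDI_SET_DWORD puts the one-byte
op in byte 0 and zeroes the three RSVD padding bytes:

	static int efx_mcdi_kr_poll_eye_example(struct efx_nic *efx)
	{
		MCDI_DECLARE_BUF(inbuf, MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_LEN);
		MCDI_DECLARE_BUF(outbuf, MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_LENMAX);
		size_t outlen;
		int rc;

		MCDI_SET_DWORD(inbuf, KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_OP,
			       MC_CMD_KR_TUNE_IN_POLL_EYE_PLOT);

		do {
			rc = efx_mcdi_rpc(efx, MC_CMD_KR_TUNE,
					  inbuf, sizeof(inbuf),
					  outbuf, sizeof(outbuf), &outlen);
			if (rc)
				return rc;
			/* each 16-bit SAMPLES entry is one BER value; an
			 * empty response means the plot is complete */
		} while (outlen != 0);

		return 0;
	}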
+
+
+/***********************************/
+/* MC_CMD_PCIE_TUNE
+ * Get or set PCIE Serdes RXEQ and TX Driver settings
+ */
+#define MC_CMD_PCIE_TUNE 0xf2
+
+/* MC_CMD_PCIE_TUNE_IN msgrequest */
+#define MC_CMD_PCIE_TUNE_IN_LENMIN 4
+#define MC_CMD_PCIE_TUNE_IN_LENMAX 252
+#define MC_CMD_PCIE_TUNE_IN_LEN(num) (4+4*(num))
+/* Requested operation */
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_OP_OFST 0
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_OP_LEN 1
+/* enum: Get current RXEQ settings */
+#define MC_CMD_PCIE_TUNE_IN_RXEQ_GET 0x0
+/* enum: Override RXEQ settings */
+#define MC_CMD_PCIE_TUNE_IN_RXEQ_SET 0x1
+/* enum: Get current TX Driver settings */
+#define MC_CMD_PCIE_TUNE_IN_TXEQ_GET 0x2
+/* enum: Override TX Driver settings */
+#define MC_CMD_PCIE_TUNE_IN_TXEQ_SET 0x3
+/* enum: Start PCIe Serdes Eye diagram plot on a given lane. */
+#define MC_CMD_PCIE_TUNE_IN_START_EYE_PLOT 0x5
+/* enum: Poll PCIe Serdes Eye diagram plot. Returns one row of BER data. The
+ * caller should call this command repeatedly after starting eye plot, until no
+ * more data is returned.
+ */
+#define MC_CMD_PCIE_TUNE_IN_POLL_EYE_PLOT 0x6
+/* Align the arguments to 32 bits */
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_OFST 1
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_LEN 3
+/* Arguments specific to the operation */
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_OFST 4
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_LEN 4
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_MINNUM 0
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_MAXNUM 62
+
+/* MC_CMD_PCIE_TUNE_OUT msgresponse */
+#define MC_CMD_PCIE_TUNE_OUT_LEN 0
+
+/* MC_CMD_PCIE_TUNE_RXEQ_GET_IN msgrequest */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_OP_OFST 0
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_RSVD_OFST 1
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_RSVD_LEN 3
+
+/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT msgresponse */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LENMIN 4
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LENMAX 252
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LEN(num) (0+4*(num))
+/* RXEQ Parameter */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_OFST 0
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LEN 4
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_MINNUM 1
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM 63
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8
+/* enum: Attenuation (0-15) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_ATT 0x0
+/* enum: CTLE Boost (0-15) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_BOOST 0x1
+/* enum: DFE Tap1 (0 - max negative, 64 - zero, 127 - max positive) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP1 0x2
+/* enum: DFE Tap2 (0 - max negative, 32 - zero, 63 - max positive) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP2 0x3
+/* enum: DFE Tap3 (0 - max negative, 32 - zero, 63 - max positive) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP3 0x4
+/* enum: DFE Tap4 (0 - max negative, 32 - zero, 63 - max positive) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP4 0x5
+/* enum: DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP5 0x6
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 4
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_1 0x1 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_2 0x2 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_3 0x3 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_4 0x4 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_5 0x5 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_6 0x6 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_7 0x7 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_ALL 0x8 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_LBN 12
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_WIDTH 12
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_LBN 24
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8
+
+/* MC_CMD_PCIE_TUNE_TXEQ_GET_IN msgrequest */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_OP_OFST 0
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_RSVD_OFST 1
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_RSVD_LEN 3
+
+/* MC_CMD_PCIE_TUNE_TXEQ_GET_OUT msgresponse */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LENMIN 4
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LENMAX 252
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LEN(num) (0+4*(num))
+/* TXEQ Parameter */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_OFST 0
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LEN 4
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_MINNUM 1
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM 63
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN 0
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_WIDTH 8
+/* enum: TxMargin (PIPE) */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_TXMARGIN 0x0
+/* enum: TxSwing (PIPE) */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_TXSWING 0x1
+/* enum: De-emphasis coefficient C(-1) (PIPE) */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_CM1 0x2
+/* enum: De-emphasis coefficient C(0) (PIPE) */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_C0 0x3
+/* enum: De-emphasis coefficient C(+1) (PIPE) */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_CP1 0x4
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 4
+/* Enum values, see field(s): */
+/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_LANE */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_RESERVED_LBN 12
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_RESERVED_WIDTH 12
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_LBN 24
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8
+
+/* MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN msgrequest */
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LEN 8
+/* Requested operation */
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_OP_OFST 0
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_OFST 1
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_LEN 3
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LANE_OFST 4
+
+/* MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT msgresponse */
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT_LEN 0
+
+/* MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN msgrequest */
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_OP_OFST 0
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_RSVD_OFST 1
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_RSVD_LEN 3
+
+/* MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT msgresponse */
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LENMIN 0
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LENMAX 252
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LEN(num) (0+2*(num))
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_OFST 0
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_LEN 2
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MINNUM 0
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM 126
+
+
+/***********************************/
+/* MC_CMD_LICENSING
+ * Operations on the NVRAM_PARTITION_TYPE_LICENSE application license partition
+ */
+#define MC_CMD_LICENSING 0xf3
+
+/* MC_CMD_LICENSING_IN msgrequest */
+#define MC_CMD_LICENSING_IN_LEN 4
+/* identifies the type of operation requested */
+#define MC_CMD_LICENSING_IN_OP_OFST 0
+/* enum: re-read and apply licenses after a license key partition update; note
+ * that this operation returns a zero-length response
+ */
+#define MC_CMD_LICENSING_IN_OP_UPDATE_LICENSE 0x0
+/* enum: report counts of installed licenses */
+#define MC_CMD_LICENSING_IN_OP_GET_KEY_STATS 0x1
+
+/* MC_CMD_LICENSING_OUT msgresponse */
+#define MC_CMD_LICENSING_OUT_LEN 28
+/* count of application keys which are valid */
+#define MC_CMD_LICENSING_OUT_VALID_APP_KEYS_OFST 0
+/* sum of UNVERIFIABLE_APP_KEYS + WRONG_NODE_APP_KEYS (for compatibility with
+ * MC_CMD_FC_OP_LICENSE)
+ */
+#define MC_CMD_LICENSING_OUT_INVALID_APP_KEYS_OFST 4
+/* count of application keys which are invalid due to being blacklisted */
+#define MC_CMD_LICENSING_OUT_BLACKLISTED_APP_KEYS_OFST 8
+/* count of application keys which are invalid due to being unverifiable */
+#define MC_CMD_LICENSING_OUT_UNVERIFIABLE_APP_KEYS_OFST 12
+/* count of application keys which are invalid due to being for the wrong node
+ */
+#define MC_CMD_LICENSING_OUT_WRONG_NODE_APP_KEYS_OFST 16
+/* licensing state (for diagnostics; the exact meaning of the bits in this
+ * field are private to the firmware)
+ */
+#define MC_CMD_LICENSING_OUT_LICENSING_STATE_OFST 20
+/* licensing subsystem self-test report (for manftest) */
+#define MC_CMD_LICENSING_OUT_LICENSING_SELF_TEST_OFST 24
+/* enum: licensing subsystem self-test failed */
+#define MC_CMD_LICENSING_OUT_SELF_TEST_FAIL 0x0
+/* enum: licensing subsystem self-test passed */
+#define MC_CMD_LICENSING_OUT_SELF_TEST_PASS 0x1
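
A sketch of the GET_KEY_STATS operation under the same assumptions as the
earlier examples (hypothetical wrapper, short response treated as -EIO):

	static int efx_mcdi_licensing_stats_example(struct efx_nic *efx,
						    u32 *valid, u32 *invalid)
	{
		MCDI_DECLARE_BUF(inbuf, MC_CMD_LICENSING_IN_LEN);
		MCDI_DECLARE_BUF(outbuf, MC_CMD_LICENSING_OUT_LEN);
		size_t outlen;
		int rc;

		MCDI_SET_DWORD(inbuf, LICENSING_IN_OP,
			       MC_CMD_LICENSING_IN_OP_GET_KEY_STATS);

		rc = efx_mcdi_rpc(efx, MC_CMD_LICENSING, inbuf, sizeof(inbuf),
				  outbuf, sizeof(outbuf), &outlen);
		if (rc)
			return rc;
		if (outlen < MC_CMD_LICENSING_OUT_LEN)
			return -EIO;

		*valid = MCDI_DWORD(outbuf, LICENSING_OUT_VALID_APP_KEYS);
		*invalid = MCDI_DWORD(outbuf, LICENSING_OUT_INVALID_APP_KEYS);
		return 0;
	}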
+
+
+/***********************************/
+/* MC_CMD_MC2MC_PROXY
+ * Execute an arbitrary MCDI command on the slave MC of a dual-core device.
+ * This will fail on a single-core system.
+ */
+#define MC_CMD_MC2MC_PROXY 0xf4
+
+/* MC_CMD_MC2MC_PROXY_IN msgrequest */
+#define MC_CMD_MC2MC_PROXY_IN_LEN 0
+
+/* MC_CMD_MC2MC_PROXY_OUT msgresponse */
+#define MC_CMD_MC2MC_PROXY_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_LICENSED_APP_STATE
+ * Query the state of an individual licensed application. (Note that the actual
+ * state may be invalidated by the MC_CMD_LICENSING OP_UPDATE_LICENSE operation
+ * or a reboot of the MC.)
+ */
+#define MC_CMD_GET_LICENSED_APP_STATE 0xf5
+
+/* MC_CMD_GET_LICENSED_APP_STATE_IN msgrequest */
+#define MC_CMD_GET_LICENSED_APP_STATE_IN_LEN 4
+/* application ID to query (LICENSED_APP_ID_xxx) */
+#define MC_CMD_GET_LICENSED_APP_STATE_IN_APP_ID_OFST 0
+
+/* MC_CMD_GET_LICENSED_APP_STATE_OUT msgresponse */
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN 4
+/* state of this application */
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_STATE_OFST 0
+/* enum: no (or invalid) license is present for the application */
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_NOT_LICENSED 0x0
+/* enum: a valid license is present for the application */
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_LICENSED 0x1
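
Because the reported state is only a snapshot (it can be invalidated by
OP_UPDATE_LICENSE or an MC reboot, as noted above), a caller would typically
just query it on demand, roughly as in this hypothetical sketch:

	static bool efx_mcdi_app_licensed_example(struct efx_nic *efx, u32 app_id)
	{
		MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_LICENSED_APP_STATE_IN_LEN);
		MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN);
		int rc;

		MCDI_SET_DWORD(inbuf, GET_LICENSED_APP_STATE_IN_APP_ID, app_id);
		rc = efx_mcdi_rpc(efx, MC_CMD_GET_LICENSED_APP_STATE,
				  inbuf, sizeof(inbuf),
				  outbuf, sizeof(outbuf), NULL);
		if (rc)
			return false;	/* treat errors as "not licensed" */
		return MCDI_DWORD(outbuf, GET_LICENSED_APP_STATE_OUT_STATE) ==
		       MC_CMD_GET_LICENSED_APP_STATE_OUT_LICENSED;
	}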
+
+
+/***********************************/
+/* MC_CMD_LICENSED_APP_OP
+ * Perform an action for an individual licensed application.
+ */
+#define MC_CMD_LICENSED_APP_OP 0xf6
+
+/* MC_CMD_LICENSED_APP_OP_IN msgrequest */
+#define MC_CMD_LICENSED_APP_OP_IN_LENMIN 8
+#define MC_CMD_LICENSED_APP_OP_IN_LENMAX 252
+#define MC_CMD_LICENSED_APP_OP_IN_LEN(num) (8+4*(num))
+/* application ID */
+#define MC_CMD_LICENSED_APP_OP_IN_APP_ID_OFST 0
+/* the type of operation requested */
+#define MC_CMD_LICENSED_APP_OP_IN_OP_OFST 4
+/* enum: validate application */
+#define MC_CMD_LICENSED_APP_OP_IN_OP_VALIDATE 0x0
+/* arguments specific to this particular operation */
+#define MC_CMD_LICENSED_APP_OP_IN_ARGS_OFST 8
+#define MC_CMD_LICENSED_APP_OP_IN_ARGS_LEN 4
+#define MC_CMD_LICENSED_APP_OP_IN_ARGS_MINNUM 0
+#define MC_CMD_LICENSED_APP_OP_IN_ARGS_MAXNUM 61
+
+/* MC_CMD_LICENSED_APP_OP_OUT msgresponse */
+#define MC_CMD_LICENSED_APP_OP_OUT_LENMIN 0
+#define MC_CMD_LICENSED_APP_OP_OUT_LENMAX 252
+#define MC_CMD_LICENSED_APP_OP_OUT_LEN(num) (0+4*(num))
+/* result specific to this particular operation */
+#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_OFST 0
+#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_LEN 4
+#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_MINNUM 0
+#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_MAXNUM 63
+
+/* MC_CMD_LICENSED_APP_OP_VALIDATE_IN msgrequest */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_LEN 72
+/* application ID */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_APP_ID_OFST 0
+/* the type of operation requested */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_OP_OFST 4
+/* validation challenge */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_OFST 8
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_LEN 64
+
+/* MC_CMD_LICENSED_APP_OP_VALIDATE_OUT msgresponse */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_LEN 68
+/* feature expiry (time_t) */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_EXPIRY_OFST 0
+/* validation response */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_OFST 4
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_LEN 64
+
+
+/***********************************/
+/* MC_CMD_SET_PORT_SNIFF_CONFIG
+ * Configure port sniffing for the physical port associated with the calling
+ * function. Only a privileged function may change the port sniffing
+ * configuration. A copy of all traffic delivered to the host (non-promiscuous
+ * mode) or all traffic arriving at the port (promiscuous mode) may be
+ * delivered to a specific queue, or a set of queues with RSS.
+ */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG 0xf7
+
+/* MC_CMD_SET_PORT_SNIFF_CONFIG_IN msgrequest */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_LEN 16
+/* configuration flags */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_LBN 1
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_WIDTH 1
+/* receive queue handle (for RSS mode, this is the base queue) */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4
+/* receive mode */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8
+/* enum: receive to just the specified queue */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0
+/* enum: receive to multiple queues using RSS context */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS 0x1
+/* RSS context (for RX_MODE_RSS) as returned by MC_CMD_RSS_CONTEXT_ALLOC. Note
+ * that these handles should be considered opaque to the host, although a value
+ * of 0xFFFFFFFF is guaranteed never to be a valid handle.
+ */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12
+
+/* MC_CMD_SET_PORT_SNIFF_CONFIG_OUT msgresponse */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_OUT_LEN 0
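
For illustration, enabling non-promiscuous sniffing to a single receive queue
might be requested as below. The wrapper and the use of 0xffffffff as a "no
RSS context" placeholder in SIMPLE mode are assumptions; the flags word is
built directly from the ENABLE/PROMISCUOUS bit positions above:

	static int efx_mcdi_port_sniff_example(struct efx_nic *efx, u32 rxq)
	{
		MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_PORT_SNIFF_CONFIG_IN_LEN);

		MCDI_SET_DWORD(inbuf, SET_PORT_SNIFF_CONFIG_IN_FLAGS,
			       1 << MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_LBN);
		MCDI_SET_DWORD(inbuf, SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE, rxq);
		MCDI_SET_DWORD(inbuf, SET_PORT_SNIFF_CONFIG_IN_RX_MODE,
			       MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE);
		MCDI_SET_DWORD(inbuf, SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT,
			       0xffffffff);	/* unused in SIMPLE mode */

		return efx_mcdi_rpc(efx, MC_CMD_SET_PORT_SNIFF_CONFIG,
				    inbuf, sizeof(inbuf), NULL, 0, NULL);
	}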
+
+
+/***********************************/
+/* MC_CMD_GET_PORT_SNIFF_CONFIG
+ * Obtain the current port sniffing configuration for the physical port
+ * associated with the calling function. Only a privileged function may read
+ * the configuration.
+ */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG 0xf8
+
+/* MC_CMD_GET_PORT_SNIFF_CONFIG_IN msgrequest */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_IN_LEN 0
+
+/* MC_CMD_GET_PORT_SNIFF_CONFIG_OUT msgresponse */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_LEN 16
+/* configuration flags */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_LBN 1
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_WIDTH 1
+/* receiving queue handle (for RSS mode, this is the base queue) */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4
+/* receive mode */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8
+/* enum: receiving to just the specified queue */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0
+/* enum: receiving to multiple queues using RSS context */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1
+/* RSS context (for RX_MODE_RSS) */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12
+
+
+#endif /* MCDI_PCOL_H */
diff --git a/kernel/drivers/net/ethernet/sfc/mcdi_port.c b/kernel/drivers/net/ethernet/sfc/mcdi_port.c
new file mode 100644
index 000000000..fb19b70ea
--- /dev/null
+++ b/kernel/drivers/net/ethernet/sfc/mcdi_port.c
@@ -0,0 +1,1035 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2009-2013 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+/*
+ * Driver for PHY related operations via MCDI.
+ */
+
+#include <linux/slab.h>
+#include "efx.h"
+#include "phy.h"
+#include "mcdi.h"
+#include "mcdi_pcol.h"
+#include "nic.h"
+#include "selftest.h"
+
+struct efx_mcdi_phy_data {
+ u32 flags;
+ u32 type;
+ u32 supported_cap;
+ u32 channel;
+ u32 port;
+ u32 stats_mask;
+ u8 name[20];
+ u32 media;
+ u32 mmd_mask;
+ u8 revision[20];
+ u32 forced_cap;
+};
+
+static int
+efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_data *cfg)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_CFG_OUT_LEN);
+ size_t outlen;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_GET_PHY_CFG_IN_LEN != 0);
+ BUILD_BUG_ON(MC_CMD_GET_PHY_CFG_OUT_NAME_LEN != sizeof(cfg->name));
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_CFG, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ goto fail;
+
+ if (outlen < MC_CMD_GET_PHY_CFG_OUT_LEN) {
+ rc = -EIO;
+ goto fail;
+ }
+
+ cfg->flags = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_FLAGS);
+ cfg->type = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_TYPE);
+ cfg->supported_cap =
+ MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_SUPPORTED_CAP);
+ cfg->channel = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_CHANNEL);
+ cfg->port = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_PRT);
+ cfg->stats_mask = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_STATS_MASK);
+ memcpy(cfg->name, MCDI_PTR(outbuf, GET_PHY_CFG_OUT_NAME),
+ sizeof(cfg->name));
+ cfg->media = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_MEDIA_TYPE);
+ cfg->mmd_mask = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_MMD_MASK);
+ memcpy(cfg->revision, MCDI_PTR(outbuf, GET_PHY_CFG_OUT_REVISION),
+ sizeof(cfg->revision));
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+static int efx_mcdi_set_link(struct efx_nic *efx, u32 capabilities,
+ u32 flags, u32 loopback_mode,
+ u32 loopback_speed)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_LINK_IN_LEN);
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_SET_LINK_OUT_LEN != 0);
+
+ MCDI_SET_DWORD(inbuf, SET_LINK_IN_CAP, capabilities);
+ MCDI_SET_DWORD(inbuf, SET_LINK_IN_FLAGS, flags);
+ MCDI_SET_DWORD(inbuf, SET_LINK_IN_LOOPBACK_MODE, loopback_mode);
+ MCDI_SET_DWORD(inbuf, SET_LINK_IN_LOOPBACK_SPEED, loopback_speed);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_SET_LINK, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ return rc;
+}
+
+static int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LOOPBACK_MODES_OUT_LEN);
+ size_t outlen;
+ int rc;
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_LOOPBACK_MODES, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ goto fail;
+
+ if (outlen < (MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_OFST +
+ MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LEN)) {
+ rc = -EIO;
+ goto fail;
+ }
+
+ *loopback_modes = MCDI_QWORD(outbuf, GET_LOOPBACK_MODES_OUT_SUGGESTED);
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+static int efx_mcdi_mdio_read(struct net_device *net_dev,
+ int prtad, int devad, u16 addr)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MDIO_READ_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_MDIO_READ_OUT_LEN);
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, MDIO_READ_IN_BUS, efx->mdio_bus);
+ MCDI_SET_DWORD(inbuf, MDIO_READ_IN_PRTAD, prtad);
+ MCDI_SET_DWORD(inbuf, MDIO_READ_IN_DEVAD, devad);
+ MCDI_SET_DWORD(inbuf, MDIO_READ_IN_ADDR, addr);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_MDIO_READ, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+
+ if (MCDI_DWORD(outbuf, MDIO_READ_OUT_STATUS) !=
+ MC_CMD_MDIO_STATUS_GOOD)
+ return -EIO;
+
+ return (u16)MCDI_DWORD(outbuf, MDIO_READ_OUT_VALUE);
+}
+
+static int efx_mcdi_mdio_write(struct net_device *net_dev,
+ int prtad, int devad, u16 addr, u16 value)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MDIO_WRITE_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_MDIO_WRITE_OUT_LEN);
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_BUS, efx->mdio_bus);
+ MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_PRTAD, prtad);
+ MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_DEVAD, devad);
+ MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_ADDR, addr);
+ MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_VALUE, value);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_MDIO_WRITE, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+
+ if (MCDI_DWORD(outbuf, MDIO_WRITE_OUT_STATUS) !=
+ MC_CMD_MDIO_STATUS_GOOD)
+ return -EIO;
+
+ return 0;
+}
+
+static u32 mcdi_to_ethtool_cap(u32 media, u32 cap)
+{
+ u32 result = 0;
+
+ switch (media) {
+ case MC_CMD_MEDIA_KX4:
+ result |= SUPPORTED_Backplane;
+ if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
+ result |= SUPPORTED_1000baseKX_Full;
+ if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
+ result |= SUPPORTED_10000baseKX4_Full;
+ if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
+ result |= SUPPORTED_40000baseKR4_Full;
+ break;
+
+ case MC_CMD_MEDIA_XFP:
+ case MC_CMD_MEDIA_SFP_PLUS:
+ result |= SUPPORTED_FIBRE;
+ break;
+
+ case MC_CMD_MEDIA_QSFP_PLUS:
+ result |= SUPPORTED_FIBRE;
+ if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
+ result |= SUPPORTED_40000baseCR4_Full;
+ break;
+
+ case MC_CMD_MEDIA_BASE_T:
+ result |= SUPPORTED_TP;
+ if (cap & (1 << MC_CMD_PHY_CAP_10HDX_LBN))
+ result |= SUPPORTED_10baseT_Half;
+ if (cap & (1 << MC_CMD_PHY_CAP_10FDX_LBN))
+ result |= SUPPORTED_10baseT_Full;
+ if (cap & (1 << MC_CMD_PHY_CAP_100HDX_LBN))
+ result |= SUPPORTED_100baseT_Half;
+ if (cap & (1 << MC_CMD_PHY_CAP_100FDX_LBN))
+ result |= SUPPORTED_100baseT_Full;
+ if (cap & (1 << MC_CMD_PHY_CAP_1000HDX_LBN))
+ result |= SUPPORTED_1000baseT_Half;
+ if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
+ result |= SUPPORTED_1000baseT_Full;
+ if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
+ result |= SUPPORTED_10000baseT_Full;
+ break;
+ }
+
+ if (cap & (1 << MC_CMD_PHY_CAP_PAUSE_LBN))
+ result |= SUPPORTED_Pause;
+ if (cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN))
+ result |= SUPPORTED_Asym_Pause;
+ if (cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
+ result |= SUPPORTED_Autoneg;
+
+ return result;
+}
+
+static u32 ethtool_to_mcdi_cap(u32 cap)
+{
+ u32 result = 0;
+
+ if (cap & SUPPORTED_10baseT_Half)
+ result |= (1 << MC_CMD_PHY_CAP_10HDX_LBN);
+ if (cap & SUPPORTED_10baseT_Full)
+ result |= (1 << MC_CMD_PHY_CAP_10FDX_LBN);
+ if (cap & SUPPORTED_100baseT_Half)
+ result |= (1 << MC_CMD_PHY_CAP_100HDX_LBN);
+ if (cap & SUPPORTED_100baseT_Full)
+ result |= (1 << MC_CMD_PHY_CAP_100FDX_LBN);
+ if (cap & SUPPORTED_1000baseT_Half)
+ result |= (1 << MC_CMD_PHY_CAP_1000HDX_LBN);
+ if (cap & (SUPPORTED_1000baseT_Full | SUPPORTED_1000baseKX_Full))
+ result |= (1 << MC_CMD_PHY_CAP_1000FDX_LBN);
+ if (cap & (SUPPORTED_10000baseT_Full | SUPPORTED_10000baseKX4_Full))
+ result |= (1 << MC_CMD_PHY_CAP_10000FDX_LBN);
+ if (cap & (SUPPORTED_40000baseCR4_Full | SUPPORTED_40000baseKR4_Full))
+ result |= (1 << MC_CMD_PHY_CAP_40000FDX_LBN);
+ if (cap & SUPPORTED_Pause)
+ result |= (1 << MC_CMD_PHY_CAP_PAUSE_LBN);
+ if (cap & SUPPORTED_Asym_Pause)
+ result |= (1 << MC_CMD_PHY_CAP_ASYM_LBN);
+ if (cap & SUPPORTED_Autoneg)
+ result |= (1 << MC_CMD_PHY_CAP_AN_LBN);
+
+ return result;
+}
+
+static u32 efx_get_mcdi_phy_flags(struct efx_nic *efx)
+{
+ struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
+ enum efx_phy_mode mode, supported;
+ u32 flags;
+
+ /* TODO: Advertise the capabilities supported by this PHY */
+ supported = 0;
+ if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_TXDIS_LBN))
+ supported |= PHY_MODE_TX_DISABLED;
+ if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_LBN))
+ supported |= PHY_MODE_LOW_POWER;
+ if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_POWEROFF_LBN))
+ supported |= PHY_MODE_OFF;
+
+ mode = efx->phy_mode & supported;
+
+ flags = 0;
+ if (mode & PHY_MODE_TX_DISABLED)
+ flags |= (1 << MC_CMD_SET_LINK_IN_TXDIS_LBN);
+ if (mode & PHY_MODE_LOW_POWER)
+ flags |= (1 << MC_CMD_SET_LINK_IN_LOWPOWER_LBN);
+ if (mode & PHY_MODE_OFF)
+ flags |= (1 << MC_CMD_SET_LINK_IN_POWEROFF_LBN);
+
+ return flags;
+}
+
+static u32 mcdi_to_ethtool_media(u32 media)
+{
+ switch (media) {
+ case MC_CMD_MEDIA_XAUI:
+ case MC_CMD_MEDIA_CX4:
+ case MC_CMD_MEDIA_KX4:
+ return PORT_OTHER;
+
+ case MC_CMD_MEDIA_XFP:
+ case MC_CMD_MEDIA_SFP_PLUS:
+ case MC_CMD_MEDIA_QSFP_PLUS:
+ return PORT_FIBRE;
+
+ case MC_CMD_MEDIA_BASE_T:
+ return PORT_TP;
+
+ default:
+ return PORT_OTHER;
+ }
+}
+
+static void efx_mcdi_phy_decode_link(struct efx_nic *efx,
+ struct efx_link_state *link_state,
+ u32 speed, u32 flags, u32 fcntl)
+{
+ switch (fcntl) {
+ case MC_CMD_FCNTL_AUTO:
+ WARN_ON(1); /* This is not a link mode */
+ link_state->fc = EFX_FC_AUTO | EFX_FC_TX | EFX_FC_RX;
+ break;
+ case MC_CMD_FCNTL_BIDIR:
+ link_state->fc = EFX_FC_TX | EFX_FC_RX;
+ break;
+ case MC_CMD_FCNTL_RESPOND:
+ link_state->fc = EFX_FC_RX;
+ break;
+ default:
+ WARN_ON(1);
+ case MC_CMD_FCNTL_OFF:
+ link_state->fc = 0;
+ break;
+ }
+
+ link_state->up = !!(flags & (1 << MC_CMD_GET_LINK_OUT_LINK_UP_LBN));
+ link_state->fd = !!(flags & (1 << MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN));
+ link_state->speed = speed;
+}
+
+static int efx_mcdi_phy_probe(struct efx_nic *efx)
+{
+ struct efx_mcdi_phy_data *phy_data;
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
+ u32 caps;
+ int rc;
+
+ /* Initialise and populate phy_data */
+ phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
+ if (phy_data == NULL)
+ return -ENOMEM;
+
+ rc = efx_mcdi_get_phy_cfg(efx, phy_data);
+ if (rc != 0)
+ goto fail;
+
+ /* Read initial link advertisement */
+ BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
+ outbuf, sizeof(outbuf), NULL);
+ if (rc)
+ goto fail;
+
+ /* Fill out nic state */
+ efx->phy_data = phy_data;
+ efx->phy_type = phy_data->type;
+
+ efx->mdio_bus = phy_data->channel;
+ efx->mdio.prtad = phy_data->port;
+ efx->mdio.mmds = phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22);
+ efx->mdio.mode_support = 0;
+ if (phy_data->mmd_mask & (1 << MC_CMD_MMD_CLAUSE22))
+ efx->mdio.mode_support |= MDIO_SUPPORTS_C22;
+ if (phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22))
+ efx->mdio.mode_support |= MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
+
+ caps = MCDI_DWORD(outbuf, GET_LINK_OUT_CAP);
+ if (caps & (1 << MC_CMD_PHY_CAP_AN_LBN))
+ efx->link_advertising =
+ mcdi_to_ethtool_cap(phy_data->media, caps);
+ else
+ phy_data->forced_cap = caps;
+
+ /* Assert that we can map efx -> mcdi loopback modes */
+ BUILD_BUG_ON(LOOPBACK_NONE != MC_CMD_LOOPBACK_NONE);
+ BUILD_BUG_ON(LOOPBACK_DATA != MC_CMD_LOOPBACK_DATA);
+ BUILD_BUG_ON(LOOPBACK_GMAC != MC_CMD_LOOPBACK_GMAC);
+ BUILD_BUG_ON(LOOPBACK_XGMII != MC_CMD_LOOPBACK_XGMII);
+ BUILD_BUG_ON(LOOPBACK_XGXS != MC_CMD_LOOPBACK_XGXS);
+ BUILD_BUG_ON(LOOPBACK_XAUI != MC_CMD_LOOPBACK_XAUI);
+ BUILD_BUG_ON(LOOPBACK_GMII != MC_CMD_LOOPBACK_GMII);
+ BUILD_BUG_ON(LOOPBACK_SGMII != MC_CMD_LOOPBACK_SGMII);
+ BUILD_BUG_ON(LOOPBACK_XGBR != MC_CMD_LOOPBACK_XGBR);
+ BUILD_BUG_ON(LOOPBACK_XFI != MC_CMD_LOOPBACK_XFI);
+ BUILD_BUG_ON(LOOPBACK_XAUI_FAR != MC_CMD_LOOPBACK_XAUI_FAR);
+ BUILD_BUG_ON(LOOPBACK_GMII_FAR != MC_CMD_LOOPBACK_GMII_FAR);
+ BUILD_BUG_ON(LOOPBACK_SGMII_FAR != MC_CMD_LOOPBACK_SGMII_FAR);
+ BUILD_BUG_ON(LOOPBACK_XFI_FAR != MC_CMD_LOOPBACK_XFI_FAR);
+ BUILD_BUG_ON(LOOPBACK_GPHY != MC_CMD_LOOPBACK_GPHY);
+ BUILD_BUG_ON(LOOPBACK_PHYXS != MC_CMD_LOOPBACK_PHYXS);
+ BUILD_BUG_ON(LOOPBACK_PCS != MC_CMD_LOOPBACK_PCS);
+ BUILD_BUG_ON(LOOPBACK_PMAPMD != MC_CMD_LOOPBACK_PMAPMD);
+ BUILD_BUG_ON(LOOPBACK_XPORT != MC_CMD_LOOPBACK_XPORT);
+ BUILD_BUG_ON(LOOPBACK_XGMII_WS != MC_CMD_LOOPBACK_XGMII_WS);
+ BUILD_BUG_ON(LOOPBACK_XAUI_WS != MC_CMD_LOOPBACK_XAUI_WS);
+ BUILD_BUG_ON(LOOPBACK_XAUI_WS_FAR != MC_CMD_LOOPBACK_XAUI_WS_FAR);
+ BUILD_BUG_ON(LOOPBACK_XAUI_WS_NEAR != MC_CMD_LOOPBACK_XAUI_WS_NEAR);
+ BUILD_BUG_ON(LOOPBACK_GMII_WS != MC_CMD_LOOPBACK_GMII_WS);
+ BUILD_BUG_ON(LOOPBACK_XFI_WS != MC_CMD_LOOPBACK_XFI_WS);
+ BUILD_BUG_ON(LOOPBACK_XFI_WS_FAR != MC_CMD_LOOPBACK_XFI_WS_FAR);
+ BUILD_BUG_ON(LOOPBACK_PHYXS_WS != MC_CMD_LOOPBACK_PHYXS_WS);
+
+ rc = efx_mcdi_loopback_modes(efx, &efx->loopback_modes);
+ if (rc != 0)
+ goto fail;
+	/* The MC indicates that LOOPBACK_NONE is a valid loopback mode,
+	 * but by convention we don't include it in loopback_modes */
+ efx->loopback_modes &= ~(1 << LOOPBACK_NONE);
+
+ /* Set the initial link mode */
+ efx_mcdi_phy_decode_link(
+ efx, &efx->link_state,
+ MCDI_DWORD(outbuf, GET_LINK_OUT_LINK_SPEED),
+ MCDI_DWORD(outbuf, GET_LINK_OUT_FLAGS),
+ MCDI_DWORD(outbuf, GET_LINK_OUT_FCNTL));
+
+ /* Default to Autonegotiated flow control if the PHY supports it */
+ efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
+ if (phy_data->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
+ efx->wanted_fc |= EFX_FC_AUTO;
+ efx_link_set_wanted_fc(efx, efx->wanted_fc);
+
+ return 0;
+
+fail:
+ kfree(phy_data);
+ return rc;
+}
+
+int efx_mcdi_port_reconfigure(struct efx_nic *efx)
+{
+ struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
+ u32 caps = (efx->link_advertising ?
+ ethtool_to_mcdi_cap(efx->link_advertising) :
+ phy_cfg->forced_cap);
+
+ return efx_mcdi_set_link(efx, caps, efx_get_mcdi_phy_flags(efx),
+ efx->loopback_mode, 0);
+}
+
+/* Verify that the forced flow control settings (!EFX_FC_AUTO) are
+ * supported by the link partner. Warn the user if this isn't the case.
+ */
+static void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa)
+{
+ struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
+ u32 rmtadv;
+
+ /* The link partner capabilities are only relevant if the
+ * link supports flow control autonegotiation */
+ if (~phy_cfg->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
+ return;
+
+ /* If flow control autoneg is supported and enabled, then fine */
+ if (efx->wanted_fc & EFX_FC_AUTO)
+ return;
+
+ rmtadv = 0;
+ if (lpa & (1 << MC_CMD_PHY_CAP_PAUSE_LBN))
+ rmtadv |= ADVERTISED_Pause;
+ if (lpa & (1 << MC_CMD_PHY_CAP_ASYM_LBN))
+ rmtadv |= ADVERTISED_Asym_Pause;
+
+ if ((efx->wanted_fc & EFX_FC_TX) && rmtadv == ADVERTISED_Asym_Pause)
+ netif_err(efx, link, efx->net_dev,
+ "warning: link partner doesn't support pause frames");
+}
+
+static bool efx_mcdi_phy_poll(struct efx_nic *efx)
+{
+ struct efx_link_state old_state = efx->link_state;
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
+ int rc;
+
+ WARN_ON(!mutex_is_locked(&efx->mac_lock));
+
+ BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
+ outbuf, sizeof(outbuf), NULL);
+ if (rc)
+ efx->link_state.up = false;
+ else
+ efx_mcdi_phy_decode_link(
+ efx, &efx->link_state,
+ MCDI_DWORD(outbuf, GET_LINK_OUT_LINK_SPEED),
+ MCDI_DWORD(outbuf, GET_LINK_OUT_FLAGS),
+ MCDI_DWORD(outbuf, GET_LINK_OUT_FCNTL));
+
+ return !efx_link_state_equal(&efx->link_state, &old_state);
+}
+
+static void efx_mcdi_phy_remove(struct efx_nic *efx)
+{
+ struct efx_mcdi_phy_data *phy_data = efx->phy_data;
+
+ efx->phy_data = NULL;
+ kfree(phy_data);
+}
+
+static void efx_mcdi_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
+{
+ struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
+ int rc;
+
+ ecmd->supported =
+ mcdi_to_ethtool_cap(phy_cfg->media, phy_cfg->supported_cap);
+ ecmd->advertising = efx->link_advertising;
+ ethtool_cmd_speed_set(ecmd, efx->link_state.speed);
+ ecmd->duplex = efx->link_state.fd;
+ ecmd->port = mcdi_to_ethtool_media(phy_cfg->media);
+ ecmd->phy_address = phy_cfg->port;
+ ecmd->transceiver = XCVR_INTERNAL;
+ ecmd->autoneg = !!(efx->link_advertising & ADVERTISED_Autoneg);
+ ecmd->mdio_support = (efx->mdio.mode_support &
+ (MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22));
+
+ BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
+ outbuf, sizeof(outbuf), NULL);
+ if (rc)
+ return;
+ ecmd->lp_advertising =
+ mcdi_to_ethtool_cap(phy_cfg->media,
+ MCDI_DWORD(outbuf, GET_LINK_OUT_LP_CAP));
+}
+
+static int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
+{
+ struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
+ u32 caps;
+ int rc;
+
+ if (ecmd->autoneg) {
+ caps = (ethtool_to_mcdi_cap(ecmd->advertising) |
+ 1 << MC_CMD_PHY_CAP_AN_LBN);
+ } else if (ecmd->duplex) {
+ switch (ethtool_cmd_speed(ecmd)) {
+ case 10: caps = 1 << MC_CMD_PHY_CAP_10FDX_LBN; break;
+ case 100: caps = 1 << MC_CMD_PHY_CAP_100FDX_LBN; break;
+ case 1000: caps = 1 << MC_CMD_PHY_CAP_1000FDX_LBN; break;
+ case 10000: caps = 1 << MC_CMD_PHY_CAP_10000FDX_LBN; break;
+ case 40000: caps = 1 << MC_CMD_PHY_CAP_40000FDX_LBN; break;
+ default: return -EINVAL;
+ }
+ } else {
+ switch (ethtool_cmd_speed(ecmd)) {
+ case 10: caps = 1 << MC_CMD_PHY_CAP_10HDX_LBN; break;
+ case 100: caps = 1 << MC_CMD_PHY_CAP_100HDX_LBN; break;
+ case 1000: caps = 1 << MC_CMD_PHY_CAP_1000HDX_LBN; break;
+ default: return -EINVAL;
+ }
+ }
+
+ rc = efx_mcdi_set_link(efx, caps, efx_get_mcdi_phy_flags(efx),
+ efx->loopback_mode, 0);
+ if (rc)
+ return rc;
+
+ if (ecmd->autoneg) {
+ efx_link_set_advertising(
+ efx, ecmd->advertising | ADVERTISED_Autoneg);
+ phy_cfg->forced_cap = 0;
+ } else {
+ efx_link_set_advertising(efx, 0);
+ phy_cfg->forced_cap = caps;
+ }
+ return 0;
+}
+
+static int efx_mcdi_phy_test_alive(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_STATE_OUT_LEN);
+ size_t outlen;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_GET_PHY_STATE_IN_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_STATE, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+
+ if (outlen < MC_CMD_GET_PHY_STATE_OUT_LEN)
+ return -EIO;
+ if (MCDI_DWORD(outbuf, GET_PHY_STATE_OUT_STATE) != MC_CMD_PHY_STATE_OK)
+ return -EINVAL;
+
+ return 0;
+}
+
+static const char *const mcdi_sft9001_cable_diag_names[] = {
+ "cable.pairA.length",
+ "cable.pairB.length",
+ "cable.pairC.length",
+ "cable.pairD.length",
+ "cable.pairA.status",
+ "cable.pairB.status",
+ "cable.pairC.status",
+ "cable.pairD.status",
+};
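+
+/* These eight names correspond one-to-one with the eight cable-diagnostic
+ * dwords that efx_mcdi_bist() copies out of the SFT9001 POLL_BIST response
+ * below: four pair lengths followed by four pair statuses.
+ */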
+
+static int efx_mcdi_bist(struct efx_nic *efx, unsigned int bist_mode,
+ int *results)
+{
+ unsigned int retry, i, count = 0;
+ size_t outlen;
+ u32 status;
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_POLL_BIST_OUT_SFT9001_LEN);
+ u8 *ptr;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_START_BIST_OUT_LEN != 0);
+ MCDI_SET_DWORD(inbuf, START_BIST_IN_TYPE, bist_mode);
+ rc = efx_mcdi_rpc(efx, MC_CMD_START_BIST,
+ inbuf, MC_CMD_START_BIST_IN_LEN, NULL, 0, NULL);
+ if (rc)
+ goto out;
+
+ /* Wait up to 10s for BIST to finish */
+ for (retry = 0; retry < 100; ++retry) {
+ BUILD_BUG_ON(MC_CMD_POLL_BIST_IN_LEN != 0);
+ rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ goto out;
+
+ status = MCDI_DWORD(outbuf, POLL_BIST_OUT_RESULT);
+ if (status != MC_CMD_POLL_BIST_RUNNING)
+ goto finished;
+
+ msleep(100);
+ }
+
+ rc = -ETIMEDOUT;
+ goto out;
+
+finished:
+ results[count++] = (status == MC_CMD_POLL_BIST_PASSED) ? 1 : -1;
+
+ /* SFT9001 specific cable diagnostics output */
+ if (efx->phy_type == PHY_TYPE_SFT9001B &&
+ (bist_mode == MC_CMD_PHY_BIST_CABLE_SHORT ||
+ bist_mode == MC_CMD_PHY_BIST_CABLE_LONG)) {
+ ptr = MCDI_PTR(outbuf, POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A);
+ if (status == MC_CMD_POLL_BIST_PASSED &&
+ outlen >= MC_CMD_POLL_BIST_OUT_SFT9001_LEN) {
+ for (i = 0; i < 8; i++) {
+ results[count + i] =
+ EFX_DWORD_FIELD(((efx_dword_t *)ptr)[i],
+ EFX_DWORD_0);
+ }
+ }
+ count += 8;
+ }
+ rc = count;
+
+out:
+ return rc;
+}
+
+static int efx_mcdi_phy_run_tests(struct efx_nic *efx, int *results,
+ unsigned flags)
+{
+ struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
+ u32 mode;
+ int rc;
+
+ if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_LBN)) {
+ rc = efx_mcdi_bist(efx, MC_CMD_PHY_BIST, results);
+ if (rc < 0)
+ return rc;
+
+ results += rc;
+ }
+
+	/* If we support both LONG and SHORT cable BIST, run LONG only for
+	 * offline (link-disrupting) tests and SHORT otherwise. If only one
+	 * is supported, run whichever we have */
+ mode = 0;
+ if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_LBN)) {
+ if ((flags & ETH_TEST_FL_OFFLINE) &&
+ (phy_cfg->flags &
+ (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_LBN)))
+ mode = MC_CMD_PHY_BIST_CABLE_LONG;
+ else
+ mode = MC_CMD_PHY_BIST_CABLE_SHORT;
+ } else if (phy_cfg->flags &
+ (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_LBN))
+ mode = MC_CMD_PHY_BIST_CABLE_LONG;
+
+ if (mode != 0) {
+ rc = efx_mcdi_bist(efx, mode, results);
+ if (rc < 0)
+ return rc;
+ results += rc;
+ }
+
+ return 0;
+}
+
+static const char *efx_mcdi_phy_test_name(struct efx_nic *efx,
+ unsigned int index)
+{
+ struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
+
+ if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_LBN)) {
+ if (index == 0)
+ return "bist";
+ --index;
+ }
+
+ if (phy_cfg->flags & ((1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_LBN) |
+ (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_LBN))) {
+ if (index == 0)
+ return "cable";
+ --index;
+
+ if (efx->phy_type == PHY_TYPE_SFT9001B) {
+ if (index < ARRAY_SIZE(mcdi_sft9001_cable_diag_names))
+ return mcdi_sft9001_cable_diag_names[index];
+ index -= ARRAY_SIZE(mcdi_sft9001_cable_diag_names);
+ }
+ }
+
+ return NULL;
+}
+
+#define SFP_PAGE_SIZE 128
+#define SFP_NUM_PAGES 2
+static int efx_mcdi_phy_get_module_eeprom(struct efx_nic *efx,
+ struct ethtool_eeprom *ee, u8 *data)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX);
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN);
+ size_t outlen;
+ int rc;
+ unsigned int payload_len;
+ unsigned int space_remaining = ee->len;
+ unsigned int page;
+ unsigned int page_off;
+ unsigned int to_copy;
+ u8 *user_data = data;
+
+ BUILD_BUG_ON(SFP_PAGE_SIZE * SFP_NUM_PAGES != ETH_MODULE_SFF_8079_LEN);
+
+ page_off = ee->offset % SFP_PAGE_SIZE;
+ page = ee->offset / SFP_PAGE_SIZE;
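+	/* e.g. a request with offset 130 and len 60 starts at byte 2 of
+	 * page 1 (130 / 128 = 1, 130 % 128 = 2) and is satisfied entirely
+	 * from that page.
+	 */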
+
+ while (space_remaining && (page < SFP_NUM_PAGES)) {
+ MCDI_SET_DWORD(inbuf, GET_PHY_MEDIA_INFO_IN_PAGE, page);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_MEDIA_INFO,
+ inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf),
+ &outlen);
+ if (rc)
+ return rc;
+
+ if (outlen < (MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST +
+ SFP_PAGE_SIZE))
+ return -EIO;
+
+ payload_len = MCDI_DWORD(outbuf,
+ GET_PHY_MEDIA_INFO_OUT_DATALEN);
+ if (payload_len != SFP_PAGE_SIZE)
+ return -EIO;
+
+ /* Copy as much as we can into data */
+ payload_len -= page_off;
+ to_copy = (space_remaining < payload_len) ?
+ space_remaining : payload_len;
+
+ memcpy(user_data,
+ MCDI_PTR(outbuf, GET_PHY_MEDIA_INFO_OUT_DATA) + page_off,
+ to_copy);
+
+ space_remaining -= to_copy;
+ user_data += to_copy;
+ page_off = 0;
+ page++;
+ }
+
+ return 0;
+}
+
+static int efx_mcdi_phy_get_module_info(struct efx_nic *efx,
+ struct ethtool_modinfo *modinfo)
+{
+ struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
+
+ switch (phy_cfg->media) {
+ case MC_CMD_MEDIA_SFP_PLUS:
+ modinfo->type = ETH_MODULE_SFF_8079;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct efx_phy_operations efx_mcdi_phy_ops = {
+ .probe = efx_mcdi_phy_probe,
+ .init = efx_port_dummy_op_int,
+ .reconfigure = efx_mcdi_port_reconfigure,
+ .poll = efx_mcdi_phy_poll,
+ .fini = efx_port_dummy_op_void,
+ .remove = efx_mcdi_phy_remove,
+ .get_settings = efx_mcdi_phy_get_settings,
+ .set_settings = efx_mcdi_phy_set_settings,
+ .test_alive = efx_mcdi_phy_test_alive,
+ .run_tests = efx_mcdi_phy_run_tests,
+ .test_name = efx_mcdi_phy_test_name,
+ .get_module_eeprom = efx_mcdi_phy_get_module_eeprom,
+ .get_module_info = efx_mcdi_phy_get_module_info,
+};
+
+u32 efx_mcdi_phy_get_caps(struct efx_nic *efx)
+{
+ struct efx_mcdi_phy_data *phy_data = efx->phy_data;
+
+ return phy_data->supported_cap;
+}
+
+static unsigned int efx_mcdi_event_link_speed[] = {
+ [MCDI_EVENT_LINKCHANGE_SPEED_100M] = 100,
+ [MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000,
+ [MCDI_EVENT_LINKCHANGE_SPEED_10G] = 10000,
+ [MCDI_EVENT_LINKCHANGE_SPEED_40G] = 40000,
+};
+
+void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev)
+{
+ u32 flags, fcntl, speed, lpa;
+
+ speed = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_SPEED);
+ EFX_BUG_ON_PARANOID(speed >= ARRAY_SIZE(efx_mcdi_event_link_speed));
+ speed = efx_mcdi_event_link_speed[speed];
+
+ flags = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LINK_FLAGS);
+ fcntl = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_FCNTL);
+ lpa = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LP_CAP);
+
+ /* efx->link_state is only modified by efx_mcdi_phy_get_link(),
+ * which is only run after flushing the event queues. Therefore, it
+ * is safe to modify the link state outside of the mac_lock here.
+ */
+ efx_mcdi_phy_decode_link(efx, &efx->link_state, speed, flags, fcntl);
+
+ efx_mcdi_phy_check_fcntl(efx, lpa);
+
+ efx_link_status_changed(efx);
+}
+
+int efx_mcdi_set_mac(struct efx_nic *efx)
+{
+ u32 fcntl;
+ MCDI_DECLARE_BUF(cmdbytes, MC_CMD_SET_MAC_IN_LEN);
+
+ BUILD_BUG_ON(MC_CMD_SET_MAC_OUT_LEN != 0);
+
+ ether_addr_copy(MCDI_PTR(cmdbytes, SET_MAC_IN_ADDR),
+ efx->net_dev->dev_addr);
+
+ MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_MTU,
+ EFX_MAX_FRAME_LEN(efx->net_dev->mtu));
+ MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_DRAIN, 0);
+
+ /* Set simple MAC filter for Siena */
+ MCDI_POPULATE_DWORD_1(cmdbytes, SET_MAC_IN_REJECT,
+ SET_MAC_IN_REJECT_UNCST, efx->unicast_filter);
+
+ switch (efx->wanted_fc) {
+ case EFX_FC_RX | EFX_FC_TX:
+ fcntl = MC_CMD_FCNTL_BIDIR;
+ break;
+ case EFX_FC_RX:
+ fcntl = MC_CMD_FCNTL_RESPOND;
+ break;
+ default:
+ fcntl = MC_CMD_FCNTL_OFF;
+ break;
+ }
+ if (efx->wanted_fc & EFX_FC_AUTO)
+ fcntl = MC_CMD_FCNTL_AUTO;
+ if (efx->fc_disable)
+ fcntl = MC_CMD_FCNTL_OFF;
+
+ MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_FCNTL, fcntl);
+
+ return efx_mcdi_rpc(efx, MC_CMD_SET_MAC, cmdbytes, sizeof(cmdbytes),
+ NULL, 0, NULL);
+}
+
+bool efx_mcdi_mac_check_fault(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
+ size_t outlength;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
+ outbuf, sizeof(outbuf), &outlength);
+ if (rc)
+ return true;
+
+ return MCDI_DWORD(outbuf, GET_LINK_OUT_MAC_FAULT) != 0;
+}
+
+enum efx_stats_action {
+ EFX_STATS_ENABLE,
+ EFX_STATS_DISABLE,
+ EFX_STATS_PULL,
+};
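+
+/* efx_mcdi_mac_stats() turns these into a single MC_CMD_MAC_STATS call:
+ * ENABLE requests periodic (1000 ms) updates of the DMA'd stats buffer,
+ * DISABLE stops them (and passes a zero DMA length), and PULL leaves the
+ * periodic state untouched while efx_mcdi_mac_pull_stats() waits for a
+ * fresh generation count.
+ */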
+
+static int efx_mcdi_mac_stats(struct efx_nic *efx,
+ enum efx_stats_action action, int clear)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN);
+ int rc;
+ int change = action == EFX_STATS_PULL ? 0 : 1;
+ int enable = action == EFX_STATS_ENABLE ? 1 : 0;
+ int period = action == EFX_STATS_ENABLE ? 1000 : 0;
+ dma_addr_t dma_addr = efx->stats_buffer.dma_addr;
+ u32 dma_len = action != EFX_STATS_DISABLE ?
+ MC_CMD_MAC_NSTATS * sizeof(u64) : 0;
+
+ BUILD_BUG_ON(MC_CMD_MAC_STATS_OUT_DMA_LEN != 0);
+
+ MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, dma_addr);
+ MCDI_POPULATE_DWORD_7(inbuf, MAC_STATS_IN_CMD,
+ MAC_STATS_IN_DMA, !!enable,
+ MAC_STATS_IN_CLEAR, clear,
+ MAC_STATS_IN_PERIODIC_CHANGE, change,
+ MAC_STATS_IN_PERIODIC_ENABLE, enable,
+ MAC_STATS_IN_PERIODIC_CLEAR, 0,
+ MAC_STATS_IN_PERIODIC_NOEVENT, 1,
+ MAC_STATS_IN_PERIOD_MS, period);
+ MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ return rc;
+}
+
+void efx_mcdi_mac_start_stats(struct efx_nic *efx)
+{
+ __le64 *dma_stats = efx->stats_buffer.addr;
+
+ dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID;
+
+ efx_mcdi_mac_stats(efx, EFX_STATS_ENABLE, 0);
+}
+
+void efx_mcdi_mac_stop_stats(struct efx_nic *efx)
+{
+ efx_mcdi_mac_stats(efx, EFX_STATS_DISABLE, 0);
+}
+
+#define EFX_MAC_STATS_WAIT_US 100
+#define EFX_MAC_STATS_WAIT_ATTEMPTS 10
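+/* Worst case, efx_mcdi_mac_pull_stats() therefore busy-waits for
+ * 10 * 100us = 1ms for the MC to rewrite the generation count before
+ * giving up and leaving the previous stats in place.
+ */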
+
+void efx_mcdi_mac_pull_stats(struct efx_nic *efx)
+{
+ __le64 *dma_stats = efx->stats_buffer.addr;
+ int attempts = EFX_MAC_STATS_WAIT_ATTEMPTS;
+
+ dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID;
+ efx_mcdi_mac_stats(efx, EFX_STATS_PULL, 0);
+
+ while (dma_stats[MC_CMD_MAC_GENERATION_END] ==
+ EFX_MC_STATS_GENERATION_INVALID &&
+ attempts-- != 0)
+ udelay(EFX_MAC_STATS_WAIT_US);
+}
+
+int efx_mcdi_port_probe(struct efx_nic *efx)
+{
+ int rc;
+
+ /* Hook in PHY operations table */
+ efx->phy_op = &efx_mcdi_phy_ops;
+
+ /* Set up MDIO structure for PHY */
+ efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
+ efx->mdio.mdio_read = efx_mcdi_mdio_read;
+ efx->mdio.mdio_write = efx_mcdi_mdio_write;
+
+ /* Fill out MDIO structure, loopback modes, and initial link state */
+ rc = efx->phy_op->probe(efx);
+ if (rc != 0)
+ return rc;
+
+ /* Allocate buffer for stats */
+ rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
+ MC_CMD_MAC_NSTATS * sizeof(u64), GFP_KERNEL);
+ if (rc)
+ return rc;
+ netif_dbg(efx, probe, efx->net_dev,
+ "stats buffer at %llx (virt %p phys %llx)\n",
+ (u64)efx->stats_buffer.dma_addr,
+ efx->stats_buffer.addr,
+ (u64)virt_to_phys(efx->stats_buffer.addr));
+
+ efx_mcdi_mac_stats(efx, EFX_STATS_DISABLE, 1);
+
+ return 0;
+}
+
+void efx_mcdi_port_remove(struct efx_nic *efx)
+{
+ efx->phy_op->remove(efx);
+ efx_nic_free_buffer(efx, &efx->stats_buffer);
+}
+
+/* Get physical port number (EF10 only; on Siena it is the same as the PF number) */
+int efx_mcdi_port_get_number(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN);
+ int rc;
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_PORT_ASSIGNMENT, NULL, 0,
+ outbuf, sizeof(outbuf), NULL);
+ if (rc)
+ return rc;
+
+ return MCDI_DWORD(outbuf, GET_PORT_ASSIGNMENT_OUT_PORT);
+}
diff --git a/kernel/drivers/net/ethernet/sfc/mdio_10g.c b/kernel/drivers/net/ethernet/sfc/mdio_10g.c
new file mode 100644
index 000000000..8ff954c59
--- /dev/null
+++ b/kernel/drivers/net/ethernet/sfc/mdio_10g.c
@@ -0,0 +1,323 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2006-2011 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+/*
+ * Useful functions for working with MDIO clause 45 PHYs
+ */
+#include <linux/types.h>
+#include <linux/ethtool.h>
+#include <linux/delay.h>
+#include "net_driver.h"
+#include "mdio_10g.h"
+#include "workarounds.h"
+
+unsigned efx_mdio_id_oui(u32 id)
+{
+ unsigned oui = 0;
+ int i;
+
+ /* The bits of the OUI are designated a..x, with a=0 and b variable.
+ * In the id register c is the MSB but the OUI is conventionally
+ * written as bytes h..a, p..i, x..q. Reorder the bits accordingly. */
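+	/* (i ^ 7) reverses the bit order within each byte while keeping the
+	 * bytes in place: e.g. id bit 10 (i = 0) becomes OUI bit 7 and
+	 * id bit 17 (i = 7) becomes OUI bit 0.
+	 */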
+ for (i = 0; i < 22; ++i)
+ if (id & (1 << (i + 10)))
+ oui |= 1 << (i ^ 7);
+
+ return oui;
+}
+
+int efx_mdio_reset_mmd(struct efx_nic *port, int mmd,
+ int spins, int spintime)
+{
+ u32 ctrl;
+
+ /* Catch callers passing values in the wrong units (or just silly) */
+ EFX_BUG_ON_PARANOID(spins * spintime >= 5000);
+
+ efx_mdio_write(port, mmd, MDIO_CTRL1, MDIO_CTRL1_RESET);
+ /* Wait for the reset bit to clear. */
+ do {
+ msleep(spintime);
+ ctrl = efx_mdio_read(port, mmd, MDIO_CTRL1);
+ spins--;
+
+ } while (spins && (ctrl & MDIO_CTRL1_RESET));
+
+ return spins ? spins : -ETIMEDOUT;
+}
+
+static int efx_mdio_check_mmd(struct efx_nic *efx, int mmd)
+{
+ int status;
+
+ if (mmd != MDIO_MMD_AN) {
+ /* Read MMD STATUS2 to check it is responding. */
+ status = efx_mdio_read(efx, mmd, MDIO_STAT2);
+ if ((status & MDIO_STAT2_DEVPRST) != MDIO_STAT2_DEVPRST_VAL) {
+ netif_err(efx, hw, efx->net_dev,
+ "PHY MMD %d not responding.\n", mmd);
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+/* This ought to be ridiculous overkill. We expect it to fail rarely */
+#define MDIO45_RESET_TIME 1000 /* ms */
+#define MDIO45_RESET_ITERS 100
+
+int efx_mdio_wait_reset_mmds(struct efx_nic *efx, unsigned int mmd_mask)
+{
+ const int spintime = MDIO45_RESET_TIME / MDIO45_RESET_ITERS;
+ int tries = MDIO45_RESET_ITERS;
+ int rc = 0;
+ int in_reset;
+
+ while (tries) {
+ int mask = mmd_mask;
+ int mmd = 0;
+ int stat;
+ in_reset = 0;
+ while (mask) {
+ if (mask & 1) {
+ stat = efx_mdio_read(efx, mmd, MDIO_CTRL1);
+ if (stat < 0) {
+ netif_err(efx, hw, efx->net_dev,
+ "failed to read status of"
+ " MMD %d\n", mmd);
+ return -EIO;
+ }
+ if (stat & MDIO_CTRL1_RESET)
+ in_reset |= (1 << mmd);
+ }
+ mask = mask >> 1;
+ mmd++;
+ }
+ if (!in_reset)
+ break;
+ tries--;
+ msleep(spintime);
+ }
+ if (in_reset != 0) {
+ netif_err(efx, hw, efx->net_dev,
+ "not all MMDs came out of reset in time."
+ " MMDs still in reset: %x\n", in_reset);
+ rc = -ETIMEDOUT;
+ }
+ return rc;
+}
+
+int efx_mdio_check_mmds(struct efx_nic *efx, unsigned int mmd_mask)
+{
+ int mmd = 0, probe_mmd, devs1, devs2;
+ u32 devices;
+
+ /* Historically we have probed the PHYXS to find out what devices are
+	 * present, but that doesn't work so well if the PHYXS isn't expected
+	 * to exist; in that case just probe the first MMD in the mask supplied. */
+ probe_mmd = (mmd_mask & MDIO_DEVS_PHYXS) ? MDIO_MMD_PHYXS :
+ __ffs(mmd_mask);
+
+ /* Check all the expected MMDs are present */
+ devs1 = efx_mdio_read(efx, probe_mmd, MDIO_DEVS1);
+ devs2 = efx_mdio_read(efx, probe_mmd, MDIO_DEVS2);
+ if (devs1 < 0 || devs2 < 0) {
+ netif_err(efx, hw, efx->net_dev,
+ "failed to read devices present\n");
+ return -EIO;
+ }
+ devices = devs1 | (devs2 << 16);
+ if ((devices & mmd_mask) != mmd_mask) {
+ netif_err(efx, hw, efx->net_dev,
+ "required MMDs not present: got %x, wanted %x\n",
+ devices, mmd_mask);
+ return -ENODEV;
+ }
+ netif_vdbg(efx, hw, efx->net_dev, "Devices present: %x\n", devices);
+
+ /* Check all required MMDs are responding and happy. */
+ while (mmd_mask) {
+ if ((mmd_mask & 1) && efx_mdio_check_mmd(efx, mmd))
+ return -EIO;
+ mmd_mask = mmd_mask >> 1;
+ mmd++;
+ }
+
+ return 0;
+}
+
+bool efx_mdio_links_ok(struct efx_nic *efx, unsigned int mmd_mask)
+{
+ /* If the port is in loopback, then we should only consider a subset
+	 * of MMDs */
+ if (LOOPBACK_INTERNAL(efx))
+ return true;
+ else if (LOOPBACK_MASK(efx) & LOOPBACKS_WS)
+ return false;
+ else if (efx_phy_mode_disabled(efx->phy_mode))
+ return false;
+ else if (efx->loopback_mode == LOOPBACK_PHYXS)
+ mmd_mask &= ~(MDIO_DEVS_PHYXS |
+ MDIO_DEVS_PCS |
+ MDIO_DEVS_PMAPMD |
+ MDIO_DEVS_AN);
+ else if (efx->loopback_mode == LOOPBACK_PCS)
+ mmd_mask &= ~(MDIO_DEVS_PCS |
+ MDIO_DEVS_PMAPMD |
+ MDIO_DEVS_AN);
+ else if (efx->loopback_mode == LOOPBACK_PMAPMD)
+ mmd_mask &= ~(MDIO_DEVS_PMAPMD |
+ MDIO_DEVS_AN);
+
+ return mdio45_links_ok(&efx->mdio, mmd_mask);
+}
+
+void efx_mdio_transmit_disable(struct efx_nic *efx)
+{
+ efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD,
+ MDIO_PMA_TXDIS, MDIO_PMD_TXDIS_GLOBAL,
+ efx->phy_mode & PHY_MODE_TX_DISABLED);
+}
+
+void efx_mdio_phy_reconfigure(struct efx_nic *efx)
+{
+ efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD,
+ MDIO_CTRL1, MDIO_PMA_CTRL1_LOOPBACK,
+ efx->loopback_mode == LOOPBACK_PMAPMD);
+ efx_mdio_set_flag(efx, MDIO_MMD_PCS,
+ MDIO_CTRL1, MDIO_PCS_CTRL1_LOOPBACK,
+ efx->loopback_mode == LOOPBACK_PCS);
+ efx_mdio_set_flag(efx, MDIO_MMD_PHYXS,
+ MDIO_CTRL1, MDIO_PHYXS_CTRL1_LOOPBACK,
+ efx->loopback_mode == LOOPBACK_PHYXS_WS);
+}
+
+static void efx_mdio_set_mmd_lpower(struct efx_nic *efx,
+ int lpower, int mmd)
+{
+ int stat = efx_mdio_read(efx, mmd, MDIO_STAT1);
+
+ netif_vdbg(efx, drv, efx->net_dev, "Setting low power mode for MMD %d to %d\n",
+ mmd, lpower);
+
+ if (stat & MDIO_STAT1_LPOWERABLE) {
+ efx_mdio_set_flag(efx, mmd, MDIO_CTRL1,
+ MDIO_CTRL1_LPOWER, lpower);
+ }
+}
+
+void efx_mdio_set_mmds_lpower(struct efx_nic *efx,
+ int low_power, unsigned int mmd_mask)
+{
+ int mmd = 0;
+ mmd_mask &= ~MDIO_DEVS_AN;
+ while (mmd_mask) {
+ if (mmd_mask & 1)
+ efx_mdio_set_mmd_lpower(efx, low_power, mmd);
+ mmd_mask = (mmd_mask >> 1);
+ mmd++;
+ }
+}
+
+/**
+ * efx_mdio_set_settings - Set (some of) the PHY settings over MDIO.
+ * @efx: Efx NIC
+ * @ecmd: New settings
+ */
+int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
+{
+ struct ethtool_cmd prev = { .cmd = ETHTOOL_GSET };
+
+ efx->phy_op->get_settings(efx, &prev);
+
+ if (ecmd->advertising == prev.advertising &&
+ ethtool_cmd_speed(ecmd) == ethtool_cmd_speed(&prev) &&
+ ecmd->duplex == prev.duplex &&
+ ecmd->port == prev.port &&
+ ecmd->autoneg == prev.autoneg)
+ return 0;
+
+ /* We can only change these settings for -T PHYs */
+ if (prev.port != PORT_TP || ecmd->port != PORT_TP)
+ return -EINVAL;
+
+ /* Check that PHY supports these settings */
+ if (!ecmd->autoneg ||
+ (ecmd->advertising | SUPPORTED_Autoneg) & ~prev.supported)
+ return -EINVAL;
+
+ efx_link_set_advertising(efx, ecmd->advertising | ADVERTISED_Autoneg);
+ efx_mdio_an_reconfigure(efx);
+ return 0;
+}
+
+/**
+ * efx_mdio_an_reconfigure - Push advertising flags and restart autonegotiation
+ * @efx: Efx NIC
+ */
+void efx_mdio_an_reconfigure(struct efx_nic *efx)
+{
+ int reg;
+
+ WARN_ON(!(efx->mdio.mmds & MDIO_DEVS_AN));
+
+ /* Set up the base page */
+ reg = ADVERTISE_CSMA | ADVERTISE_RESV;
+ if (efx->link_advertising & ADVERTISED_Pause)
+ reg |= ADVERTISE_PAUSE_CAP;
+ if (efx->link_advertising & ADVERTISED_Asym_Pause)
+ reg |= ADVERTISE_PAUSE_ASYM;
+ efx_mdio_write(efx, MDIO_MMD_AN, MDIO_AN_ADVERTISE, reg);
+
+ /* Set up the (extended) next page */
+ efx->phy_op->set_npage_adv(efx, efx->link_advertising);
+
+ /* Enable and restart AN */
+ reg = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_CTRL1);
+ reg |= MDIO_AN_CTRL1_ENABLE | MDIO_AN_CTRL1_RESTART | MDIO_AN_CTRL1_XNP;
+ efx_mdio_write(efx, MDIO_MMD_AN, MDIO_CTRL1, reg);
+}
+
+u8 efx_mdio_get_pause(struct efx_nic *efx)
+{
+ BUILD_BUG_ON(EFX_FC_AUTO & (EFX_FC_RX | EFX_FC_TX));
+
+ if (!(efx->wanted_fc & EFX_FC_AUTO))
+ return efx->wanted_fc;
+
+ WARN_ON(!(efx->mdio.mmds & MDIO_DEVS_AN));
+
+ return mii_resolve_flowctrl_fdx(
+ mii_advertise_flowctrl(efx->wanted_fc),
+ efx_mdio_read(efx, MDIO_MMD_AN, MDIO_AN_LPA));
+}
+
+int efx_mdio_test_alive(struct efx_nic *efx)
+{
+ int rc;
+ int devad = __ffs(efx->mdio.mmds);
+ u16 physid1, physid2;
+
+ mutex_lock(&efx->mac_lock);
+
+ physid1 = efx_mdio_read(efx, devad, MDIO_DEVID1);
+ physid2 = efx_mdio_read(efx, devad, MDIO_DEVID2);
+
+ if ((physid1 == 0x0000) || (physid1 == 0xffff) ||
+ (physid2 == 0x0000) || (physid2 == 0xffff)) {
+ netif_err(efx, hw, efx->net_dev,
+ "no MDIO PHY present with ID %d\n", efx->mdio.prtad);
+ rc = -EINVAL;
+ } else {
+ rc = efx_mdio_check_mmds(efx, efx->mdio.mmds);
+ }
+
+ mutex_unlock(&efx->mac_lock);
+ return rc;
+}
diff --git a/kernel/drivers/net/ethernet/sfc/mdio_10g.h b/kernel/drivers/net/ethernet/sfc/mdio_10g.h
new file mode 100644
index 000000000..4a2dc4c28
--- /dev/null
+++ b/kernel/drivers/net/ethernet/sfc/mdio_10g.h
@@ -0,0 +1,110 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2006-2011 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_MDIO_10G_H
+#define EFX_MDIO_10G_H
+
+#include <linux/mdio.h>
+
+/*
+ * Helper functions for doing 10G MDIO as specified in IEEE 802.3 clause 45.
+ */
+
+#include "efx.h"
+
+static inline unsigned efx_mdio_id_rev(u32 id) { return id & 0xf; }
+static inline unsigned efx_mdio_id_model(u32 id) { return (id >> 4) & 0x3f; }
+unsigned efx_mdio_id_oui(u32 id);
+
+static inline int efx_mdio_read(struct efx_nic *efx, int devad, int addr)
+{
+ return efx->mdio.mdio_read(efx->net_dev, efx->mdio.prtad, devad, addr);
+}
+
+static inline void
+efx_mdio_write(struct efx_nic *efx, int devad, int addr, int value)
+{
+ efx->mdio.mdio_write(efx->net_dev, efx->mdio.prtad, devad, addr, value);
+}
+
+static inline u32 efx_mdio_read_id(struct efx_nic *efx, int mmd)
+{
+ u16 id_low = efx_mdio_read(efx, mmd, MDIO_DEVID2);
+ u16 id_hi = efx_mdio_read(efx, mmd, MDIO_DEVID1);
+ return (id_hi << 16) | (id_low);
+}
+
+static inline bool efx_mdio_phyxgxs_lane_sync(struct efx_nic *efx)
+{
+ int i, lane_status;
+ bool sync;
+
+ for (i = 0; i < 2; ++i)
+ lane_status = efx_mdio_read(efx, MDIO_MMD_PHYXS,
+ MDIO_PHYXS_LNSTAT);
+
+ sync = !!(lane_status & MDIO_PHYXS_LNSTAT_ALIGN);
+ if (!sync)
+ netif_dbg(efx, hw, efx->net_dev, "XGXS lane status: %x\n",
+ lane_status);
+ return sync;
+}
+
+const char *efx_mdio_mmd_name(int mmd);
+
+/*
+ * Reset a specific MMD and wait for reset to clear.
+ * Return number of spins left (>0) on success, -%ETIMEDOUT on failure.
+ *
+ * This function will sleep
+ */
+int efx_mdio_reset_mmd(struct efx_nic *efx, int mmd, int spins, int spintime);
+
+/* As efx_mdio_check_mmd but for multiple MMDs */
+int efx_mdio_check_mmds(struct efx_nic *efx, unsigned int mmd_mask);
+
+/* Check the link status of specified mmds in bit mask */
+bool efx_mdio_links_ok(struct efx_nic *efx, unsigned int mmd_mask);
+
+/* Generic transmit disable support through PMAPMD */
+void efx_mdio_transmit_disable(struct efx_nic *efx);
+
+/* Generic part of reconfigure: set/clear loopback bits */
+void efx_mdio_phy_reconfigure(struct efx_nic *efx);
+
+/* Set the power state of the specified MMDs */
+void efx_mdio_set_mmds_lpower(struct efx_nic *efx, int low_power,
+ unsigned int mmd_mask);
+
+/* Set (some of) the PHY settings over MDIO */
+int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd);
+
+/* Push advertising flags and restart autonegotiation */
+void efx_mdio_an_reconfigure(struct efx_nic *efx);
+
+/* Get pause parameters from AN if available (otherwise return
+ * requested pause parameters)
+ */
+u8 efx_mdio_get_pause(struct efx_nic *efx);
+
+/* Wait for specified MMDs to exit reset within a timeout */
+int efx_mdio_wait_reset_mmds(struct efx_nic *efx, unsigned int mmd_mask);
+
+/* Set or clear flag, debouncing */
+static inline void
+efx_mdio_set_flag(struct efx_nic *efx, int devad, int addr,
+ int mask, bool state)
+{
+ mdio_set_flag(&efx->mdio, efx->mdio.prtad, devad, addr, mask, state);
+}
+
+/* Liveness self-test for MDIO PHYs */
+int efx_mdio_test_alive(struct efx_nic *efx);
+
+#endif /* EFX_MDIO_10G_H */
diff --git a/kernel/drivers/net/ethernet/sfc/mtd.c b/kernel/drivers/net/ethernet/sfc/mtd.c
new file mode 100644
index 000000000..a77a8bd2d
--- /dev/null
+++ b/kernel/drivers/net/ethernet/sfc/mtd.c
@@ -0,0 +1,133 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2013 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/slab.h>
+#include <linux/rtnetlink.h>
+
+#include "net_driver.h"
+#include "efx.h"
+
+#define to_efx_mtd_partition(mtd) \
+ container_of(mtd, struct efx_mtd_partition, mtd)
+
+/* MTD interface */
+
+static int efx_mtd_erase(struct mtd_info *mtd, struct erase_info *erase)
+{
+ struct efx_nic *efx = mtd->priv;
+ int rc;
+
+ rc = efx->type->mtd_erase(mtd, erase->addr, erase->len);
+ if (rc == 0) {
+ erase->state = MTD_ERASE_DONE;
+ } else {
+ erase->state = MTD_ERASE_FAILED;
+ erase->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
+ }
+ mtd_erase_callback(erase);
+ return rc;
+}
+
+static void efx_mtd_sync(struct mtd_info *mtd)
+{
+ struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
+ struct efx_nic *efx = mtd->priv;
+ int rc;
+
+ rc = efx->type->mtd_sync(mtd);
+ if (rc)
+ pr_err("%s: %s sync failed (%d)\n",
+ part->name, part->dev_type_name, rc);
+}
+
+static void efx_mtd_remove_partition(struct efx_mtd_partition *part)
+{
+ int rc;
+
+ for (;;) {
+ rc = mtd_device_unregister(&part->mtd);
+ if (rc != -EBUSY)
+ break;
+ ssleep(1);
+ }
+ WARN_ON(rc);
+ list_del(&part->node);
+}
+
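+/* Callers typically embed struct efx_mtd_partition at the start of a
+ * larger, NIC-type-specific partition structure, which is (presumably)
+ * why the array is walked using the caller-supplied sizeof_part rather
+ * than sizeof(struct efx_mtd_partition).
+ */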
+int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts,
+ size_t n_parts, size_t sizeof_part)
+{
+ struct efx_mtd_partition *part;
+ size_t i;
+
+ for (i = 0; i < n_parts; i++) {
+ part = (struct efx_mtd_partition *)((char *)parts +
+ i * sizeof_part);
+
+ part->mtd.writesize = 1;
+
+ part->mtd.owner = THIS_MODULE;
+ part->mtd.priv = efx;
+ part->mtd.name = part->name;
+ part->mtd._erase = efx_mtd_erase;
+ part->mtd._read = efx->type->mtd_read;
+ part->mtd._write = efx->type->mtd_write;
+ part->mtd._sync = efx_mtd_sync;
+
+ efx->type->mtd_rename(part);
+
+ if (mtd_device_register(&part->mtd, NULL, 0))
+ goto fail;
+
+ /* Add to list in order - efx_mtd_remove() depends on this */
+ list_add_tail(&part->node, &efx->mtd_list);
+ }
+
+ return 0;
+
+fail:
+ while (i--) {
+ part = (struct efx_mtd_partition *)((char *)parts +
+ i * sizeof_part);
+ efx_mtd_remove_partition(part);
+ }
+ /* Failure is unlikely here, but probably means we're out of memory */
+ return -ENOMEM;
+}
+
+void efx_mtd_remove(struct efx_nic *efx)
+{
+ struct efx_mtd_partition *parts, *part, *next;
+
+ WARN_ON(efx_dev_registered(efx));
+
+ if (list_empty(&efx->mtd_list))
+ return;
+
+ parts = list_first_entry(&efx->mtd_list, struct efx_mtd_partition,
+ node);
+
+ list_for_each_entry_safe(part, next, &efx->mtd_list, node)
+ efx_mtd_remove_partition(part);
+
+ kfree(parts);
+}
+
+void efx_mtd_rename(struct efx_nic *efx)
+{
+ struct efx_mtd_partition *part;
+
+ ASSERT_RTNL();
+
+ list_for_each_entry(part, &efx->mtd_list, node)
+ efx->type->mtd_rename(part);
+}
diff --git a/kernel/drivers/net/ethernet/sfc/net_driver.h b/kernel/drivers/net/ethernet/sfc/net_driver.h
new file mode 100644
index 000000000..325dd94bc
--- /dev/null
+++ b/kernel/drivers/net/ethernet/sfc/net_driver.h
@@ -0,0 +1,1504 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2005-2013 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+/* Common definitions for all Efx net driver code */
+
+#ifndef EFX_NET_DRIVER_H
+#define EFX_NET_DRIVER_H
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/timer.h>
+#include <linux/mdio.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/highmem.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+#include <linux/vmalloc.h>
+#include <linux/i2c.h>
+#include <linux/mtd/mtd.h>
+#include <net/busy_poll.h>
+
+#include "enum.h"
+#include "bitfield.h"
+#include "filter.h"
+
+/**************************************************************************
+ *
+ * Build definitions
+ *
+ **************************************************************************/
+
+#define EFX_DRIVER_VERSION "4.0"
+
+#ifdef DEBUG
+#define EFX_BUG_ON_PARANOID(x) BUG_ON(x)
+#define EFX_WARN_ON_PARANOID(x) WARN_ON(x)
+#else
+#define EFX_BUG_ON_PARANOID(x) do {} while (0)
+#define EFX_WARN_ON_PARANOID(x) do {} while (0)
+#endif
+
+/**************************************************************************
+ *
+ * Efx data structures
+ *
+ **************************************************************************/
+
+#define EFX_MAX_CHANNELS 32U
+#define EFX_MAX_RX_QUEUES EFX_MAX_CHANNELS
+#define EFX_EXTRA_CHANNEL_IOV 0
+#define EFX_EXTRA_CHANNEL_PTP 1
+#define EFX_MAX_EXTRA_CHANNELS 2U
+
+/* Checksum generation is a per-queue option in hardware, so each
+ * queue visible to the networking core is backed by two hardware TX
+ * queues. */
+#define EFX_MAX_TX_TC 2
+#define EFX_MAX_CORE_TX_QUEUES (EFX_MAX_TX_TC * EFX_MAX_CHANNELS)
+#define EFX_TXQ_TYPE_OFFLOAD 1 /* flag */
+#define EFX_TXQ_TYPE_HIGHPRI 2 /* flag */
+#define EFX_TXQ_TYPES 4
+#define EFX_MAX_TX_QUEUES (EFX_TXQ_TYPES * EFX_MAX_CHANNELS)
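+/* The two flag bits above give EFX_TXQ_TYPES = 4 possible combinations
+ * (neither, offload only, high-priority only, both) per channel, hence
+ * EFX_MAX_TX_QUEUES = 4 * EFX_MAX_CHANNELS hardware queues in total.
+ */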
+
+/* Maximum possible MTU the driver supports */
+#define EFX_MAX_MTU (9 * 1024)
+
+/* Size of an RX scatter buffer. Small enough to pack 2 into a 4K page,
+ * and should be a multiple of the cache line size.
+ */
+#define EFX_RX_USR_BUF_SIZE (2048 - 256)
+
+/* If possible, we should ensure cache line alignment at start and end
+ * of every buffer. Otherwise, we just need to ensure 4-byte
+ * alignment of the network header.
+ */
+#if NET_IP_ALIGN == 0
+#define EFX_RX_BUF_ALIGNMENT L1_CACHE_BYTES
+#else
+#define EFX_RX_BUF_ALIGNMENT 4
+#endif
+
+/* Forward declare Precision Time Protocol (PTP) support structure. */
+struct efx_ptp_data;
+struct hwtstamp_config;
+
+struct efx_self_tests;
+
+/**
+ * struct efx_buffer - A general-purpose DMA buffer
+ * @addr: host base address of the buffer
+ * @dma_addr: DMA base address of the buffer
+ * @len: Buffer length, in bytes
+ *
+ * The NIC uses these buffers for its interrupt status registers and
+ * MAC stats dumps.
+ */
+struct efx_buffer {
+ void *addr;
+ dma_addr_t dma_addr;
+ unsigned int len;
+};
+
+/**
+ * struct efx_special_buffer - DMA buffer entered into buffer table
+ * @buf: Standard &struct efx_buffer
+ * @index: Buffer index within controller's buffer table
+ * @entries: Number of buffer table entries
+ *
+ * The NIC has a buffer table that maps buffers of size %EFX_BUF_SIZE.
+ * Event and descriptor rings are addressed via one or more buffer
+ * table entries (and so can be physically non-contiguous, although we
+ * currently do not take advantage of that). On Falcon and Siena we
+ * have to take care of allocating and initialising the entries
+ * ourselves. On later hardware this is managed by the firmware and
+ * @index and @entries are left as 0.
+ */
+struct efx_special_buffer {
+ struct efx_buffer buf;
+ unsigned int index;
+ unsigned int entries;
+};
+
+/**
+ * struct efx_tx_buffer - buffer state for a TX descriptor
+ * @skb: When @flags & %EFX_TX_BUF_SKB, the associated socket buffer to be
+ * freed when descriptor completes
+ * @heap_buf: When @flags & %EFX_TX_BUF_HEAP, the associated heap buffer to be
+ * freed when descriptor completes.
+ * @option: When @flags & %EFX_TX_BUF_OPTION, a NIC-specific option descriptor.
+ * @dma_addr: DMA address of the fragment.
+ * @flags: Flags for allocation and DMA mapping type
+ * @len: Length of this fragment.
+ * This field is zero when the queue slot is empty.
+ * @unmap_len: Length of this fragment to unmap
+ * @dma_offset: Offset of @dma_addr from the address of the backing DMA mapping.
+ * Only valid if @unmap_len != 0.
+ */
+struct efx_tx_buffer {
+ union {
+ const struct sk_buff *skb;
+ void *heap_buf;
+ };
+ union {
+ efx_qword_t option;
+ dma_addr_t dma_addr;
+ };
+ unsigned short flags;
+ unsigned short len;
+ unsigned short unmap_len;
+ unsigned short dma_offset;
+};
+#define EFX_TX_BUF_CONT 1 /* not last descriptor of packet */
+#define EFX_TX_BUF_SKB 2 /* buffer is last part of skb */
+#define EFX_TX_BUF_HEAP 4 /* buffer was allocated with kmalloc() */
+#define EFX_TX_BUF_MAP_SINGLE 8 /* buffer was mapped with dma_map_single() */
+#define EFX_TX_BUF_OPTION 0x10 /* empty buffer for option descriptor */
+
+/**
+ * struct efx_tx_queue - An Efx TX queue
+ *
+ * This is a ring buffer of TX fragments.
+ * Since the TX completion path always executes on the same
+ * CPU and the xmit path can operate on different CPUs,
+ * performance is increased by ensuring that the completion
+ * path and the xmit path operate on different cache lines.
+ * This is particularly important if the xmit path is always
+ * executing on one CPU which is different from the completion
+ * path. There is also a cache line for members which are
+ * read but not written on the fast path.
+ *
+ * @efx: The associated Efx NIC
+ * @queue: DMA queue number
+ * @channel: The associated channel
+ * @core_txq: The networking core TX queue structure
+ * @buffer: The software buffer ring
+ * @tsoh_page: Array of pages of TSO header buffers
+ * @txd: The hardware descriptor ring
+ * @ptr_mask: The size of the ring minus 1.
+ * @piobuf: PIO buffer region for this TX queue (shared with its partner).
+ * Size of the region is efx_piobuf_size.
+ * @piobuf_offset: Buffer offset to be specified in PIO descriptors
+ * @initialised: Has hardware queue been initialised?
+ * @read_count: Current read pointer.
+ * This is the number of buffers that have been removed from both rings.
+ * @old_write_count: The value of @write_count when last checked.
+ * This is here for performance reasons. The xmit path will
+ * only get the up-to-date value of @write_count if this
+ * variable indicates that the queue is empty. This is to
+ * avoid cache-line ping-pong between the xmit path and the
+ * completion path.
+ * @merge_events: Number of TX merged completion events
+ * @insert_count: Current insert pointer
+ * This is the number of buffers that have been added to the
+ * software ring.
+ * @write_count: Current write pointer
+ * This is the number of buffers that have been added to the
+ * hardware ring.
+ * @old_read_count: The value of read_count when last checked.
+ * This is here for performance reasons. The xmit path will
+ * only get the up-to-date value of read_count if this
+ * variable indicates that the queue is full. This is to
+ * avoid cache-line ping-pong between the xmit path and the
+ * completion path.
+ * @tso_bursts: Number of times TSO xmit invoked by kernel
+ * @tso_long_headers: Number of packets with headers too long for standard
+ * blocks
+ * @tso_packets: Number of packets via the TSO xmit path
+ * @pushes: Number of times the TX push feature has been used
+ * @pio_packets: Number of times the TX PIO feature has been used
+ * @empty_read_count: If the completion path has seen the queue as empty
+ * and the transmission path has not yet checked this, the value of
+ *	@read_count bitwise-ORed with %EFX_EMPTY_COUNT_VALID; otherwise 0.
+ */
+struct efx_tx_queue {
+ /* Members which don't change on the fast path */
+ struct efx_nic *efx ____cacheline_aligned_in_smp;
+ unsigned queue;
+ struct efx_channel *channel;
+ struct netdev_queue *core_txq;
+ struct efx_tx_buffer *buffer;
+ struct efx_buffer *tsoh_page;
+ struct efx_special_buffer txd;
+ unsigned int ptr_mask;
+ void __iomem *piobuf;
+ unsigned int piobuf_offset;
+ bool initialised;
+
+ /* Members used mainly on the completion path */
+ unsigned int read_count ____cacheline_aligned_in_smp;
+ unsigned int old_write_count;
+ unsigned int merge_events;
+
+ /* Members used only on the xmit path */
+ unsigned int insert_count ____cacheline_aligned_in_smp;
+ unsigned int write_count;
+ unsigned int old_read_count;
+ unsigned int tso_bursts;
+ unsigned int tso_long_headers;
+ unsigned int tso_packets;
+ unsigned int pushes;
+ unsigned int pio_packets;
+ /* Statistics to supplement MAC stats */
+ unsigned long tx_packets;
+
+ /* Members shared between paths and sometimes updated */
+ unsigned int empty_read_count ____cacheline_aligned_in_smp;
+#define EFX_EMPTY_COUNT_VALID 0x80000000
+ atomic_t flush_outstanding;
+};
+
+/**
+ * struct efx_rx_buffer - An Efx RX data buffer
+ * @dma_addr: DMA base address of the buffer
+ * @page: The associated page buffer.
+ * Will be %NULL if the buffer slot is currently free.
+ * @page_offset: If pending: offset in @page of DMA base address.
+ * If completed: offset in @page of Ethernet header.
+ * @len: If pending: length for DMA descriptor.
+ * If completed: received length, excluding hash prefix.
+ * @flags: Flags for buffer and packet state. These are only set on the
+ * first buffer of a scattered packet.
+ */
+struct efx_rx_buffer {
+ dma_addr_t dma_addr;
+ struct page *page;
+ u16 page_offset;
+ u16 len;
+ u16 flags;
+};
+#define EFX_RX_BUF_LAST_IN_PAGE 0x0001
+#define EFX_RX_PKT_CSUMMED 0x0002
+#define EFX_RX_PKT_DISCARD 0x0004
+#define EFX_RX_PKT_TCP 0x0040
+#define EFX_RX_PKT_PREFIX_LEN 0x0080 /* length is in prefix only */
+
+/**
+ * struct efx_rx_page_state - Page-based rx buffer state
+ *
+ * Inserted at the start of every page allocated for receive buffers.
+ * Used to facilitate sharing dma mappings between recycled rx buffers
+ * and those passed up to the kernel.
+ *
+ * @dma_addr: The dma address of this page.
+ */
+struct efx_rx_page_state {
+ dma_addr_t dma_addr;
+
+ unsigned int __pad[0] ____cacheline_aligned;
+};
+
+/**
+ * struct efx_rx_queue - An Efx RX queue
+ * @efx: The associated Efx NIC
+ * @core_index: Index of network core RX queue. Will be >= 0 iff this
+ * is associated with a real RX queue.
+ * @buffer: The software buffer ring
+ * @rxd: The hardware descriptor ring
+ * @ptr_mask: The size of the ring minus 1.
+ * @refill_enabled: Enable refill whenever fill level is low
+ * @flush_pending: Set when a RX flush is pending. Has the same lifetime as
+ * @rxq_flush_pending.
+ * @added_count: Number of buffers added to the receive queue.
+ * @notified_count: Number of buffers given to NIC (<= @added_count).
+ * @removed_count: Number of buffers removed from the receive queue.
+ * @scatter_n: Used by NIC specific receive code.
+ * @scatter_len: Used by NIC specific receive code.
+ * @page_ring: The ring to store DMA mapped pages for reuse.
+ * @page_add: Counter to calculate the write pointer for the recycle ring.
+ * @page_remove: Counter to calculate the read pointer for the recycle ring.
+ * @page_recycle_count: The number of pages that have been recycled.
+ * @page_recycle_failed: The number of pages that couldn't be recycled because
+ * the kernel still held a reference to them.
+ * @page_recycle_full: The number of pages that were released because the
+ * recycle ring was full.
+ * @page_ptr_mask: The number of pages in the RX recycle ring minus 1.
+ * @max_fill: RX descriptor maximum fill level (<= ring size)
+ * @fast_fill_trigger: RX descriptor fill level that will trigger a fast fill
+ * (<= @max_fill)
+ * @min_fill: RX descriptor minimum non-zero fill level.
+ * This records the minimum fill level observed when a ring
+ * refill was triggered.
+ * @recycle_count: RX buffer recycle counter.
+ * @slow_fill: Timer used to defer efx_nic_generate_fill_event().
+ */
+struct efx_rx_queue {
+ struct efx_nic *efx;
+ int core_index;
+ struct efx_rx_buffer *buffer;
+ struct efx_special_buffer rxd;
+ unsigned int ptr_mask;
+ bool refill_enabled;
+ bool flush_pending;
+
+ unsigned int added_count;
+ unsigned int notified_count;
+ unsigned int removed_count;
+ unsigned int scatter_n;
+ unsigned int scatter_len;
+ struct page **page_ring;
+ unsigned int page_add;
+ unsigned int page_remove;
+ unsigned int page_recycle_count;
+ unsigned int page_recycle_failed;
+ unsigned int page_recycle_full;
+ unsigned int page_ptr_mask;
+ unsigned int max_fill;
+ unsigned int fast_fill_trigger;
+ unsigned int min_fill;
+ unsigned int min_overfill;
+ unsigned int recycle_count;
+ struct timer_list slow_fill;
+ unsigned int slow_fill_count;
+ /* Statistics to supplement MAC stats */
+ unsigned long rx_packets;
+};
+
+enum efx_sync_events_state {
+ SYNC_EVENTS_DISABLED = 0,
+ SYNC_EVENTS_QUIESCENT,
+ SYNC_EVENTS_REQUESTED,
+ SYNC_EVENTS_VALID,
+};
+
+/**
+ * struct efx_channel - An Efx channel
+ *
+ * A channel comprises an event queue, at least one TX queue, at least
+ * one RX queue, and an associated tasklet for processing the event
+ * queue.
+ *
+ * @efx: Associated Efx NIC
+ * @channel: Channel instance number
+ * @type: Channel type definition
+ * @eventq_init: Event queue initialised flag
+ * @enabled: Channel enabled indicator
+ * @irq: IRQ number (MSI and MSI-X only)
+ * @irq_moderation: IRQ moderation value (in hardware ticks)
+ * @napi_dev: Net device used with NAPI
+ * @napi_str: NAPI control structure
+ * @state: state for NAPI vs busy polling
+ * @state_lock: lock protecting @state
+ * @eventq: Event queue buffer
+ * @eventq_mask: Event queue pointer mask
+ * @eventq_read_ptr: Event queue read pointer
+ * @event_test_cpu: Last CPU to handle interrupt or test event for this channel
+ * @irq_count: Number of IRQs since last adaptive moderation decision
+ * @irq_mod_score: IRQ moderation score
+ * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors
+ * @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors
+ * @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors
+ * @n_rx_mcast_mismatch: Count of unmatched multicast frames
+ * @n_rx_frm_trunc: Count of RX_FRM_TRUNC errors
+ * @n_rx_overlength: Count of RX_OVERLENGTH errors
+ * @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun
+ * @n_rx_nodesc_trunc: Number of RX packets truncated and then dropped due to
+ * lack of descriptors
+ * @n_rx_merge_events: Number of RX merged completion events
+ * @n_rx_merge_packets: Number of RX packets completed by merged events
+ * @rx_pkt_n_frags: Number of fragments in next packet to be delivered by
+ * __efx_rx_packet(), or zero if there is none
+ * @rx_pkt_index: Ring index of first buffer for next packet to be delivered
+ * by __efx_rx_packet(), if @rx_pkt_n_frags != 0
+ * @rx_queue: RX queue for this channel
+ * @tx_queue: TX queues for this channel
+ * @sync_events_state: Current state of sync events on this channel
+ * @sync_timestamp_major: Major part of the last ptp sync event
+ * @sync_timestamp_minor: Minor part of the last ptp sync event
+ */
+struct efx_channel {
+ struct efx_nic *efx;
+ int channel;
+ const struct efx_channel_type *type;
+ bool eventq_init;
+ bool enabled;
+ int irq;
+ unsigned int irq_moderation;
+ struct net_device *napi_dev;
+ struct napi_struct napi_str;
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ unsigned int state;
+ spinlock_t state_lock;
+#define EFX_CHANNEL_STATE_IDLE 0
+#define EFX_CHANNEL_STATE_NAPI (1 << 0) /* NAPI owns this channel */
+#define EFX_CHANNEL_STATE_POLL (1 << 1) /* poll owns this channel */
+#define EFX_CHANNEL_STATE_DISABLED (1 << 2) /* channel is disabled */
+#define EFX_CHANNEL_STATE_NAPI_YIELD (1 << 3) /* NAPI yielded this channel */
+#define EFX_CHANNEL_STATE_POLL_YIELD (1 << 4) /* poll yielded this channel */
+#define EFX_CHANNEL_OWNED \
+ (EFX_CHANNEL_STATE_NAPI | EFX_CHANNEL_STATE_POLL)
+#define EFX_CHANNEL_LOCKED \
+ (EFX_CHANNEL_OWNED | EFX_CHANNEL_STATE_DISABLED)
+#define EFX_CHANNEL_USER_PEND \
+ (EFX_CHANNEL_STATE_POLL | EFX_CHANNEL_STATE_POLL_YIELD)
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+ struct efx_special_buffer eventq;
+ unsigned int eventq_mask;
+ unsigned int eventq_read_ptr;
+ int event_test_cpu;
+
+ unsigned int irq_count;
+ unsigned int irq_mod_score;
+#ifdef CONFIG_RFS_ACCEL
+ unsigned int rfs_filters_added;
+#endif
+
+ unsigned n_rx_tobe_disc;
+ unsigned n_rx_ip_hdr_chksum_err;
+ unsigned n_rx_tcp_udp_chksum_err;
+ unsigned n_rx_mcast_mismatch;
+ unsigned n_rx_frm_trunc;
+ unsigned n_rx_overlength;
+ unsigned n_skbuff_leaks;
+ unsigned int n_rx_nodesc_trunc;
+ unsigned int n_rx_merge_events;
+ unsigned int n_rx_merge_packets;
+
+ unsigned int rx_pkt_n_frags;
+ unsigned int rx_pkt_index;
+
+ struct efx_rx_queue rx_queue;
+ struct efx_tx_queue tx_queue[EFX_TXQ_TYPES];
+
+ enum efx_sync_events_state sync_events_state;
+ u32 sync_timestamp_major;
+ u32 sync_timestamp_minor;
+};
+
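+/* The helpers below arbitrate ownership of a channel between the NAPI poll
+ * path and socket busy-polling using the EFX_CHANNEL_STATE_* bits in @state;
+ * each side records a "yield" if it finds the channel held by the other, and
+ * efx_channel_disable() reports failure if either side currently owns it.
+ */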
+#ifdef CONFIG_NET_RX_BUSY_POLL
+static inline void efx_channel_init_lock(struct efx_channel *channel)
+{
+ spin_lock_init(&channel->state_lock);
+}
+
+/* Called from the device poll routine to get ownership of a channel. */
+static inline bool efx_channel_lock_napi(struct efx_channel *channel)
+{
+ bool rc = true;
+
+ spin_lock_bh(&channel->state_lock);
+ if (channel->state & EFX_CHANNEL_LOCKED) {
+ WARN_ON(channel->state & EFX_CHANNEL_STATE_NAPI);
+ channel->state |= EFX_CHANNEL_STATE_NAPI_YIELD;
+ rc = false;
+ } else {
+ /* we don't care if someone yielded */
+ channel->state = EFX_CHANNEL_STATE_NAPI;
+ }
+ spin_unlock_bh(&channel->state_lock);
+ return rc;
+}
+
+static inline void efx_channel_unlock_napi(struct efx_channel *channel)
+{
+ spin_lock_bh(&channel->state_lock);
+ WARN_ON(channel->state &
+ (EFX_CHANNEL_STATE_POLL | EFX_CHANNEL_STATE_NAPI_YIELD));
+
+ channel->state &= EFX_CHANNEL_STATE_DISABLED;
+ spin_unlock_bh(&channel->state_lock);
+}
+
+/* Called from efx_busy_poll(). */
+static inline bool efx_channel_lock_poll(struct efx_channel *channel)
+{
+ bool rc = true;
+
+ spin_lock_bh(&channel->state_lock);
+ if ((channel->state & EFX_CHANNEL_LOCKED)) {
+ channel->state |= EFX_CHANNEL_STATE_POLL_YIELD;
+ rc = false;
+ } else {
+ /* preserve yield marks */
+ channel->state |= EFX_CHANNEL_STATE_POLL;
+ }
+ spin_unlock_bh(&channel->state_lock);
+ return rc;
+}
+
+/* Called from efx_busy_poll() to release ownership of the channel. */
+static inline void efx_channel_unlock_poll(struct efx_channel *channel)
+{
+ spin_lock_bh(&channel->state_lock);
+ WARN_ON(channel->state & EFX_CHANNEL_STATE_NAPI);
+
+ /* will reset state to idle, unless channel is disabled */
+ channel->state &= EFX_CHANNEL_STATE_DISABLED;
+ spin_unlock_bh(&channel->state_lock);
+}
+
+/* True if a socket is polling, even if it did not get the lock. */
+static inline bool efx_channel_busy_polling(struct efx_channel *channel)
+{
+ WARN_ON(!(channel->state & EFX_CHANNEL_OWNED));
+ return channel->state & EFX_CHANNEL_USER_PEND;
+}
+
+static inline void efx_channel_enable(struct efx_channel *channel)
+{
+ spin_lock_bh(&channel->state_lock);
+ channel->state = EFX_CHANNEL_STATE_IDLE;
+ spin_unlock_bh(&channel->state_lock);
+}
+
+/* False if the channel is currently owned. */
+static inline bool efx_channel_disable(struct efx_channel *channel)
+{
+ bool rc = true;
+
+ spin_lock_bh(&channel->state_lock);
+ if (channel->state & EFX_CHANNEL_OWNED)
+ rc = false;
+ channel->state |= EFX_CHANNEL_STATE_DISABLED;
+ spin_unlock_bh(&channel->state_lock);
+
+ return rc;
+}
+
+#else /* CONFIG_NET_RX_BUSY_POLL */
+
+static inline void efx_channel_init_lock(struct efx_channel *channel)
+{
+}
+
+static inline bool efx_channel_lock_napi(struct efx_channel *channel)
+{
+ return true;
+}
+
+static inline void efx_channel_unlock_napi(struct efx_channel *channel)
+{
+}
+
+static inline bool efx_channel_lock_poll(struct efx_channel *channel)
+{
+ return false;
+}
+
+static inline void efx_channel_unlock_poll(struct efx_channel *channel)
+{
+}
+
+static inline bool efx_channel_busy_polling(struct efx_channel *channel)
+{
+ return false;
+}
+
+static inline void efx_channel_enable(struct efx_channel *channel)
+{
+}
+
+static inline bool efx_channel_disable(struct efx_channel *channel)
+{
+ return true;
+}
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
+/**
+ * struct efx_msi_context - Context for each MSI
+ * @efx: The associated NIC
+ * @index: Index of the channel/IRQ
+ * @name: Name of the channel/IRQ
+ *
+ * Unlike &struct efx_channel, this is never reallocated and is always
+ * safe for the IRQ handler to access.
+ */
+struct efx_msi_context {
+ struct efx_nic *efx;
+ unsigned int index;
+ char name[IFNAMSIZ + 6];
+};
+
+/**
+ * struct efx_channel_type - distinguishes traffic and extra channels
+ * @handle_no_channel: Handle failure to allocate an extra channel
+ * @pre_probe: Set up extra state prior to initialisation
+ * @post_remove: Tear down extra state after finalisation, if allocated.
+ * May be called on channels that have not been probed.
+ * @get_name: Generate the channel's name (used for its IRQ handler)
+ * @copy: Copy the channel state prior to reallocation. May be %NULL if
+ * reallocation is not supported.
+ * @receive_skb: Handle an skb ready to be passed to netif_receive_skb()
+ * @keep_eventq: Flag for whether event queue should be kept initialised
+ * while the device is stopped
+ */
+struct efx_channel_type {
+ void (*handle_no_channel)(struct efx_nic *);
+ int (*pre_probe)(struct efx_channel *);
+ void (*post_remove)(struct efx_channel *);
+ void (*get_name)(struct efx_channel *, char *buf, size_t len);
+ struct efx_channel *(*copy)(const struct efx_channel *);
+ bool (*receive_skb)(struct efx_channel *, struct sk_buff *);
+ bool keep_eventq;
+};
+
+enum efx_led_mode {
+ EFX_LED_OFF = 0,
+ EFX_LED_ON = 1,
+ EFX_LED_DEFAULT = 2
+};
+
+#define STRING_TABLE_LOOKUP(val, member) \
+ ((val) < member ## _max) ? member ## _names[val] : "(invalid)"
+
+extern const char *const efx_loopback_mode_names[];
+extern const unsigned int efx_loopback_mode_max;
+#define LOOPBACK_MODE(efx) \
+ STRING_TABLE_LOOKUP((efx)->loopback_mode, efx_loopback_mode)
+
+extern const char *const efx_reset_type_names[];
+extern const unsigned int efx_reset_type_max;
+#define RESET_TYPE(type) \
+ STRING_TABLE_LOOKUP(type, efx_reset_type)
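+/* Both macros rely on matching <prefix>_names[] / <prefix>_max pairs,
+ * e.g. LOOPBACK_MODE(efx) indexes efx_loopback_mode_names[] and falls
+ * back to "(invalid)" when the mode is out of range.
+ */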
+
+enum efx_int_mode {
+ /* Be careful if altering to correct macro below */
+ EFX_INT_MODE_MSIX = 0,
+ EFX_INT_MODE_MSI = 1,
+ EFX_INT_MODE_LEGACY = 2,
+ EFX_INT_MODE_MAX /* Insert any new items before this */
+};
+#define EFX_INT_MODE_USE_MSI(x) (((x)->interrupt_mode) <= EFX_INT_MODE_MSI)
+
+enum nic_state {
+ STATE_UNINIT = 0, /* device being probed/removed or is frozen */
+ STATE_READY = 1, /* hardware ready and netdev registered */
+ STATE_DISABLED = 2, /* device disabled due to hardware errors */
+ STATE_RECOVERY = 3, /* device recovering from PCI error */
+};
+
+/* Forward declaration */
+struct efx_nic;
+
+/* Pseudo bit-mask flow control field */
+#define EFX_FC_RX FLOW_CTRL_RX
+#define EFX_FC_TX FLOW_CTRL_TX
+#define EFX_FC_AUTO 4
+
+/**
+ * struct efx_link_state - Current state of the link
+ * @up: Link is up
+ * @fd: Link is full-duplex
+ * @fc: Actual flow control flags
+ * @speed: Link speed (Mbps)
+ */
+struct efx_link_state {
+ bool up;
+ bool fd;
+ u8 fc;
+ unsigned int speed;
+};
+
+static inline bool efx_link_state_equal(const struct efx_link_state *left,
+ const struct efx_link_state *right)
+{
+ return left->up == right->up && left->fd == right->fd &&
+ left->fc == right->fc && left->speed == right->speed;
+}
+
+/**
+ * struct efx_phy_operations - Efx PHY operations table
+ * @probe: Probe PHY and initialise efx->mdio.mode_support, efx->mdio.mmds,
+ * efx->loopback_modes.
+ * @init: Initialise PHY
+ * @fini: Shut down PHY
+ * @reconfigure: Reconfigure PHY (e.g. for new link parameters)
+ * @poll: Update @link_state and report whether it changed.
+ * Serialised by the mac_lock.
+ * @get_settings: Get ethtool settings. Serialised by the mac_lock.
+ * @set_settings: Set ethtool settings. Serialised by the mac_lock.
+ * @set_npage_adv: Set abilities advertised in (Extended) Next Page
+ * (only needed where AN bit is set in mmds)
+ * @test_alive: Test that PHY is 'alive' (online)
+ * @test_name: Get the name of a PHY-specific test/result
+ * @run_tests: Run tests and record results as appropriate (offline).
+ * Flags are the ethtool tests flags.
+ */
+struct efx_phy_operations {
+ int (*probe) (struct efx_nic *efx);
+ int (*init) (struct efx_nic *efx);
+ void (*fini) (struct efx_nic *efx);
+ void (*remove) (struct efx_nic *efx);
+ int (*reconfigure) (struct efx_nic *efx);
+ bool (*poll) (struct efx_nic *efx);
+ void (*get_settings) (struct efx_nic *efx,
+ struct ethtool_cmd *ecmd);
+ int (*set_settings) (struct efx_nic *efx,
+ struct ethtool_cmd *ecmd);
+ void (*set_npage_adv) (struct efx_nic *efx, u32);
+ int (*test_alive) (struct efx_nic *efx);
+ const char *(*test_name) (struct efx_nic *efx, unsigned int index);
+ int (*run_tests) (struct efx_nic *efx, int *results, unsigned flags);
+ int (*get_module_eeprom) (struct efx_nic *efx,
+ struct ethtool_eeprom *ee,
+ u8 *data);
+ int (*get_module_info) (struct efx_nic *efx,
+ struct ethtool_modinfo *modinfo);
+};
+
+/**
+ * enum efx_phy_mode - PHY operating mode flags
+ * @PHY_MODE_NORMAL: on and should pass traffic
+ * @PHY_MODE_TX_DISABLED: on with TX disabled
+ * @PHY_MODE_LOW_POWER: set to low power through MDIO
+ * @PHY_MODE_OFF: switched off through external control
+ * @PHY_MODE_SPECIAL: on but will not pass traffic
+ */
+enum efx_phy_mode {
+ PHY_MODE_NORMAL = 0,
+ PHY_MODE_TX_DISABLED = 1,
+ PHY_MODE_LOW_POWER = 2,
+ PHY_MODE_OFF = 4,
+ PHY_MODE_SPECIAL = 8,
+};
+
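+/* A PHY counts as "disabled" if any mode flag other than TX_DISABLED is
+ * set, i.e. PHY_MODE_LOW_POWER, PHY_MODE_OFF or PHY_MODE_SPECIAL.
+ */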
+static inline bool efx_phy_mode_disabled(enum efx_phy_mode mode)
+{
+ return !!(mode & ~PHY_MODE_TX_DISABLED);
+}
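+/* For example, PHY_MODE_NORMAL and PHY_MODE_TX_DISABLED do not count as
+ * "disabled" here, whereas PHY_MODE_LOW_POWER, PHY_MODE_OFF and
+ * PHY_MODE_SPECIAL (alone or combined with other flags) do, since each of
+ * them sets a bit other than PHY_MODE_TX_DISABLED.
+ */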
+
+/**
+ * struct efx_hw_stat_desc - Description of a hardware statistic
+ * @name: Name of the statistic as visible through ethtool, or %NULL if
+ * it should not be exposed
+ * @dma_width: Width in bits (0 for non-DMA statistics)
+ * @offset: Offset within stats (ignored for non-DMA statistics)
+ */
+struct efx_hw_stat_desc {
+ const char *name;
+ u16 dma_width;
+ u16 offset;
+};
+
+/* Number of bits used in a multicast filter hash address */
+#define EFX_MCAST_HASH_BITS 8
+
+/* Number of (single-bit) entries in a multicast filter hash */
+#define EFX_MCAST_HASH_ENTRIES (1 << EFX_MCAST_HASH_BITS)
+
+/* An Efx multicast filter hash */
+union efx_multicast_hash {
+ u8 byte[EFX_MCAST_HASH_ENTRIES / 8];
+ efx_oword_t oword[EFX_MCAST_HASH_ENTRIES / sizeof(efx_oword_t) / 8];
+};
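+/* Illustrative sketch (not part of the driver): setting one bit of the
+ * 256-bit multicast hash, given a hash value derived from a multicast MAC
+ * address (the Falcon-architecture code typically derives it from a CRC of
+ * the address).  Only the low EFX_MCAST_HASH_BITS bits select the bit.
+ */
+#if 0
+static void example_mcast_hash_set(union efx_multicast_hash *hash,
+				   u32 addr_hash)
+{
+	__set_bit_le(addr_hash & (EFX_MCAST_HASH_ENTRIES - 1), hash->byte);
+}
+#endif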
+
+struct efx_vf;
+struct vfdi_status;
+
+/**
+ * struct efx_nic - an Efx NIC
+ * @name: Device name (net device name or bus id before net device registered)
+ * @pci_dev: The PCI device
+ * @node: List node for maintaining primary/secondary function lists
+ * @primary: &struct efx_nic instance for the primary function of this
+ * controller. May be the same structure, and may be %NULL if no
+ * primary function is bound. Serialised by rtnl_lock.
+ * @secondary_list: List of &struct efx_nic instances for the secondary PCI
+ * functions of the controller, if this is for the primary function.
+ * Serialised by rtnl_lock.
+ * @type: Controller type attributes
+ * @legacy_irq: IRQ number
+ * @workqueue: Workqueue for port reconfigures and the HW monitor.
+ * Work items do not hold and must not acquire RTNL.
+ * @workqueue_name: Name of workqueue
+ * @reset_work: Scheduled reset workitem
+ * @membase_phys: Memory BAR value as physical address
+ * @membase: Memory BAR value
+ * @interrupt_mode: Interrupt mode
+ * @timer_quantum_ns: Interrupt timer quantum, in nanoseconds
+ * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues
+ * @irq_rx_moderation: IRQ moderation time for RX event queues
+ * @msg_enable: Log message enable flags
+ * @state: Device state number (%STATE_*). Serialised by the rtnl_lock.
+ * @reset_pending: Bitmask for pending resets
+ * @tx_queue: TX DMA queues
+ * @rx_queue: RX DMA queues
+ * @channel: Channels
+ * @msi_context: Context for each MSI
+ * @extra_channel_types: Types of extra (non-traffic) channels that
+ * should be allocated for this NIC
+ * @rxq_entries: Size of receive queues requested by user.
+ * @txq_entries: Size of transmit queues requested by user.
+ * @txq_stop_thresh: TX queue fill level at or above which we stop it.
+ * @txq_wake_thresh: TX queue fill level at or below which we wake it.
+ * @tx_dc_base: Base qword address in SRAM of TX queue descriptor caches
+ * @rx_dc_base: Base qword address in SRAM of RX queue descriptor caches
+ * @sram_lim_qw: Qword address limit of SRAM
+ * @next_buffer_table: First available buffer table id
+ * @n_channels: Number of channels in use
+ * @n_rx_channels: Number of channels used for RX (= number of RX queues)
+ * @n_tx_channels: Number of channels used for TX
+ * @rx_ip_align: RX DMA address offset to have IP header aligned in
+ *	accordance with NET_IP_ALIGN
+ * @rx_dma_len: Current maximum RX DMA length
+ * @rx_buffer_order: Order (log2) of number of pages for each RX buffer
+ * @rx_buffer_truesize: Amortised allocation size of an RX buffer,
+ * for use in sk_buff::truesize
+ * @rx_prefix_size: Size of RX prefix before packet data
+ * @rx_packet_hash_offset: Offset of RX flow hash from start of packet data
+ * (valid only if @rx_prefix_size != 0; always negative)
+ * @rx_packet_len_offset: Offset of RX packet length from start of packet data
+ * (valid only for NICs that set %EFX_RX_PKT_PREFIX_LEN; always negative)
+ * @rx_packet_ts_offset: Offset of timestamp from start of packet data
+ * (valid only if channel->sync_timestamps_enabled; always negative)
+ * @rx_hash_key: Toeplitz hash key for RSS
+ * @rx_indir_table: Indirection table for RSS
+ * @rx_scatter: Scatter mode enabled for receives
+ * @int_error_count: Number of internal errors seen recently
+ * @int_error_expire: Time at which error count will be expired
+ * @irq_soft_enabled: Are IRQs soft-enabled? If not, IRQ handler will
+ * acknowledge but do nothing else.
+ * @irq_status: Interrupt status buffer
+ * @irq_zero_count: Number of legacy IRQs seen with queue flags == 0
+ * @irq_level: IRQ level/index for IRQs not triggered by an event queue
+ * @selftest_work: Work item for asynchronous self-test
+ * @mtd_list: List of MTDs attached to the NIC
+ * @nic_data: Hardware dependent state
+ * @mcdi: Management-Controller-to-Driver Interface state
+ * @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode,
+ * efx_monitor() and efx_reconfigure_port()
+ * @port_enabled: Port enabled indicator.
+ * Serialises efx_stop_all(), efx_start_all(), efx_monitor() and
+ * efx_mac_work() with kernel interfaces. Safe to read under any
+ * one of the rtnl_lock, mac_lock, or netif_tx_lock, but all three must
+ * be held to modify it.
+ * @port_initialized: Port initialized?
+ * @net_dev: Operating system network device. Consider holding the rtnl lock
+ * @stats_buffer: DMA buffer for statistics
+ * @phy_type: PHY type
+ * @phy_op: PHY interface
+ * @phy_data: PHY private data (including PHY-specific stats)
+ * @mdio: PHY MDIO interface
+ * @mdio_bus: PHY MDIO bus ID (only used by Siena)
+ * @phy_mode: PHY operating mode. Serialised by @mac_lock.
+ * @link_advertising: Autonegotiation advertising flags
+ * @link_state: Current state of the link
+ * @n_link_state_changes: Number of times the link has changed state
+ * @unicast_filter: Flag for Falcon-arch simple unicast filter.
+ * Protected by @mac_lock.
+ * @multicast_hash: Multicast hash table for Falcon-arch.
+ * Protected by @mac_lock.
+ * @wanted_fc: Wanted flow control flags
+ * @fc_disable: When non-zero, flow control is disabled. Typically used to
+ *	ensure that network back pressure doesn't delay DMA queue flushes.
+ * Serialised by the rtnl lock.
+ * @mac_work: Work item for changing MAC promiscuity and multicast hash
+ * @loopback_mode: Loopback status
+ * @loopback_modes: Supported loopback mode bitmask
+ * @loopback_selftest: Offline self-test private state
+ * @filter_lock: Filter table lock
+ * @filter_state: Architecture-dependent filter table state
+ * @rps_flow_id: Flow IDs of filters allocated for accelerated RFS,
+ * indexed by filter ID
+ * @rps_expire_index: Next index to check for expiry in @rps_flow_id
+ * @active_queues: Count of RX and TX queues that haven't been flushed and drained.
+ * @rxq_flush_pending: Number of receive queues that still need to be flushed.
+ *	Decremented when efx_flush_rx_queue() is called.
+ * @rxq_flush_outstanding: Number of RX flushes started but not yet
+ *	completed (either success or failure). Not used when MCDI is used to
+ * flush receive queues.
+ * @flush_wq: wait queue used by efx_nic_flush_queues() to wait for flush completions.
+ * @vf: Array of &struct efx_vf objects.
+ * @vf_count: Number of VFs intended to be enabled.
+ * @vf_init_count: Number of VFs that have been fully initialised.
+ * @vi_scale: log2 number of vnics per VF.
+ * @ptp_data: PTP state data
+ * @vpd_sn: Serial number read from VPD
+ * @monitor_work: Hardware monitor workitem
+ * @biu_lock: BIU (bus interface unit) lock
+ * @last_irq_cpu: Last CPU to handle a possible test interrupt. This
+ * field is used by efx_test_interrupts() to verify that an
+ * interrupt has occurred.
+ * @stats_lock: Statistics update lock. Must be held when calling
+ * efx_nic_type::{update,start,stop}_stats.
+ * @n_rx_noskb_drops: Count of RX packets dropped due to failure to allocate an skb
+ *
+ * This is stored in the private area of the &struct net_device.
+ */
+struct efx_nic {
+ /* The following fields should be written very rarely */
+
+ char name[IFNAMSIZ];
+ struct list_head node;
+ struct efx_nic *primary;
+ struct list_head secondary_list;
+ struct pci_dev *pci_dev;
+ unsigned int port_num;
+ const struct efx_nic_type *type;
+ int legacy_irq;
+ bool eeh_disabled_legacy_irq;
+ struct workqueue_struct *workqueue;
+ char workqueue_name[16];
+ struct work_struct reset_work;
+ resource_size_t membase_phys;
+ void __iomem *membase;
+
+ enum efx_int_mode interrupt_mode;
+ unsigned int timer_quantum_ns;
+ bool irq_rx_adaptive;
+ unsigned int irq_rx_moderation;
+ u32 msg_enable;
+
+ enum nic_state state;
+ unsigned long reset_pending;
+
+ struct efx_channel *channel[EFX_MAX_CHANNELS];
+ struct efx_msi_context msi_context[EFX_MAX_CHANNELS];
+ const struct efx_channel_type *
+ extra_channel_type[EFX_MAX_EXTRA_CHANNELS];
+
+ unsigned rxq_entries;
+ unsigned txq_entries;
+ unsigned int txq_stop_thresh;
+ unsigned int txq_wake_thresh;
+
+ unsigned tx_dc_base;
+ unsigned rx_dc_base;
+ unsigned sram_lim_qw;
+ unsigned next_buffer_table;
+
+ unsigned int max_channels;
+ unsigned n_channels;
+ unsigned n_rx_channels;
+ unsigned rss_spread;
+ unsigned tx_channel_offset;
+ unsigned n_tx_channels;
+ unsigned int rx_ip_align;
+ unsigned int rx_dma_len;
+ unsigned int rx_buffer_order;
+ unsigned int rx_buffer_truesize;
+ unsigned int rx_page_buf_step;
+ unsigned int rx_bufs_per_page;
+ unsigned int rx_pages_per_batch;
+ unsigned int rx_prefix_size;
+ int rx_packet_hash_offset;
+ int rx_packet_len_offset;
+ int rx_packet_ts_offset;
+ u8 rx_hash_key[40];
+ u32 rx_indir_table[128];
+ bool rx_scatter;
+
+ unsigned int_error_count;
+ unsigned long int_error_expire;
+
+ bool irq_soft_enabled;
+ struct efx_buffer irq_status;
+ unsigned irq_zero_count;
+ unsigned irq_level;
+ struct delayed_work selftest_work;
+
+#ifdef CONFIG_SFC_MTD
+ struct list_head mtd_list;
+#endif
+
+ void *nic_data;
+ struct efx_mcdi_data *mcdi;
+
+ struct mutex mac_lock;
+ struct work_struct mac_work;
+ bool port_enabled;
+
+ bool mc_bist_for_other_fn;
+ bool port_initialized;
+ struct net_device *net_dev;
+
+ struct efx_buffer stats_buffer;
+ u64 rx_nodesc_drops_total;
+ u64 rx_nodesc_drops_while_down;
+ bool rx_nodesc_drops_prev_state;
+
+ unsigned int phy_type;
+ const struct efx_phy_operations *phy_op;
+ void *phy_data;
+ struct mdio_if_info mdio;
+ unsigned int mdio_bus;
+ enum efx_phy_mode phy_mode;
+
+ u32 link_advertising;
+ struct efx_link_state link_state;
+ unsigned int n_link_state_changes;
+
+ bool unicast_filter;
+ union efx_multicast_hash multicast_hash;
+ u8 wanted_fc;
+ unsigned fc_disable;
+
+ atomic_t rx_reset;
+ enum efx_loopback_mode loopback_mode;
+ u64 loopback_modes;
+
+ void *loopback_selftest;
+
+ spinlock_t filter_lock;
+ void *filter_state;
+#ifdef CONFIG_RFS_ACCEL
+ u32 *rps_flow_id;
+ unsigned int rps_expire_index;
+#endif
+
+ atomic_t active_queues;
+ atomic_t rxq_flush_pending;
+ atomic_t rxq_flush_outstanding;
+ wait_queue_head_t flush_wq;
+
+#ifdef CONFIG_SFC_SRIOV
+ struct efx_vf *vf;
+ unsigned vf_count;
+ unsigned vf_init_count;
+ unsigned vi_scale;
+#endif
+
+ struct efx_ptp_data *ptp_data;
+
+ char *vpd_sn;
+
+ /* The following fields may be written more often */
+
+ struct delayed_work monitor_work ____cacheline_aligned_in_smp;
+ spinlock_t biu_lock;
+ int last_irq_cpu;
+ spinlock_t stats_lock;
+ atomic_t n_rx_noskb_drops;
+};
+
+static inline int efx_dev_registered(struct efx_nic *efx)
+{
+ return efx->net_dev->reg_state == NETREG_REGISTERED;
+}
+
+static inline unsigned int efx_port_num(struct efx_nic *efx)
+{
+ return efx->port_num;
+}
+
+struct efx_mtd_partition {
+ struct list_head node;
+ struct mtd_info mtd;
+ const char *dev_type_name;
+ const char *type_name;
+ char name[IFNAMSIZ + 20];
+};
+
+/**
+ * struct efx_nic_type - Efx device type definition
+ * @mem_map_size: Get memory BAR mapped size
+ * @probe: Probe the controller
+ * @remove: Free resources allocated by probe()
+ * @init: Initialise the controller
+ * @dimension_resources: Dimension controller resources (buffer table,
+ * and VIs once the available interrupt resources are clear)
+ * @fini: Shut down the controller
+ * @monitor: Periodic function for polling link state and hardware monitor
+ * @map_reset_reason: Map ethtool reset reason to a reset method
+ * @map_reset_flags: Map ethtool reset flags to a reset method, if possible
+ * @reset: Reset the controller hardware and possibly the PHY. This will
+ * be called while the controller is uninitialised.
+ * @probe_port: Probe the MAC and PHY
+ * @remove_port: Free resources allocated by probe_port()
+ * @handle_global_event: Handle a "global" event (may be %NULL)
+ * @fini_dmaq: Flush and finalise DMA queues (RX and TX queues)
+ * @prepare_flush: Prepare the hardware for flushing the DMA queues
+ * (for Falcon architecture)
+ * @finish_flush: Clean up after flushing the DMA queues (for Falcon
+ * architecture)
+ * @prepare_flr: Prepare for an FLR
+ * @finish_flr: Clean up after an FLR
+ * @describe_stats: Describe statistics for ethtool
+ * @update_stats: Update statistics not provided by event handling.
+ * Either argument may be %NULL.
+ * @start_stats: Start the regular fetching of statistics
+ * @pull_stats: Pull stats from the NIC and wait until they arrive.
+ * @stop_stats: Stop the regular fetching of statistics
+ * @set_id_led: Set state of identifying LED or revert to automatic function
+ * @push_irq_moderation: Apply interrupt moderation value
+ * @reconfigure_port: Push loopback/power/txdis changes to the MAC and PHY
+ * @prepare_enable_fc_tx: Prepare MAC to enable pause frame TX (may be %NULL)
+ * @reconfigure_mac: Push MAC address, MTU, flow control and filter settings
+ * to the hardware. Serialised by the mac_lock.
+ * @check_mac_fault: Check MAC fault state. True if fault present.
+ * @get_wol: Get WoL configuration from driver state
+ * @set_wol: Push WoL configuration to the NIC
+ * @resume_wol: Synchronise WoL state between driver and MC (e.g. after resume)
+ * @test_chip: Test registers. May use efx_farch_test_registers(), and is
+ * expected to reset the NIC.
+ * @test_nvram: Test validity of NVRAM contents
+ * @mcdi_request: Send an MCDI request with the given header and SDU.
+ * The SDU length may be any value from 0 up to the protocol-
+ * defined maximum, but its buffer will be padded to a multiple
+ * of 4 bytes.
+ * @mcdi_poll_response: Test whether an MCDI response is available.
+ * @mcdi_read_response: Read the MCDI response PDU. The offset will
+ * be a multiple of 4. The length may not be, but the buffer
+ * will be padded so it is safe to round up.
+ * @mcdi_poll_reboot: Test whether the MCDI has rebooted. If so,
+ * return an appropriate error code for aborting any current
+ * request; otherwise return 0.
+ * @irq_enable_master: Enable IRQs on the NIC. Each event queue must
+ * be separately enabled after this.
+ * @irq_test_generate: Generate a test IRQ
+ * @irq_disable_non_ev: Disable non-event IRQs on the NIC. Each event
+ * queue must be separately disabled before this.
+ * @irq_handle_msi: Handle MSI for a channel. The @dev_id argument is
+ * a pointer to the &struct efx_msi_context for the channel.
+ * @irq_handle_legacy: Handle legacy interrupt. The @dev_id argument
+ * is a pointer to the &struct efx_nic.
+ * @tx_probe: Allocate resources for TX queue
+ * @tx_init: Initialise TX queue on the NIC
+ * @tx_remove: Free resources for TX queue
+ * @tx_write: Write TX descriptors and doorbell
+ * @rx_push_rss_config: Write RSS hash key and indirection table to the NIC
+ * @rx_probe: Allocate resources for RX queue
+ * @rx_init: Initialise RX queue on the NIC
+ * @rx_remove: Free resources for RX queue
+ * @rx_write: Write RX descriptors and doorbell
+ * @rx_defer_refill: Generate a refill reminder event
+ * @ev_probe: Allocate resources for event queue
+ * @ev_init: Initialise event queue on the NIC
+ * @ev_fini: Deinitialise event queue on the NIC
+ * @ev_remove: Free resources for event queue
+ * @ev_process: Process events for a queue, up to the given NAPI quota
+ * @ev_read_ack: Acknowledge read events on a queue, rearming its IRQ
+ * @ev_test_generate: Generate a test event
+ * @filter_table_probe: Probe filter capabilities and set up filter software state
+ * @filter_table_restore: Restore filters removed from hardware
+ * @filter_table_remove: Remove filters from hardware and tear down software state
+ * @filter_update_rx_scatter: Update filters after change to rx scatter setting
+ * @filter_insert: Add or replace a filter
+ * @filter_remove_safe: Remove a filter by ID, carefully
+ * @filter_get_safe: Retrieve a filter by ID, carefully
+ * @filter_clear_rx: Remove all RX filters whose priority is less than or
+ * equal to the given priority and is not %EFX_FILTER_PRI_AUTO
+ * @filter_count_rx_used: Get the number of filters in use at a given priority
+ * @filter_get_rx_id_limit: Get maximum value of a filter id, plus 1
+ * @filter_get_rx_ids: Get list of RX filters at a given priority
+ * @filter_rfs_insert: Add or replace a filter for RFS. This must be
+ * atomic. The hardware change may be asynchronous but should
+ * not be delayed for long. It may fail if this can't be done
+ * atomically.
+ * @filter_rfs_expire_one: Consider expiring a filter inserted for RFS.
+ * This must check whether the specified table entry is used by RFS
+ * and that rps_may_expire_flow() returns true for it.
+ * @mtd_probe: Probe and add MTD partitions associated with this net device,
+ * using efx_mtd_add()
+ * @mtd_rename: Set an MTD partition name using the net device name
+ * @mtd_read: Read from an MTD partition
+ * @mtd_erase: Erase part of an MTD partition
+ * @mtd_write: Write to an MTD partition
+ * @mtd_sync: Wait for write-back to complete on MTD partition. This
+ * also notifies the driver that a writer has finished using this
+ * partition.
+ * @ptp_write_host_time: Send host time to MC as part of sync protocol
+ * @ptp_set_ts_sync_events: Enable or disable sync events for inline RX
+ * timestamping, possibly only temporarily for the purposes of a reset.
+ * @ptp_set_ts_config: Set hardware timestamp configuration. The flags
+ * and tx_type will already have been validated but this operation
+ *	must validate and update rx_filter.
+ * @sriov_init: Initialise SR-IOV and allocate resources for VFs
+ * @sriov_fini: Disable SR-IOV and free its resources
+ * @sriov_mac_address_changed: Handle a change of the function's MAC address
+ * @sriov_wanted: Report whether SR-IOV (VF support) is wanted
+ * @sriov_reset: Restore SR-IOV state after a reset
+ * @revision: Hardware architecture revision
+ * @txd_ptr_tbl_base: TX descriptor ring base address
+ * @rxd_ptr_tbl_base: RX descriptor ring base address
+ * @buf_tbl_base: Buffer table base address
+ * @evq_ptr_tbl_base: Event queue pointer table base address
+ * @evq_rptr_tbl_base: Event queue read-pointer table base address
+ * @max_dma_mask: Maximum possible DMA mask
+ * @rx_prefix_size: Size of RX prefix before packet data
+ * @rx_hash_offset: Offset of RX flow hash within prefix
+ * @rx_ts_offset: Offset of timestamp within prefix
+ * @rx_buffer_padding: Size of padding at end of RX packet
+ * @can_rx_scatter: NIC is able to scatter packets to multiple buffers
+ * @always_rx_scatter: NIC will always scatter packets to multiple buffers
+ * @max_interrupt_mode: Highest capability interrupt mode supported
+ *	from &enum efx_int_mode.
+ * @timer_period_max: Maximum period of interrupt timer (in ticks)
+ * @offload_features: net_device feature flags for protocol offload
+ * features implemented in hardware
+ * @mcdi_max_ver: Maximum MCDI version supported
+ * @max_rx_ip_filters: Maximum number of RX IP filters supported
+ * @hwtstamp_filters: Mask of hardware timestamp filter types supported
+ */
+struct efx_nic_type {
+ unsigned int (*mem_map_size)(struct efx_nic *efx);
+ int (*probe)(struct efx_nic *efx);
+ void (*remove)(struct efx_nic *efx);
+ int (*init)(struct efx_nic *efx);
+ int (*dimension_resources)(struct efx_nic *efx);
+ void (*fini)(struct efx_nic *efx);
+ void (*monitor)(struct efx_nic *efx);
+ enum reset_type (*map_reset_reason)(enum reset_type reason);
+ int (*map_reset_flags)(u32 *flags);
+ int (*reset)(struct efx_nic *efx, enum reset_type method);
+ int (*probe_port)(struct efx_nic *efx);
+ void (*remove_port)(struct efx_nic *efx);
+ bool (*handle_global_event)(struct efx_channel *channel, efx_qword_t *);
+ int (*fini_dmaq)(struct efx_nic *efx);
+ void (*prepare_flush)(struct efx_nic *efx);
+ void (*finish_flush)(struct efx_nic *efx);
+ void (*prepare_flr)(struct efx_nic *efx);
+ void (*finish_flr)(struct efx_nic *efx);
+ size_t (*describe_stats)(struct efx_nic *efx, u8 *names);
+ size_t (*update_stats)(struct efx_nic *efx, u64 *full_stats,
+ struct rtnl_link_stats64 *core_stats);
+ void (*start_stats)(struct efx_nic *efx);
+ void (*pull_stats)(struct efx_nic *efx);
+ void (*stop_stats)(struct efx_nic *efx);
+ void (*set_id_led)(struct efx_nic *efx, enum efx_led_mode mode);
+ void (*push_irq_moderation)(struct efx_channel *channel);
+ int (*reconfigure_port)(struct efx_nic *efx);
+ void (*prepare_enable_fc_tx)(struct efx_nic *efx);
+ int (*reconfigure_mac)(struct efx_nic *efx);
+ bool (*check_mac_fault)(struct efx_nic *efx);
+ void (*get_wol)(struct efx_nic *efx, struct ethtool_wolinfo *wol);
+ int (*set_wol)(struct efx_nic *efx, u32 type);
+ void (*resume_wol)(struct efx_nic *efx);
+ int (*test_chip)(struct efx_nic *efx, struct efx_self_tests *tests);
+ int (*test_nvram)(struct efx_nic *efx);
+ void (*mcdi_request)(struct efx_nic *efx,
+ const efx_dword_t *hdr, size_t hdr_len,
+ const efx_dword_t *sdu, size_t sdu_len);
+ bool (*mcdi_poll_response)(struct efx_nic *efx);
+ void (*mcdi_read_response)(struct efx_nic *efx, efx_dword_t *pdu,
+ size_t pdu_offset, size_t pdu_len);
+ int (*mcdi_poll_reboot)(struct efx_nic *efx);
+ void (*irq_enable_master)(struct efx_nic *efx);
+ void (*irq_test_generate)(struct efx_nic *efx);
+ void (*irq_disable_non_ev)(struct efx_nic *efx);
+ irqreturn_t (*irq_handle_msi)(int irq, void *dev_id);
+ irqreturn_t (*irq_handle_legacy)(int irq, void *dev_id);
+ int (*tx_probe)(struct efx_tx_queue *tx_queue);
+ void (*tx_init)(struct efx_tx_queue *tx_queue);
+ void (*tx_remove)(struct efx_tx_queue *tx_queue);
+ void (*tx_write)(struct efx_tx_queue *tx_queue);
+ void (*rx_push_rss_config)(struct efx_nic *efx);
+ int (*rx_probe)(struct efx_rx_queue *rx_queue);
+ void (*rx_init)(struct efx_rx_queue *rx_queue);
+ void (*rx_remove)(struct efx_rx_queue *rx_queue);
+ void (*rx_write)(struct efx_rx_queue *rx_queue);
+ void (*rx_defer_refill)(struct efx_rx_queue *rx_queue);
+ int (*ev_probe)(struct efx_channel *channel);
+ int (*ev_init)(struct efx_channel *channel);
+ void (*ev_fini)(struct efx_channel *channel);
+ void (*ev_remove)(struct efx_channel *channel);
+ int (*ev_process)(struct efx_channel *channel, int quota);
+ void (*ev_read_ack)(struct efx_channel *channel);
+ void (*ev_test_generate)(struct efx_channel *channel);
+ int (*filter_table_probe)(struct efx_nic *efx);
+ void (*filter_table_restore)(struct efx_nic *efx);
+ void (*filter_table_remove)(struct efx_nic *efx);
+ void (*filter_update_rx_scatter)(struct efx_nic *efx);
+ s32 (*filter_insert)(struct efx_nic *efx,
+ struct efx_filter_spec *spec, bool replace);
+ int (*filter_remove_safe)(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id);
+ int (*filter_get_safe)(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id, struct efx_filter_spec *);
+ int (*filter_clear_rx)(struct efx_nic *efx,
+ enum efx_filter_priority priority);
+ u32 (*filter_count_rx_used)(struct efx_nic *efx,
+ enum efx_filter_priority priority);
+ u32 (*filter_get_rx_id_limit)(struct efx_nic *efx);
+ s32 (*filter_get_rx_ids)(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 *buf, u32 size);
+#ifdef CONFIG_RFS_ACCEL
+ s32 (*filter_rfs_insert)(struct efx_nic *efx,
+ struct efx_filter_spec *spec);
+ bool (*filter_rfs_expire_one)(struct efx_nic *efx, u32 flow_id,
+ unsigned int index);
+#endif
+#ifdef CONFIG_SFC_MTD
+ int (*mtd_probe)(struct efx_nic *efx);
+ void (*mtd_rename)(struct efx_mtd_partition *part);
+ int (*mtd_read)(struct mtd_info *mtd, loff_t start, size_t len,
+ size_t *retlen, u8 *buffer);
+ int (*mtd_erase)(struct mtd_info *mtd, loff_t start, size_t len);
+ int (*mtd_write)(struct mtd_info *mtd, loff_t start, size_t len,
+ size_t *retlen, const u8 *buffer);
+ int (*mtd_sync)(struct mtd_info *mtd);
+#endif
+ void (*ptp_write_host_time)(struct efx_nic *efx, u32 host_time);
+ int (*ptp_set_ts_sync_events)(struct efx_nic *efx, bool en, bool temp);
+ int (*ptp_set_ts_config)(struct efx_nic *efx,
+ struct hwtstamp_config *init);
+ int (*sriov_init)(struct efx_nic *efx);
+ void (*sriov_fini)(struct efx_nic *efx);
+ void (*sriov_mac_address_changed)(struct efx_nic *efx);
+ bool (*sriov_wanted)(struct efx_nic *efx);
+ void (*sriov_reset)(struct efx_nic *efx);
+
+ int revision;
+ unsigned int txd_ptr_tbl_base;
+ unsigned int rxd_ptr_tbl_base;
+ unsigned int buf_tbl_base;
+ unsigned int evq_ptr_tbl_base;
+ unsigned int evq_rptr_tbl_base;
+ u64 max_dma_mask;
+ unsigned int rx_prefix_size;
+ unsigned int rx_hash_offset;
+ unsigned int rx_ts_offset;
+ unsigned int rx_buffer_padding;
+ bool can_rx_scatter;
+ bool always_rx_scatter;
+ unsigned int max_interrupt_mode;
+ unsigned int timer_period_max;
+ netdev_features_t offload_features;
+ int mcdi_max_ver;
+ unsigned int max_rx_ip_filters;
+ u32 hwtstamp_filters;
+};
+
+/**************************************************************************
+ *
+ * Prototypes and inline functions
+ *
+ *************************************************************************/
+
+static inline struct efx_channel *
+efx_get_channel(struct efx_nic *efx, unsigned index)
+{
+ EFX_BUG_ON_PARANOID(index >= efx->n_channels);
+ return efx->channel[index];
+}
+
+/* Iterate over all used channels */
+#define efx_for_each_channel(_channel, _efx) \
+ for (_channel = (_efx)->channel[0]; \
+ _channel; \
+ _channel = (_channel->channel + 1 < (_efx)->n_channels) ? \
+ (_efx)->channel[_channel->channel + 1] : NULL)
+
+/* Iterate over all used channels in reverse */
+#define efx_for_each_channel_rev(_channel, _efx) \
+ for (_channel = (_efx)->channel[(_efx)->n_channels - 1]; \
+ _channel; \
+ _channel = _channel->channel ? \
+ (_efx)->channel[_channel->channel - 1] : NULL)
+
+static inline struct efx_tx_queue *
+efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type)
+{
+ EFX_BUG_ON_PARANOID(index >= efx->n_tx_channels ||
+ type >= EFX_TXQ_TYPES);
+ return &efx->channel[efx->tx_channel_offset + index]->tx_queue[type];
+}
+
+static inline bool efx_channel_has_tx_queues(struct efx_channel *channel)
+{
+ return channel->channel - channel->efx->tx_channel_offset <
+ channel->efx->n_tx_channels;
+}
+
+static inline struct efx_tx_queue *
+efx_channel_get_tx_queue(struct efx_channel *channel, unsigned type)
+{
+ EFX_BUG_ON_PARANOID(!efx_channel_has_tx_queues(channel) ||
+ type >= EFX_TXQ_TYPES);
+ return &channel->tx_queue[type];
+}
+
+static inline bool efx_tx_queue_used(struct efx_tx_queue *tx_queue)
+{
+ return !(tx_queue->efx->net_dev->num_tc < 2 &&
+ tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI);
+}
+
+/* Iterate over all TX queues belonging to a channel */
+#define efx_for_each_channel_tx_queue(_tx_queue, _channel) \
+ if (!efx_channel_has_tx_queues(_channel)) \
+ ; \
+ else \
+ for (_tx_queue = (_channel)->tx_queue; \
+ _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES && \
+ efx_tx_queue_used(_tx_queue); \
+ _tx_queue++)
+
+/* Iterate over all possible TX queues belonging to a channel */
+#define efx_for_each_possible_channel_tx_queue(_tx_queue, _channel) \
+ if (!efx_channel_has_tx_queues(_channel)) \
+ ; \
+ else \
+ for (_tx_queue = (_channel)->tx_queue; \
+ _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES; \
+ _tx_queue++)
+
+static inline bool efx_channel_has_rx_queue(struct efx_channel *channel)
+{
+ return channel->rx_queue.core_index >= 0;
+}
+
+static inline struct efx_rx_queue *
+efx_channel_get_rx_queue(struct efx_channel *channel)
+{
+ EFX_BUG_ON_PARANOID(!efx_channel_has_rx_queue(channel));
+ return &channel->rx_queue;
+}
+
+/* Iterate over all RX queues belonging to a channel */
+#define efx_for_each_channel_rx_queue(_rx_queue, _channel) \
+ if (!efx_channel_has_rx_queue(_channel)) \
+ ; \
+ else \
+ for (_rx_queue = &(_channel)->rx_queue; \
+ _rx_queue; \
+ _rx_queue = NULL)
+
+static inline struct efx_channel *
+efx_rx_queue_channel(struct efx_rx_queue *rx_queue)
+{
+ return container_of(rx_queue, struct efx_channel, rx_queue);
+}
+
+static inline int efx_rx_queue_index(struct efx_rx_queue *rx_queue)
+{
+ return efx_rx_queue_channel(rx_queue)->channel;
+}
+
+/* Returns a pointer to the specified receive buffer in the RX
+ * descriptor queue.
+ */
+static inline struct efx_rx_buffer *efx_rx_buffer(struct efx_rx_queue *rx_queue,
+ unsigned int index)
+{
+ return &rx_queue->buffer[index];
+}
+
+/**
+ * EFX_MAX_FRAME_LEN - calculate maximum frame length
+ *
+ * This calculates the maximum frame length that will be used for a
+ * given MTU. The frame length will be equal to the MTU plus a
+ * constant amount of header space and padding. This is the quantity
+ * that the net driver will program into the MAC as the maximum frame
+ * length.
+ *
+ * The 10G MAC requires 8-byte alignment on the frame
+ * length, so we round up to the nearest 8.
+ *
+ * Re-clocking by the XGXS on RX can reduce an IPG to 32 bits (half an
+ * XGMII cycle). If the frame length reaches the maximum value in the
+ * same cycle, the XMAC can miss the IPG altogether. We work around
+ * this by adding a further 16 bytes.
+ */
+#define EFX_MAX_FRAME_LEN(mtu) \
+ ((((mtu) + ETH_HLEN + VLAN_HLEN + 4/* FCS */ + 7) & ~7) + 16)
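+/* For example, with a standard 1500-byte MTU this gives
+ * ((1500 + 14 + 4 + 4 + 7) & ~7) + 16 = 1528 + 16 = 1544 bytes.
+ */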
+
+static inline bool efx_xmit_with_hwtstamp(struct sk_buff *skb)
+{
+ return skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP;
+}
+static inline void efx_xmit_hwtstamp_pending(struct sk_buff *skb)
+{
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+}
+
+#endif /* EFX_NET_DRIVER_H */
diff --git a/kernel/drivers/net/ethernet/sfc/nic.c b/kernel/drivers/net/ethernet/sfc/nic.c
new file mode 100644
index 000000000..89b83e59e
--- /dev/null
+++ b/kernel/drivers/net/ethernet/sfc/nic.c
@@ -0,0 +1,534 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2013 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/cpu_rmap.h>
+#include "net_driver.h"
+#include "bitfield.h"
+#include "efx.h"
+#include "nic.h"
+#include "ef10_regs.h"
+#include "farch_regs.h"
+#include "io.h"
+#include "workarounds.h"
+
+/**************************************************************************
+ *
+ * Generic buffer handling
+ * These buffers are used for interrupt status, MAC stats, etc.
+ *
+ **************************************************************************/
+
+int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
+ unsigned int len, gfp_t gfp_flags)
+{
+ buffer->addr = dma_zalloc_coherent(&efx->pci_dev->dev, len,
+ &buffer->dma_addr, gfp_flags);
+ if (!buffer->addr)
+ return -ENOMEM;
+ buffer->len = len;
+ return 0;
+}
+
+void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
+{
+ if (buffer->addr) {
+ dma_free_coherent(&efx->pci_dev->dev, buffer->len,
+ buffer->addr, buffer->dma_addr);
+ buffer->addr = NULL;
+ }
+}
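+/* Illustrative sketch (not part of the driver): typical lifetime of one of
+ * these generic buffers, assuming "efx" has already been probed.  Real
+ * callers include the interrupt status buffer and the statistics buffer.
+ */
+#if 0
+static int example_buffer_use(struct efx_nic *efx)
+{
+	struct efx_buffer buf;
+	int rc;
+
+	rc = efx_nic_alloc_buffer(efx, &buf, 1024, GFP_KERNEL);
+	if (rc)
+		return rc;
+
+	/* buf.addr is CPU-visible; buf.dma_addr is what the NIC sees */
+	memset(buf.addr, 0, buf.len);
+
+	efx_nic_free_buffer(efx, &buf);
+	return 0;
+}
+#endif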
+
+/* Check whether an event is present in the eventq at the current
+ * read pointer. Only useful for self-test.
+ */
+bool efx_nic_event_present(struct efx_channel *channel)
+{
+ return efx_event_present(efx_event(channel, channel->eventq_read_ptr));
+}
+
+void efx_nic_event_test_start(struct efx_channel *channel)
+{
+ channel->event_test_cpu = -1;
+ smp_wmb();
+ channel->efx->type->ev_test_generate(channel);
+}
+
+void efx_nic_irq_test_start(struct efx_nic *efx)
+{
+ efx->last_irq_cpu = -1;
+ smp_wmb();
+ efx->type->irq_test_generate(efx);
+}
+
+/* Hook interrupt handler(s)
+ * Try MSI and then legacy interrupts.
+ */
+int efx_nic_init_interrupt(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+ unsigned int n_irqs;
+ int rc;
+
+ if (!EFX_INT_MODE_USE_MSI(efx)) {
+ rc = request_irq(efx->legacy_irq,
+ efx->type->irq_handle_legacy, IRQF_SHARED,
+ efx->name, efx);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev,
+ "failed to hook legacy IRQ %d\n",
+ efx->pci_dev->irq);
+ goto fail1;
+ }
+ return 0;
+ }
+
+#ifdef CONFIG_RFS_ACCEL
+ if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
+ efx->net_dev->rx_cpu_rmap =
+ alloc_irq_cpu_rmap(efx->n_rx_channels);
+ if (!efx->net_dev->rx_cpu_rmap) {
+ rc = -ENOMEM;
+ goto fail1;
+ }
+ }
+#endif
+
+ /* Hook MSI or MSI-X interrupt */
+ n_irqs = 0;
+ efx_for_each_channel(channel, efx) {
+ rc = request_irq(channel->irq, efx->type->irq_handle_msi,
+ IRQF_PROBE_SHARED, /* Not shared */
+ efx->msi_context[channel->channel].name,
+ &efx->msi_context[channel->channel]);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev,
+ "failed to hook IRQ %d\n", channel->irq);
+ goto fail2;
+ }
+ ++n_irqs;
+
+#ifdef CONFIG_RFS_ACCEL
+ if (efx->interrupt_mode == EFX_INT_MODE_MSIX &&
+ channel->channel < efx->n_rx_channels) {
+ rc = irq_cpu_rmap_add(efx->net_dev->rx_cpu_rmap,
+ channel->irq);
+ if (rc)
+ goto fail2;
+ }
+#endif
+ }
+
+ return 0;
+
+ fail2:
+#ifdef CONFIG_RFS_ACCEL
+ free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
+ efx->net_dev->rx_cpu_rmap = NULL;
+#endif
+ efx_for_each_channel(channel, efx) {
+ if (n_irqs-- == 0)
+ break;
+ free_irq(channel->irq, &efx->msi_context[channel->channel]);
+ }
+ fail1:
+ return rc;
+}
+
+void efx_nic_fini_interrupt(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+#ifdef CONFIG_RFS_ACCEL
+ free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
+ efx->net_dev->rx_cpu_rmap = NULL;
+#endif
+
+ if (EFX_INT_MODE_USE_MSI(efx)) {
+ /* Disable MSI/MSI-X interrupts */
+ efx_for_each_channel(channel, efx)
+ free_irq(channel->irq,
+ &efx->msi_context[channel->channel]);
+ } else {
+ /* Disable legacy interrupt */
+ free_irq(efx->legacy_irq, efx);
+ }
+}
+
+/* Register dump */
+
+#define REGISTER_REVISION_FA 1
+#define REGISTER_REVISION_FB 2
+#define REGISTER_REVISION_FC 3
+#define REGISTER_REVISION_FZ 3 /* last Falcon arch revision */
+#define REGISTER_REVISION_ED 4
+#define REGISTER_REVISION_EZ 4 /* latest EF10 revision */
+
+struct efx_nic_reg {
+ u32 offset:24;
+ u32 min_revision:3, max_revision:3;
+};
+
+#define REGISTER(name, arch, min_rev, max_rev) { \
+ arch ## R_ ## min_rev ## max_rev ## _ ## name, \
+ REGISTER_REVISION_ ## arch ## min_rev, \
+ REGISTER_REVISION_ ## arch ## max_rev \
+}
+#define REGISTER_AA(name) REGISTER(name, F, A, A)
+#define REGISTER_AB(name) REGISTER(name, F, A, B)
+#define REGISTER_AZ(name) REGISTER(name, F, A, Z)
+#define REGISTER_BB(name) REGISTER(name, F, B, B)
+#define REGISTER_BZ(name) REGISTER(name, F, B, Z)
+#define REGISTER_CZ(name) REGISTER(name, F, C, Z)
+#define REGISTER_DZ(name) REGISTER(name, E, D, Z)
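+/* For example, REGISTER_AB(GPIO_CTL) expands to
+ * { FR_AB_GPIO_CTL, REGISTER_REVISION_FA, REGISTER_REVISION_FB },
+ * i.e. a register present only on Falcon revisions A and B.
+ */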
+
+static const struct efx_nic_reg efx_nic_regs[] = {
+ REGISTER_AZ(ADR_REGION),
+ REGISTER_AZ(INT_EN_KER),
+ REGISTER_BZ(INT_EN_CHAR),
+ REGISTER_AZ(INT_ADR_KER),
+ REGISTER_BZ(INT_ADR_CHAR),
+ /* INT_ACK_KER is WO */
+ /* INT_ISR0 is RC */
+ REGISTER_AZ(HW_INIT),
+ REGISTER_CZ(USR_EV_CFG),
+ REGISTER_AB(EE_SPI_HCMD),
+ REGISTER_AB(EE_SPI_HADR),
+ REGISTER_AB(EE_SPI_HDATA),
+ REGISTER_AB(EE_BASE_PAGE),
+ REGISTER_AB(EE_VPD_CFG0),
+ /* EE_VPD_SW_CNTL and EE_VPD_SW_DATA are not used */
+ /* PMBX_DBG_IADDR and PBMX_DBG_IDATA are indirect */
+ /* PCIE_CORE_INDIRECT is indirect */
+ REGISTER_AB(NIC_STAT),
+ REGISTER_AB(GPIO_CTL),
+ REGISTER_AB(GLB_CTL),
+ /* FATAL_INTR_KER and FATAL_INTR_CHAR are partly RC */
+ REGISTER_BZ(DP_CTRL),
+ REGISTER_AZ(MEM_STAT),
+ REGISTER_AZ(CS_DEBUG),
+ REGISTER_AZ(ALTERA_BUILD),
+ REGISTER_AZ(CSR_SPARE),
+ REGISTER_AB(PCIE_SD_CTL0123),
+ REGISTER_AB(PCIE_SD_CTL45),
+ REGISTER_AB(PCIE_PCS_CTL_STAT),
+ /* DEBUG_DATA_OUT is not used */
+ /* DRV_EV is WO */
+ REGISTER_AZ(EVQ_CTL),
+ REGISTER_AZ(EVQ_CNT1),
+ REGISTER_AZ(EVQ_CNT2),
+ REGISTER_AZ(BUF_TBL_CFG),
+ REGISTER_AZ(SRM_RX_DC_CFG),
+ REGISTER_AZ(SRM_TX_DC_CFG),
+ REGISTER_AZ(SRM_CFG),
+ /* BUF_TBL_UPD is WO */
+ REGISTER_AZ(SRM_UPD_EVQ),
+ REGISTER_AZ(SRAM_PARITY),
+ REGISTER_AZ(RX_CFG),
+ REGISTER_BZ(RX_FILTER_CTL),
+ /* RX_FLUSH_DESCQ is WO */
+ REGISTER_AZ(RX_DC_CFG),
+ REGISTER_AZ(RX_DC_PF_WM),
+ REGISTER_BZ(RX_RSS_TKEY),
+ /* RX_NODESC_DROP is RC */
+ REGISTER_AA(RX_SELF_RST),
+ /* RX_DEBUG, RX_PUSH_DROP are not used */
+ REGISTER_CZ(RX_RSS_IPV6_REG1),
+ REGISTER_CZ(RX_RSS_IPV6_REG2),
+ REGISTER_CZ(RX_RSS_IPV6_REG3),
+ /* TX_FLUSH_DESCQ is WO */
+ REGISTER_AZ(TX_DC_CFG),
+ REGISTER_AA(TX_CHKSM_CFG),
+ REGISTER_AZ(TX_CFG),
+ /* TX_PUSH_DROP is not used */
+ REGISTER_AZ(TX_RESERVED),
+ REGISTER_BZ(TX_PACE),
+ /* TX_PACE_DROP_QID is RC */
+ REGISTER_BB(TX_VLAN),
+ REGISTER_BZ(TX_IPFIL_PORTEN),
+ REGISTER_AB(MD_TXD),
+ REGISTER_AB(MD_RXD),
+ REGISTER_AB(MD_CS),
+ REGISTER_AB(MD_PHY_ADR),
+ REGISTER_AB(MD_ID),
+ /* MD_STAT is RC */
+ REGISTER_AB(MAC_STAT_DMA),
+ REGISTER_AB(MAC_CTRL),
+ REGISTER_BB(GEN_MODE),
+ REGISTER_AB(MAC_MC_HASH_REG0),
+ REGISTER_AB(MAC_MC_HASH_REG1),
+ REGISTER_AB(GM_CFG1),
+ REGISTER_AB(GM_CFG2),
+ /* GM_IPG and GM_HD are not used */
+ REGISTER_AB(GM_MAX_FLEN),
+ /* GM_TEST is not used */
+ REGISTER_AB(GM_ADR1),
+ REGISTER_AB(GM_ADR2),
+ REGISTER_AB(GMF_CFG0),
+ REGISTER_AB(GMF_CFG1),
+ REGISTER_AB(GMF_CFG2),
+ REGISTER_AB(GMF_CFG3),
+ REGISTER_AB(GMF_CFG4),
+ REGISTER_AB(GMF_CFG5),
+ REGISTER_BB(TX_SRC_MAC_CTL),
+ REGISTER_AB(XM_ADR_LO),
+ REGISTER_AB(XM_ADR_HI),
+ REGISTER_AB(XM_GLB_CFG),
+ REGISTER_AB(XM_TX_CFG),
+ REGISTER_AB(XM_RX_CFG),
+ REGISTER_AB(XM_MGT_INT_MASK),
+ REGISTER_AB(XM_FC),
+ REGISTER_AB(XM_PAUSE_TIME),
+ REGISTER_AB(XM_TX_PARAM),
+ REGISTER_AB(XM_RX_PARAM),
+ /* XM_MGT_INT_MSK (note no 'A') is RC */
+ REGISTER_AB(XX_PWR_RST),
+ REGISTER_AB(XX_SD_CTL),
+ REGISTER_AB(XX_TXDRV_CTL),
+ /* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */
+ /* XX_CORE_STAT is partly RC */
+ REGISTER_DZ(BIU_HW_REV_ID),
+ REGISTER_DZ(MC_DB_LWRD),
+ REGISTER_DZ(MC_DB_HWRD),
+};
+
+struct efx_nic_reg_table {
+ u32 offset:24;
+ u32 min_revision:3, max_revision:3;
+ u32 step:6, rows:21;
+};
+
+#define REGISTER_TABLE_DIMENSIONS(_, offset, arch, min_rev, max_rev, step, rows) { \
+ offset, \
+ REGISTER_REVISION_ ## arch ## min_rev, \
+ REGISTER_REVISION_ ## arch ## max_rev, \
+ step, rows \
+}
+#define REGISTER_TABLE(name, arch, min_rev, max_rev) \
+ REGISTER_TABLE_DIMENSIONS( \
+ name, arch ## R_ ## min_rev ## max_rev ## _ ## name, \
+ arch, min_rev, max_rev, \
+ arch ## R_ ## min_rev ## max_rev ## _ ## name ## _STEP, \
+ arch ## R_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
+#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, F, A, A)
+#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, F, A, Z)
+#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, F, B, B)
+#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, F, B, Z)
+#define REGISTER_TABLE_BB_CZ(name) \
+ REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, F, B, B, \
+ FR_BZ_ ## name ## _STEP, \
+ FR_BB_ ## name ## _ROWS), \
+ REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, F, C, Z, \
+ FR_BZ_ ## name ## _STEP, \
+ FR_CZ_ ## name ## _ROWS)
+#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, F, C, Z)
+#define REGISTER_TABLE_DZ(name) REGISTER_TABLE(name, E, D, Z)
+
+static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
+ /* DRIVER is not used */
+ /* EVQ_RPTR, TIMER_COMMAND, USR_EV and {RX,TX}_DESC_UPD are WO */
+ REGISTER_TABLE_BB(TX_IPFIL_TBL),
+ REGISTER_TABLE_BB(TX_SRC_MAC_TBL),
+ REGISTER_TABLE_AA(RX_DESC_PTR_TBL_KER),
+ REGISTER_TABLE_BB_CZ(RX_DESC_PTR_TBL),
+ REGISTER_TABLE_AA(TX_DESC_PTR_TBL_KER),
+ REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL),
+ REGISTER_TABLE_AA(EVQ_PTR_TBL_KER),
+ REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL),
+ /* We can't reasonably read all of the buffer table (up to 8MB!).
+ * However this driver will only use a few entries. Reading
+ * 1K entries allows for some expansion of queue count and
+ * size before we need to change the version. */
+ REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER,
+ F, A, A, 8, 1024),
+ REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
+ F, B, Z, 8, 1024),
+ REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
+ REGISTER_TABLE_BB_CZ(TIMER_TBL),
+ REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
+ REGISTER_TABLE_BZ(RX_INDIRECTION_TBL),
+ /* TX_FILTER_TBL0 is huge and not used by this driver */
+ REGISTER_TABLE_CZ(TX_MAC_FILTER_TBL0),
+ REGISTER_TABLE_CZ(MC_TREG_SMEM),
+ /* MSIX_PBA_TABLE is not mapped */
+	/* SRM_DBG is not mapped (and is redundant with BUF_FULL_TBL) */
+ REGISTER_TABLE_BZ(RX_FILTER_TBL0),
+ REGISTER_TABLE_DZ(BIU_MC_SFT_STATUS),
+};
+
+size_t efx_nic_get_regs_len(struct efx_nic *efx)
+{
+ const struct efx_nic_reg *reg;
+ const struct efx_nic_reg_table *table;
+ size_t len = 0;
+
+ for (reg = efx_nic_regs;
+ reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
+ reg++)
+ if (efx->type->revision >= reg->min_revision &&
+ efx->type->revision <= reg->max_revision)
+ len += sizeof(efx_oword_t);
+
+ for (table = efx_nic_reg_tables;
+ table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
+ table++)
+ if (efx->type->revision >= table->min_revision &&
+ efx->type->revision <= table->max_revision)
+ len += table->rows * min_t(size_t, table->step, 16);
+
+ return len;
+}
+
+void efx_nic_get_regs(struct efx_nic *efx, void *buf)
+{
+ const struct efx_nic_reg *reg;
+ const struct efx_nic_reg_table *table;
+
+ for (reg = efx_nic_regs;
+ reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
+ reg++) {
+ if (efx->type->revision >= reg->min_revision &&
+ efx->type->revision <= reg->max_revision) {
+ efx_reado(efx, (efx_oword_t *)buf, reg->offset);
+ buf += sizeof(efx_oword_t);
+ }
+ }
+
+ for (table = efx_nic_reg_tables;
+ table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
+ table++) {
+ size_t size, i;
+
+ if (!(efx->type->revision >= table->min_revision &&
+ efx->type->revision <= table->max_revision))
+ continue;
+
+ size = min_t(size_t, table->step, 16);
+
+ for (i = 0; i < table->rows; i++) {
+ switch (table->step) {
+ case 4: /* 32-bit SRAM */
+ efx_readd(efx, buf, table->offset + 4 * i);
+ break;
+ case 8: /* 64-bit SRAM */
+ efx_sram_readq(efx,
+ efx->membase + table->offset,
+ buf, i);
+ break;
+ case 16: /* 128-bit-readable register */
+ efx_reado_table(efx, buf, table->offset, i);
+ break;
+ case 32: /* 128-bit register, interleaved */
+ efx_reado_table(efx, buf, table->offset, 2 * i);
+ break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+ buf += size;
+ }
+ }
+}
+
+/**
+ * efx_nic_describe_stats - Describe supported statistics for ethtool
+ * @desc: Array of &struct efx_hw_stat_desc describing the statistics
+ * @count: Length of the @desc array
+ * @mask: Bitmask of which elements of @desc are enabled
+ * @names: Buffer to copy names to, or %NULL. The names are copied
+ * starting at intervals of %ETH_GSTRING_LEN bytes.
+ *
+ * Returns the number of visible statistics, i.e. the number of set
+ * bits in the first @count bits of @mask for which a name is defined.
+ */
+size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
+ const unsigned long *mask, u8 *names)
+{
+ size_t visible = 0;
+ size_t index;
+
+ for_each_set_bit(index, mask, count) {
+ if (desc[index].name) {
+ if (names) {
+ strlcpy(names, desc[index].name,
+ ETH_GSTRING_LEN);
+ names += ETH_GSTRING_LEN;
+ }
+ ++visible;
+ }
+ }
+
+ return visible;
+}
+
+/**
+ * efx_nic_update_stats - Convert statistics DMA buffer to array of u64
+ * @desc: Array of &struct efx_hw_stat_desc describing the DMA buffer
+ * layout. DMA widths of 0, 16, 32 and 64 are supported; where
+ * the width is specified as 0 the corresponding element of
+ * @stats is not updated.
+ * @count: Length of the @desc array
+ * @mask: Bitmask of which elements of @desc are enabled
+ * @stats: Buffer to update with the converted statistics. The length
+ * of this array must be at least @count.
+ * @dma_buf: DMA buffer containing hardware statistics
+ * @accumulate: If set, the converted values will be added rather than
+ * directly stored to the corresponding elements of @stats
+ */
+void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
+ const unsigned long *mask,
+ u64 *stats, const void *dma_buf, bool accumulate)
+{
+ size_t index;
+
+ for_each_set_bit(index, mask, count) {
+ if (desc[index].dma_width) {
+ const void *addr = dma_buf + desc[index].offset;
+ u64 val;
+
+ switch (desc[index].dma_width) {
+ case 16:
+ val = le16_to_cpup((__le16 *)addr);
+ break;
+ case 32:
+ val = le32_to_cpup((__le32 *)addr);
+ break;
+ case 64:
+ val = le64_to_cpup((__le64 *)addr);
+ break;
+ default:
+ WARN_ON(1);
+ val = 0;
+ break;
+ }
+
+ if (accumulate)
+ stats[index] += val;
+ else
+ stats[index] = val;
+ }
+ }
+}
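+/* Illustrative sketch (not part of the driver): how a NIC type might use
+ * efx_nic_describe_stats()/efx_nic_update_stats() with a two-entry
+ * descriptor table.  The names, offsets and mask here are made up.
+ */
+#if 0
+static const struct efx_hw_stat_desc example_stat_desc[] = {
+	[0] = { "example_rx_packets", 64, 0 },	/* 64-bit counter at offset 0 */
+	[1] = { "example_rx_bytes",   64, 8 },	/* 64-bit counter at offset 8 */
+};
+
+static void example_stats(const void *dma_buf, u64 *stats, u8 *names)
+{
+	DECLARE_BITMAP(mask, ARRAY_SIZE(example_stat_desc));
+
+	bitmap_fill(mask, ARRAY_SIZE(example_stat_desc));
+	efx_nic_describe_stats(example_stat_desc, ARRAY_SIZE(example_stat_desc),
+			       mask, names);
+	efx_nic_update_stats(example_stat_desc, ARRAY_SIZE(example_stat_desc),
+			     mask, stats, dma_buf, false);
+}
+#endif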
+
+void efx_nic_fix_nodesc_drop_stat(struct efx_nic *efx, u64 *rx_nodesc_drops)
+{
+ /* if down, or this is the first update after coming up */
+ if (!(efx->net_dev->flags & IFF_UP) || !efx->rx_nodesc_drops_prev_state)
+ efx->rx_nodesc_drops_while_down +=
+ *rx_nodesc_drops - efx->rx_nodesc_drops_total;
+ efx->rx_nodesc_drops_total = *rx_nodesc_drops;
+ efx->rx_nodesc_drops_prev_state = !!(efx->net_dev->flags & IFF_UP);
+ *rx_nodesc_drops -= efx->rx_nodesc_drops_while_down;
+}
diff --git a/kernel/drivers/net/ethernet/sfc/nic.h b/kernel/drivers/net/ethernet/sfc/nic.h
new file mode 100644
index 000000000..93d10cbbd
--- /dev/null
+++ b/kernel/drivers/net/ethernet/sfc/nic.h
@@ -0,0 +1,868 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2013 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_NIC_H
+#define EFX_NIC_H
+
+#include <linux/net_tstamp.h>
+#include <linux/i2c-algo-bit.h>
+#include "net_driver.h"
+#include "efx.h"
+#include "mcdi.h"
+
+enum {
+ EFX_REV_FALCON_A0 = 0,
+ EFX_REV_FALCON_A1 = 1,
+ EFX_REV_FALCON_B0 = 2,
+ EFX_REV_SIENA_A0 = 3,
+ EFX_REV_HUNT_A0 = 4,
+};
+
+static inline int efx_nic_rev(struct efx_nic *efx)
+{
+ return efx->type->revision;
+}
+
+u32 efx_farch_fpga_ver(struct efx_nic *efx);
+
+/* NIC has two interlinked PCI functions for the same port. */
+static inline bool efx_nic_is_dual_func(struct efx_nic *efx)
+{
+ return efx_nic_rev(efx) < EFX_REV_FALCON_B0;
+}
+
+/* Read the current event from the event queue */
+static inline efx_qword_t *efx_event(struct efx_channel *channel,
+ unsigned int index)
+{
+ return ((efx_qword_t *) (channel->eventq.buf.addr)) +
+ (index & channel->eventq_mask);
+}
+
+/* See if an event is present
+ *
+ * We check both the high and low dword of the event for all ones. We
+ * wrote all ones when we cleared the event, and no valid event can
+ * have all ones in either its high or low dwords. This approach is
+ * robust against reordering.
+ *
+ * Note that using a single 64-bit comparison is incorrect; even
+ * though the CPU read will be atomic, the DMA write may not be.
+ */
+static inline int efx_event_present(efx_qword_t *event)
+{
+ return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
+ EFX_DWORD_IS_ALL_ONES(event->dword[1]));
+}
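+/* Illustrative sketch (not part of the driver): peeking at the next event
+ * using the two helpers above.  This is essentially what
+ * efx_nic_event_present() in nic.c does for the self-tests.
+ */
+#if 0
+static bool example_eventq_has_event(struct efx_channel *channel)
+{
+	return efx_event_present(efx_event(channel,
+					   channel->eventq_read_ptr));
+}
+#endif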
+
+/* Returns a pointer to the specified transmit descriptor in the TX
+ * descriptor queue belonging to the specified channel.
+ */
+static inline efx_qword_t *
+efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
+{
+ return ((efx_qword_t *) (tx_queue->txd.buf.addr)) + index;
+}
+
+/* Get partner of a TX queue, seen as part of the same net core queue */
+static inline struct efx_tx_queue *
+efx_tx_queue_partner(struct efx_tx_queue *tx_queue)
+{
+ if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
+ return tx_queue - EFX_TXQ_TYPE_OFFLOAD;
+ else
+ return tx_queue + EFX_TXQ_TYPE_OFFLOAD;
+}
+
+/* Report whether this TX queue would be empty for the given write_count.
+ * May return false negative.
+ */
+static inline bool __efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue,
+ unsigned int write_count)
+{
+ unsigned int empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);
+
+ if (empty_read_count == 0)
+ return false;
+
+ return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
+}
+
+/* Decide whether we can use TX PIO, i.e. write packet data directly into
+ * a buffer on the device. This can reduce latency at the expense of
+ * throughput, so we only do this if both hardware and software TX rings
+ * are empty. This also ensures that only one packet at a time can be
+ * using the PIO buffer.
+ */
+static inline bool efx_nic_may_tx_pio(struct efx_tx_queue *tx_queue)
+{
+ struct efx_tx_queue *partner = efx_tx_queue_partner(tx_queue);
+ return tx_queue->piobuf &&
+ __efx_nic_tx_is_empty(tx_queue, tx_queue->insert_count) &&
+ __efx_nic_tx_is_empty(partner, partner->insert_count);
+}
+
+/* Decide whether to push a TX descriptor to the NIC vs merely writing
+ * the doorbell. This can reduce latency when we are adding a single
+ * descriptor to an empty queue, but is otherwise pointless. Further,
+ * Falcon and Siena have hardware bugs (SF bug 33851) that may be
+ * triggered if we don't check this.
+ * We use the write_count used for the last doorbell push, to get the
+ * NIC's view of the tx queue.
+ */
+static inline bool efx_nic_may_push_tx_desc(struct efx_tx_queue *tx_queue,
+ unsigned int write_count)
+{
+ bool was_empty = __efx_nic_tx_is_empty(tx_queue, write_count);
+
+ tx_queue->empty_read_count = 0;
+ return was_empty && tx_queue->write_count - write_count == 1;
+}
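+/* Illustrative sketch (not part of the driver): the Falcon-architecture TX
+ * write path keeps the write_count from before new descriptors were added
+ * and asks efx_nic_may_push_tx_desc() whether to push the first descriptor
+ * together with the doorbell or just ring the doorbell.
+ */
+#if 0
+static void example_tx_write(struct efx_tx_queue *tx_queue)
+{
+	unsigned int old_write_count = tx_queue->write_count;
+
+	/* ... fill in descriptors, advancing tx_queue->write_count ... */
+
+	if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) {
+		/* write first descriptor and doorbell in one operation */
+	} else {
+		/* write doorbell only */
+	}
+}
+#endif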
+
+/* Returns a pointer to the specified descriptor in the RX descriptor queue */
+static inline efx_qword_t *
+efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
+{
+ return ((efx_qword_t *) (rx_queue->rxd.buf.addr)) + index;
+}
+
+enum {
+ PHY_TYPE_NONE = 0,
+ PHY_TYPE_TXC43128 = 1,
+ PHY_TYPE_88E1111 = 2,
+ PHY_TYPE_SFX7101 = 3,
+ PHY_TYPE_QT2022C2 = 4,
+ PHY_TYPE_PM8358 = 6,
+ PHY_TYPE_SFT9001A = 8,
+ PHY_TYPE_QT2025C = 9,
+ PHY_TYPE_SFT9001B = 10,
+};
+
+#define FALCON_XMAC_LOOPBACKS \
+ ((1 << LOOPBACK_XGMII) | \
+ (1 << LOOPBACK_XGXS) | \
+ (1 << LOOPBACK_XAUI))
+
+/* Alignment of PCIe DMA boundaries (4KB) */
+#define EFX_PAGE_SIZE 4096
+/* Size and alignment of buffer table entries (same) */
+#define EFX_BUF_SIZE EFX_PAGE_SIZE
+
+/* NIC-generic software stats */
+enum {
+ GENERIC_STAT_rx_noskb_drops,
+ GENERIC_STAT_rx_nodesc_trunc,
+ GENERIC_STAT_COUNT
+};
+
+/**
+ * struct falcon_board_type - board operations and type information
+ * @id: Board type id, as found in NVRAM
+ * @init: Allocate resources and initialise peripheral hardware
+ * @init_phy: Do board-specific PHY initialisation
+ * @fini: Shut down hardware and free resources
+ * @set_id_led: Set state of identifying LED or revert to automatic function
+ * @monitor: Board-specific health check function
+ */
+struct falcon_board_type {
+ u8 id;
+ int (*init) (struct efx_nic *nic);
+ void (*init_phy) (struct efx_nic *efx);
+ void (*fini) (struct efx_nic *nic);
+ void (*set_id_led) (struct efx_nic *efx, enum efx_led_mode mode);
+ int (*monitor) (struct efx_nic *nic);
+};
+
+/**
+ * struct falcon_board - board information
+ * @type: Type of board
+ * @major: Major rev. ('A', 'B' ...)
+ * @minor: Minor rev. (0, 1, ...)
+ * @i2c_adap: I2C adapter for on-board peripherals
+ * @i2c_data: Data for bit-banging algorithm
+ * @hwmon_client: I2C client for hardware monitor
+ * @ioexp_client: I2C client for power/port control
+ */
+struct falcon_board {
+ const struct falcon_board_type *type;
+ int major;
+ int minor;
+ struct i2c_adapter i2c_adap;
+ struct i2c_algo_bit_data i2c_data;
+ struct i2c_client *hwmon_client, *ioexp_client;
+};
+
+/**
+ * struct falcon_spi_device - a Falcon SPI (Serial Peripheral Interface) device
+ * @device_id: Controller's id for the device
+ * @size: Size (in bytes)
+ * @addr_len: Number of address bytes in read/write commands
+ * @munge_address: Flag whether addresses should be munged.
+ * Some devices with 9-bit addresses (e.g. AT25040A EEPROM)
+ * use bit 3 of the command byte as address bit A8, rather
+ * than having a two-byte address. If this flag is set, then
+ * commands should be munged in this way.
+ * @erase_command: Erase command (or 0 if sector erase not needed).
+ * @erase_size: Erase sector size (in bytes)
+ * Erase commands affect sectors with this size and alignment.
+ * This must be a power of two.
+ * @block_size: Write block size (in bytes).
+ * Write commands are limited to blocks with this size and alignment.
+ */
+struct falcon_spi_device {
+ int device_id;
+ unsigned int size;
+ unsigned int addr_len;
+ unsigned int munge_address:1;
+ u8 erase_command;
+ unsigned int erase_size;
+ unsigned int block_size;
+};
+
+static inline bool falcon_spi_present(const struct falcon_spi_device *spi)
+{
+ return spi->size != 0;
+}
+
+enum {
+ FALCON_STAT_tx_bytes = GENERIC_STAT_COUNT,
+ FALCON_STAT_tx_packets,
+ FALCON_STAT_tx_pause,
+ FALCON_STAT_tx_control,
+ FALCON_STAT_tx_unicast,
+ FALCON_STAT_tx_multicast,
+ FALCON_STAT_tx_broadcast,
+ FALCON_STAT_tx_lt64,
+ FALCON_STAT_tx_64,
+ FALCON_STAT_tx_65_to_127,
+ FALCON_STAT_tx_128_to_255,
+ FALCON_STAT_tx_256_to_511,
+ FALCON_STAT_tx_512_to_1023,
+ FALCON_STAT_tx_1024_to_15xx,
+ FALCON_STAT_tx_15xx_to_jumbo,
+ FALCON_STAT_tx_gtjumbo,
+ FALCON_STAT_tx_non_tcpudp,
+ FALCON_STAT_tx_mac_src_error,
+ FALCON_STAT_tx_ip_src_error,
+ FALCON_STAT_rx_bytes,
+ FALCON_STAT_rx_good_bytes,
+ FALCON_STAT_rx_bad_bytes,
+ FALCON_STAT_rx_packets,
+ FALCON_STAT_rx_good,
+ FALCON_STAT_rx_bad,
+ FALCON_STAT_rx_pause,
+ FALCON_STAT_rx_control,
+ FALCON_STAT_rx_unicast,
+ FALCON_STAT_rx_multicast,
+ FALCON_STAT_rx_broadcast,
+ FALCON_STAT_rx_lt64,
+ FALCON_STAT_rx_64,
+ FALCON_STAT_rx_65_to_127,
+ FALCON_STAT_rx_128_to_255,
+ FALCON_STAT_rx_256_to_511,
+ FALCON_STAT_rx_512_to_1023,
+ FALCON_STAT_rx_1024_to_15xx,
+ FALCON_STAT_rx_15xx_to_jumbo,
+ FALCON_STAT_rx_gtjumbo,
+ FALCON_STAT_rx_bad_lt64,
+ FALCON_STAT_rx_bad_gtjumbo,
+ FALCON_STAT_rx_overflow,
+ FALCON_STAT_rx_symbol_error,
+ FALCON_STAT_rx_align_error,
+ FALCON_STAT_rx_length_error,
+ FALCON_STAT_rx_internal_error,
+ FALCON_STAT_rx_nodesc_drop_cnt,
+ FALCON_STAT_COUNT
+};
+
+/**
+ * struct falcon_nic_data - Falcon NIC state
+ * @pci_dev2: Secondary function of Falcon A
+ * @board: Board state and functions
+ * @stats: Hardware statistics
+ * @stats_disable_count: Nest count for disabling statistics fetches
+ * @stats_pending: Whether a DMA of MAC statistics is pending
+ * @stats_timer: A timer for regularly fetching MAC statistics.
+ * @spi_flash: SPI flash device
+ * @spi_eeprom: SPI EEPROM device
+ * @spi_lock: SPI bus lock
+ * @mdio_lock: MDIO bus lock
+ * @xmac_poll_required: XMAC link state needs polling
+ */
+struct falcon_nic_data {
+ struct pci_dev *pci_dev2;
+ struct falcon_board board;
+ u64 stats[FALCON_STAT_COUNT];
+ unsigned int stats_disable_count;
+ bool stats_pending;
+ struct timer_list stats_timer;
+ struct falcon_spi_device spi_flash;
+ struct falcon_spi_device spi_eeprom;
+ struct mutex spi_lock;
+ struct mutex mdio_lock;
+ bool xmac_poll_required;
+};
+
+static inline struct falcon_board *falcon_board(struct efx_nic *efx)
+{
+ struct falcon_nic_data *data = efx->nic_data;
+ return &data->board;
+}
+
+enum {
+ SIENA_STAT_tx_bytes = GENERIC_STAT_COUNT,
+ SIENA_STAT_tx_good_bytes,
+ SIENA_STAT_tx_bad_bytes,
+ SIENA_STAT_tx_packets,
+ SIENA_STAT_tx_bad,
+ SIENA_STAT_tx_pause,
+ SIENA_STAT_tx_control,
+ SIENA_STAT_tx_unicast,
+ SIENA_STAT_tx_multicast,
+ SIENA_STAT_tx_broadcast,
+ SIENA_STAT_tx_lt64,
+ SIENA_STAT_tx_64,
+ SIENA_STAT_tx_65_to_127,
+ SIENA_STAT_tx_128_to_255,
+ SIENA_STAT_tx_256_to_511,
+ SIENA_STAT_tx_512_to_1023,
+ SIENA_STAT_tx_1024_to_15xx,
+ SIENA_STAT_tx_15xx_to_jumbo,
+ SIENA_STAT_tx_gtjumbo,
+ SIENA_STAT_tx_collision,
+ SIENA_STAT_tx_single_collision,
+ SIENA_STAT_tx_multiple_collision,
+ SIENA_STAT_tx_excessive_collision,
+ SIENA_STAT_tx_deferred,
+ SIENA_STAT_tx_late_collision,
+ SIENA_STAT_tx_excessive_deferred,
+ SIENA_STAT_tx_non_tcpudp,
+ SIENA_STAT_tx_mac_src_error,
+ SIENA_STAT_tx_ip_src_error,
+ SIENA_STAT_rx_bytes,
+ SIENA_STAT_rx_good_bytes,
+ SIENA_STAT_rx_bad_bytes,
+ SIENA_STAT_rx_packets,
+ SIENA_STAT_rx_good,
+ SIENA_STAT_rx_bad,
+ SIENA_STAT_rx_pause,
+ SIENA_STAT_rx_control,
+ SIENA_STAT_rx_unicast,
+ SIENA_STAT_rx_multicast,
+ SIENA_STAT_rx_broadcast,
+ SIENA_STAT_rx_lt64,
+ SIENA_STAT_rx_64,
+ SIENA_STAT_rx_65_to_127,
+ SIENA_STAT_rx_128_to_255,
+ SIENA_STAT_rx_256_to_511,
+ SIENA_STAT_rx_512_to_1023,
+ SIENA_STAT_rx_1024_to_15xx,
+ SIENA_STAT_rx_15xx_to_jumbo,
+ SIENA_STAT_rx_gtjumbo,
+ SIENA_STAT_rx_bad_gtjumbo,
+ SIENA_STAT_rx_overflow,
+ SIENA_STAT_rx_false_carrier,
+ SIENA_STAT_rx_symbol_error,
+ SIENA_STAT_rx_align_error,
+ SIENA_STAT_rx_length_error,
+ SIENA_STAT_rx_internal_error,
+ SIENA_STAT_rx_nodesc_drop_cnt,
+ SIENA_STAT_COUNT
+};
+
+/**
+ * struct siena_nic_data - Siena NIC state
+ * @efx: Pointer back to main interface structure
+ * @wol_filter_id: Wake-on-LAN packet filter id
+ * @stats: Hardware statistics
+ * @vf_buftbl_base: The zeroth buffer table index used to back VF queues.
+ * @vfdi_status: Common VFDI status page to be DMAed to VF address space.
+ * @local_addr_list: List of local addresses. Protected by %local_lock.
+ * @local_page_list: List of DMA addressable pages used to broadcast
+ * %local_addr_list. Protected by %local_lock.
+ * @local_lock: Mutex protecting %local_addr_list and %local_page_list.
+ * @peer_work: Work item to broadcast peer addresses to VMs.
+ */
+struct siena_nic_data {
+ struct efx_nic *efx;
+ int wol_filter_id;
+ u64 stats[SIENA_STAT_COUNT];
+#ifdef CONFIG_SFC_SRIOV
+ struct efx_channel *vfdi_channel;
+ unsigned vf_buftbl_base;
+ struct efx_buffer vfdi_status;
+ struct list_head local_addr_list;
+ struct list_head local_page_list;
+ struct mutex local_lock;
+ struct work_struct peer_work;
+#endif
+};
+
+enum {
+ EF10_STAT_tx_bytes = GENERIC_STAT_COUNT,
+ EF10_STAT_tx_packets,
+ EF10_STAT_tx_pause,
+ EF10_STAT_tx_control,
+ EF10_STAT_tx_unicast,
+ EF10_STAT_tx_multicast,
+ EF10_STAT_tx_broadcast,
+ EF10_STAT_tx_lt64,
+ EF10_STAT_tx_64,
+ EF10_STAT_tx_65_to_127,
+ EF10_STAT_tx_128_to_255,
+ EF10_STAT_tx_256_to_511,
+ EF10_STAT_tx_512_to_1023,
+ EF10_STAT_tx_1024_to_15xx,
+ EF10_STAT_tx_15xx_to_jumbo,
+ EF10_STAT_rx_bytes,
+ EF10_STAT_rx_bytes_minus_good_bytes,
+ EF10_STAT_rx_good_bytes,
+ EF10_STAT_rx_bad_bytes,
+ EF10_STAT_rx_packets,
+ EF10_STAT_rx_good,
+ EF10_STAT_rx_bad,
+ EF10_STAT_rx_pause,
+ EF10_STAT_rx_control,
+ EF10_STAT_rx_unicast,
+ EF10_STAT_rx_multicast,
+ EF10_STAT_rx_broadcast,
+ EF10_STAT_rx_lt64,
+ EF10_STAT_rx_64,
+ EF10_STAT_rx_65_to_127,
+ EF10_STAT_rx_128_to_255,
+ EF10_STAT_rx_256_to_511,
+ EF10_STAT_rx_512_to_1023,
+ EF10_STAT_rx_1024_to_15xx,
+ EF10_STAT_rx_15xx_to_jumbo,
+ EF10_STAT_rx_gtjumbo,
+ EF10_STAT_rx_bad_gtjumbo,
+ EF10_STAT_rx_overflow,
+ EF10_STAT_rx_align_error,
+ EF10_STAT_rx_length_error,
+ EF10_STAT_rx_nodesc_drops,
+ EF10_STAT_rx_pm_trunc_bb_overflow,
+ EF10_STAT_rx_pm_discard_bb_overflow,
+ EF10_STAT_rx_pm_trunc_vfifo_full,
+ EF10_STAT_rx_pm_discard_vfifo_full,
+ EF10_STAT_rx_pm_trunc_qbb,
+ EF10_STAT_rx_pm_discard_qbb,
+ EF10_STAT_rx_pm_discard_mapping,
+ EF10_STAT_rx_dp_q_disabled_packets,
+ EF10_STAT_rx_dp_di_dropped_packets,
+ EF10_STAT_rx_dp_streaming_packets,
+ EF10_STAT_rx_dp_hlb_fetch,
+ EF10_STAT_rx_dp_hlb_wait,
+ EF10_STAT_COUNT
+};
+
+/* Maximum number of TX PIO buffers we may allocate to a function.
+ * This matches the total number of buffers on each SFC9100-family
+ * controller.
+ */
+#define EF10_TX_PIOBUF_COUNT 16
+
+/**
+ * struct efx_ef10_nic_data - EF10 architecture NIC state
+ * @mcdi_buf: DMA buffer for MCDI
+ * @warm_boot_count: Last seen MC warm boot count
+ * @vi_base: Absolute index of first VI in this function
+ * @n_allocated_vis: Number of VIs allocated to this function
+ * @must_realloc_vis: Flag: VIs have yet to be reallocated after MC reboot
+ * @must_restore_filters: Flag: filters have yet to be restored after MC reboot
+ * @n_piobufs: Number of PIO buffers allocated to this function
+ * @wc_membase: Base address of write-combining mapping of the memory BAR
+ * @pio_write_base: Base address for writing PIO buffers
+ * @pio_write_vi_base: Relative VI number for @pio_write_base
+ * @piobuf_handle: Handle of each PIO buffer allocated
+ * @must_restore_piobufs: Flag: PIO buffers have yet to be restored after MC
+ * reboot
+ * @rx_rss_context: Firmware handle for our RSS context
+ * @stats: Hardware statistics
+ * @workaround_35388: Flag: firmware supports workaround for bug 35388
+ * @must_check_datapath_caps: Flag: @datapath_caps needs to be revalidated
+ * after MC reboot
+ * @datapath_caps: Capabilities of datapath firmware (FLAGS1 field of
+ * %MC_CMD_GET_CAPABILITIES response)
+ */
+struct efx_ef10_nic_data {
+ struct efx_buffer mcdi_buf;
+ u16 warm_boot_count;
+ unsigned int vi_base;
+ unsigned int n_allocated_vis;
+ bool must_realloc_vis;
+ bool must_restore_filters;
+ unsigned int n_piobufs;
+ void __iomem *wc_membase, *pio_write_base;
+ unsigned int pio_write_vi_base;
+ unsigned int piobuf_handle[EF10_TX_PIOBUF_COUNT];
+ bool must_restore_piobufs;
+ u32 rx_rss_context;
+ u64 stats[EF10_STAT_COUNT];
+ bool workaround_35388;
+ bool must_check_datapath_caps;
+ u32 datapath_caps;
+};
+
+/*
+ * On the SFC9000 family each port is associated with 1 PCI physical
+ * function (PF) handled by sfc and a configurable number of virtual
+ * functions (VFs) that may be handled by some other driver, often in
+ * a VM guest. The queue pointer registers are mapped in both PF and
+ * VF BARs such that an 8K region provides access to a single RX, TX
+ * and event queue (collectively a Virtual Interface, VI or VNIC).
+ *
+ * The PF has access to all 1024 VIs while VFs are mapped to VIs
+ * according to VI_BASE and VI_SCALE: VF i has access to VIs numbered
+ * in the range [VI_BASE + (i << VI_SCALE), VI_BASE + ((i + 1) << VI_SCALE)).
+ * The number of VIs and the VI_SCALE value are configurable but must
+ * be established at boot time by firmware.
+ */
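+
+/* Worked example (illustrative values only): with VI_BASE = 128 and
+ * VI_SCALE = 6, each VF owns 1 << 6 = 64 VIs, so VF 0 maps to VIs
+ * [128, 192) and VF 1 maps to VIs [192, 256).
+ */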
+
+/* Maximum VI_SCALE parameter supported by Siena */
+#define EFX_VI_SCALE_MAX 6
+/* Base VI to use for SR-IOV. Must be aligned to (1 << EFX_VI_SCALE_MAX),
+ * so this is the smallest allowed value. */
+#define EFX_VI_BASE 128U
+/* Maximum number of VFs allowed */
+#define EFX_VF_COUNT_MAX 127
+/* Limit EVQs on VFs to be only 8k to reduce buffer table reservation */
+#define EFX_MAX_VF_EVQ_SIZE 8192UL
+/* The number of buffer table entries reserved for each VI on a VF */
+#define EFX_VF_BUFTBL_PER_VI \
+ ((EFX_MAX_VF_EVQ_SIZE + 2 * EFX_MAX_DMAQ_SIZE) * \
+ sizeof(efx_qword_t) / EFX_BUF_SIZE)
+
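+/* For example, assuming EFX_MAX_DMAQ_SIZE == 4096 and EFX_BUF_SIZE == 4096
+ * (both defined elsewhere in the driver), each VI on a VF reserves
+ * (8192 + 2 * 4096) * 8 / 4096 = 32 buffer table entries.
+ */
+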
+#ifdef CONFIG_SFC_SRIOV
+
+/* SIENA */
+static inline bool efx_siena_sriov_wanted(struct efx_nic *efx)
+{
+ return efx->vf_count != 0;
+}
+
+static inline bool efx_siena_sriov_enabled(struct efx_nic *efx)
+{
+ return efx->vf_init_count != 0;
+}
+
+static inline unsigned int efx_vf_size(struct efx_nic *efx)
+{
+ return 1 << efx->vi_scale;
+}
+
+int efx_init_sriov(void);
+void efx_siena_sriov_probe(struct efx_nic *efx);
+int efx_siena_sriov_init(struct efx_nic *efx);
+void efx_siena_sriov_mac_address_changed(struct efx_nic *efx);
+void efx_siena_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event);
+void efx_siena_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event);
+void efx_siena_sriov_event(struct efx_channel *channel, efx_qword_t *event);
+void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq);
+void efx_siena_sriov_flr(struct efx_nic *efx, unsigned flr);
+void efx_siena_sriov_reset(struct efx_nic *efx);
+void efx_siena_sriov_fini(struct efx_nic *efx);
+void efx_fini_sriov(void);
+
+/* EF10 */
+static inline bool efx_ef10_sriov_wanted(struct efx_nic *efx) { return false; }
+static inline int efx_ef10_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; }
+static inline void efx_ef10_sriov_mac_address_changed(struct efx_nic *efx) {}
+static inline void efx_ef10_sriov_reset(struct efx_nic *efx) {}
+static inline void efx_ef10_sriov_fini(struct efx_nic *efx) {}
+
+#else
+
+/* SIENA */
+static inline bool efx_siena_sriov_wanted(struct efx_nic *efx) { return false; }
+static inline bool efx_siena_sriov_enabled(struct efx_nic *efx) { return false; }
+static inline unsigned int efx_vf_size(struct efx_nic *efx) { return 0; }
+static inline int efx_init_sriov(void) { return 0; }
+static inline void efx_siena_sriov_probe(struct efx_nic *efx) {}
+static inline int efx_siena_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; }
+static inline void efx_siena_sriov_mac_address_changed(struct efx_nic *efx) {}
+static inline void efx_siena_sriov_tx_flush_done(struct efx_nic *efx,
+ efx_qword_t *event) {}
+static inline void efx_siena_sriov_rx_flush_done(struct efx_nic *efx,
+ efx_qword_t *event) {}
+static inline void efx_siena_sriov_event(struct efx_channel *channel,
+ efx_qword_t *event) {}
+static inline void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx,
+ unsigned dmaq) {}
+static inline void efx_siena_sriov_flr(struct efx_nic *efx, unsigned flr) {}
+static inline void efx_siena_sriov_reset(struct efx_nic *efx) {}
+static inline void efx_siena_sriov_fini(struct efx_nic *efx) {}
+static inline void efx_fini_sriov(void) {}
+
+/* EF10 */
+static inline bool efx_ef10_sriov_wanted(struct efx_nic *efx) { return false; }
+static inline int efx_ef10_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; }
+static inline void efx_ef10_sriov_mac_address_changed(struct efx_nic *efx) {}
+static inline void efx_ef10_sriov_reset(struct efx_nic *efx) {}
+static inline void efx_ef10_sriov_fini(struct efx_nic *efx) {}
+
+#endif
+
+/* FALCON */
+static inline bool efx_falcon_sriov_wanted(struct efx_nic *efx) { return false; }
+static inline int efx_falcon_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; }
+static inline void efx_falcon_sriov_mac_address_changed(struct efx_nic *efx) {}
+static inline void efx_falcon_sriov_reset(struct efx_nic *efx) {}
+static inline void efx_falcon_sriov_fini(struct efx_nic *efx) {}
+
+int efx_siena_sriov_set_vf_mac(struct net_device *dev, int vf, u8 *mac);
+int efx_siena_sriov_set_vf_vlan(struct net_device *dev, int vf,
+ u16 vlan, u8 qos);
+int efx_siena_sriov_get_vf_config(struct net_device *dev, int vf,
+ struct ifla_vf_info *ivf);
+int efx_siena_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf,
+ bool spoofchk);
+
+struct ethtool_ts_info;
+int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel);
+void efx_ptp_defer_probe_with_channel(struct efx_nic *efx);
+void efx_ptp_remove(struct efx_nic *efx);
+int efx_ptp_set_ts_config(struct efx_nic *efx, struct ifreq *ifr);
+int efx_ptp_get_ts_config(struct efx_nic *efx, struct ifreq *ifr);
+void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info);
+bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
+int efx_ptp_get_mode(struct efx_nic *efx);
+int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
+ unsigned int new_mode);
+int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
+void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
+size_t efx_ptp_describe_stats(struct efx_nic *efx, u8 *strings);
+size_t efx_ptp_update_stats(struct efx_nic *efx, u64 *stats);
+void efx_time_sync_event(struct efx_channel *channel, efx_qword_t *ev);
+void __efx_rx_skb_attach_timestamp(struct efx_channel *channel,
+ struct sk_buff *skb);
+static inline void efx_rx_skb_attach_timestamp(struct efx_channel *channel,
+ struct sk_buff *skb)
+{
+ if (channel->sync_events_state == SYNC_EVENTS_VALID)
+ __efx_rx_skb_attach_timestamp(channel, skb);
+}
+void efx_ptp_start_datapath(struct efx_nic *efx);
+void efx_ptp_stop_datapath(struct efx_nic *efx);
+
+extern const struct efx_nic_type falcon_a1_nic_type;
+extern const struct efx_nic_type falcon_b0_nic_type;
+extern const struct efx_nic_type siena_a0_nic_type;
+extern const struct efx_nic_type efx_hunt_a0_nic_type;
+
+/**************************************************************************
+ *
+ * Externs
+ *
+ **************************************************************************
+ */
+
+int falcon_probe_board(struct efx_nic *efx, u16 revision_info);
+
+/* TX data path */
+static inline int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
+{
+ return tx_queue->efx->type->tx_probe(tx_queue);
+}
+static inline void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
+{
+ tx_queue->efx->type->tx_init(tx_queue);
+}
+static inline void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
+{
+ tx_queue->efx->type->tx_remove(tx_queue);
+}
+static inline void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
+{
+ tx_queue->efx->type->tx_write(tx_queue);
+}
+
+/* RX data path */
+static inline int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
+{
+ return rx_queue->efx->type->rx_probe(rx_queue);
+}
+static inline void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
+{
+ rx_queue->efx->type->rx_init(rx_queue);
+}
+static inline void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
+{
+ rx_queue->efx->type->rx_remove(rx_queue);
+}
+static inline void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
+{
+ rx_queue->efx->type->rx_write(rx_queue);
+}
+static inline void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue)
+{
+ rx_queue->efx->type->rx_defer_refill(rx_queue);
+}
+
+/* Event data path */
+static inline int efx_nic_probe_eventq(struct efx_channel *channel)
+{
+ return channel->efx->type->ev_probe(channel);
+}
+static inline int efx_nic_init_eventq(struct efx_channel *channel)
+{
+ return channel->efx->type->ev_init(channel);
+}
+static inline void efx_nic_fini_eventq(struct efx_channel *channel)
+{
+ channel->efx->type->ev_fini(channel);
+}
+static inline void efx_nic_remove_eventq(struct efx_channel *channel)
+{
+ channel->efx->type->ev_remove(channel);
+}
+static inline int
+efx_nic_process_eventq(struct efx_channel *channel, int quota)
+{
+ return channel->efx->type->ev_process(channel, quota);
+}
+static inline void efx_nic_eventq_read_ack(struct efx_channel *channel)
+{
+ channel->efx->type->ev_read_ack(channel);
+}
+void efx_nic_event_test_start(struct efx_channel *channel);
+
+/* Falcon/Siena queue operations */
+int efx_farch_tx_probe(struct efx_tx_queue *tx_queue);
+void efx_farch_tx_init(struct efx_tx_queue *tx_queue);
+void efx_farch_tx_fini(struct efx_tx_queue *tx_queue);
+void efx_farch_tx_remove(struct efx_tx_queue *tx_queue);
+void efx_farch_tx_write(struct efx_tx_queue *tx_queue);
+int efx_farch_rx_probe(struct efx_rx_queue *rx_queue);
+void efx_farch_rx_init(struct efx_rx_queue *rx_queue);
+void efx_farch_rx_fini(struct efx_rx_queue *rx_queue);
+void efx_farch_rx_remove(struct efx_rx_queue *rx_queue);
+void efx_farch_rx_write(struct efx_rx_queue *rx_queue);
+void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue);
+int efx_farch_ev_probe(struct efx_channel *channel);
+int efx_farch_ev_init(struct efx_channel *channel);
+void efx_farch_ev_fini(struct efx_channel *channel);
+void efx_farch_ev_remove(struct efx_channel *channel);
+int efx_farch_ev_process(struct efx_channel *channel, int quota);
+void efx_farch_ev_read_ack(struct efx_channel *channel);
+void efx_farch_ev_test_generate(struct efx_channel *channel);
+
+/* Falcon/Siena filter operations */
+int efx_farch_filter_table_probe(struct efx_nic *efx);
+void efx_farch_filter_table_restore(struct efx_nic *efx);
+void efx_farch_filter_table_remove(struct efx_nic *efx);
+void efx_farch_filter_update_rx_scatter(struct efx_nic *efx);
+s32 efx_farch_filter_insert(struct efx_nic *efx, struct efx_filter_spec *spec,
+ bool replace);
+int efx_farch_filter_remove_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id);
+int efx_farch_filter_get_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority, u32 filter_id,
+ struct efx_filter_spec *);
+int efx_farch_filter_clear_rx(struct efx_nic *efx,
+ enum efx_filter_priority priority);
+u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
+ enum efx_filter_priority priority);
+u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx);
+s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx,
+ enum efx_filter_priority priority, u32 *buf,
+ u32 size);
+#ifdef CONFIG_RFS_ACCEL
+s32 efx_farch_filter_rfs_insert(struct efx_nic *efx,
+ struct efx_filter_spec *spec);
+bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
+ unsigned int index);
+#endif
+void efx_farch_filter_sync_rx_mode(struct efx_nic *efx);
+
+bool efx_nic_event_present(struct efx_channel *channel);
+
+/* Some statistics are computed as A - B where A and B each increase
+ * linearly with some hardware counter(s) and the counters are read
+ * asynchronously. If the counters contributing to B are always read
+ * after those contributing to A, the computed value may be lower than
+ * the true value by some variable amount, and may decrease between
+ * subsequent computations.
+ *
+ * We should never allow statistics to decrease or to exceed the true
+ * value. Since the computed value will never be greater than the
+ * true value, we can achieve this by only storing the computed value
+ * when it increases.
+ */
+static inline void efx_update_diff_stat(u64 *stat, u64 diff)
+{
+ if ((s64)(diff - *stat) > 0)
+ *stat = diff;
+}
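+
+/* Example: if *stat currently holds 100 and a later computation yields
+ * diff == 98 (because the B counters were read slightly late), the stored
+ * value stays at 100; once diff reaches 105 it is stored, so the statistic
+ * never appears to decrease.
+ */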
+
+/* Interrupts */
+int efx_nic_init_interrupt(struct efx_nic *efx);
+void efx_nic_irq_test_start(struct efx_nic *efx);
+void efx_nic_fini_interrupt(struct efx_nic *efx);
+
+/* Falcon/Siena interrupts */
+void efx_farch_irq_enable_master(struct efx_nic *efx);
+void efx_farch_irq_test_generate(struct efx_nic *efx);
+void efx_farch_irq_disable_master(struct efx_nic *efx);
+irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id);
+irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id);
+irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx);
+
+static inline int efx_nic_event_test_irq_cpu(struct efx_channel *channel)
+{
+ return ACCESS_ONCE(channel->event_test_cpu);
+}
+static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx)
+{
+ return ACCESS_ONCE(efx->last_irq_cpu);
+}
+
+/* Global Resources */
+int efx_nic_flush_queues(struct efx_nic *efx);
+void siena_prepare_flush(struct efx_nic *efx);
+int efx_farch_fini_dmaq(struct efx_nic *efx);
+void efx_farch_finish_flr(struct efx_nic *efx);
+void siena_finish_flush(struct efx_nic *efx);
+void falcon_start_nic_stats(struct efx_nic *efx);
+void falcon_stop_nic_stats(struct efx_nic *efx);
+int falcon_reset_xaui(struct efx_nic *efx);
+void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw);
+void efx_farch_init_common(struct efx_nic *efx);
+void efx_ef10_handle_drain_event(struct efx_nic *efx);
+void efx_farch_rx_push_indir_table(struct efx_nic *efx);
+
+int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
+ unsigned int len, gfp_t gfp_flags);
+void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer);
+
+/* Tests */
+struct efx_farch_register_test {
+ unsigned address;
+ efx_oword_t mask;
+};
+int efx_farch_test_registers(struct efx_nic *efx,
+ const struct efx_farch_register_test *regs,
+ size_t n_regs);
+
+size_t efx_nic_get_regs_len(struct efx_nic *efx);
+void efx_nic_get_regs(struct efx_nic *efx, void *buf);
+
+size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
+ const unsigned long *mask, u8 *names);
+void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
+ const unsigned long *mask, u64 *stats,
+ const void *dma_buf, bool accumulate);
+void efx_nic_fix_nodesc_drop_stat(struct efx_nic *efx, u64 *stat);
+
+#define EFX_MAX_FLUSH_TIME 5000
+
+void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq,
+ efx_qword_t *event);
+
+#endif /* EFX_NIC_H */
diff --git a/kernel/drivers/net/ethernet/sfc/phy.h b/kernel/drivers/net/ethernet/sfc/phy.h
new file mode 100644
index 000000000..803bf445c
--- /dev/null
+++ b/kernel/drivers/net/ethernet/sfc/phy.h
@@ -0,0 +1,50 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2007-2010 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_PHY_H
+#define EFX_PHY_H
+
+/****************************************************************************
+ * 10Xpress (SFX7101) PHY
+ */
+extern const struct efx_phy_operations falcon_sfx7101_phy_ops;
+
+void tenxpress_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
+
+/****************************************************************************
+ * AMCC/Quake QT202x PHYs
+ */
+extern const struct efx_phy_operations falcon_qt202x_phy_ops;
+
+/* These PHYs provide various H/W control states for LEDs */
+#define QUAKE_LED_LINK_INVAL (0)
+#define QUAKE_LED_LINK_STAT (1)
+#define QUAKE_LED_LINK_ACT (2)
+#define QUAKE_LED_LINK_ACTSTAT (3)
+#define QUAKE_LED_OFF (4)
+#define QUAKE_LED_ON (5)
+#define QUAKE_LED_LINK_INPUT (6) /* Pin is an input. */
+/* What link the LED tracks */
+#define QUAKE_LED_TXLINK (0)
+#define QUAKE_LED_RXLINK (8)
+
+void falcon_qt202x_set_led(struct efx_nic *p, int led, int state);
+
+/****************************************************************************
+ * Transwitch CX4 retimer
+ */
+extern const struct efx_phy_operations falcon_txc_phy_ops;
+
+#define TXC_GPIO_DIR_INPUT 0
+#define TXC_GPIO_DIR_OUTPUT 1
+
+void falcon_txc_set_gpio_dir(struct efx_nic *efx, int pin, int dir);
+void falcon_txc_set_gpio_val(struct efx_nic *efx, int pin, int val);
+
+#endif
diff --git a/kernel/drivers/net/ethernet/sfc/ptp.c b/kernel/drivers/net/ethernet/sfc/ptp.c
new file mode 100644
index 000000000..a2e9aee05
--- /dev/null
+++ b/kernel/drivers/net/ethernet/sfc/ptp.c
@@ -0,0 +1,1939 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2011-2013 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+/* Theory of operation:
+ *
+ * PTP support is assisted by firmware running on the MC, which provides
+ * the hardware timestamping capabilities. Both transmitted and received
+ * PTP event packets are queued onto internal queues for subsequent processing;
+ * this is because the MC operations are relatively long and would block
+ * NAPI/interrupt operation.
+ *
+ * Receive event processing:
+ * The event contains the packet's UUID and sequence number, together
+ * with the hardware timestamp. The PTP receive packet queue is searched
+ * for this UUID/sequence number and, if found, put on a pending queue.
+ * Packets not matching are delivered without timestamps (MCDI events will
+ * always arrive after the actual packet).
+ * It is important for the operation of the PTP protocol that the ordering
+ * of packets between the event and general port is maintained.
+ *
+ * Work queue processing:
+ * If work waiting, synchronise host/hardware time
+ *
+ * Transmit: send packet through MC, which returns the transmission time
+ * that is converted to an appropriate timestamp.
+ *
+ * Receive: the packet's reception time is converted to an appropriate
+ * timestamp.
+ */
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <linux/time.h>
+#include <linux/ktime.h>
+#include <linux/module.h>
+#include <linux/net_tstamp.h>
+#include <linux/pps_kernel.h>
+#include <linux/ptp_clock_kernel.h>
+#include "net_driver.h"
+#include "efx.h"
+#include "mcdi.h"
+#include "mcdi_pcol.h"
+#include "io.h"
+#include "farch_regs.h"
+#include "nic.h"
+
+/* Maximum number of events expected to make up a PTP event */
+#define MAX_EVENT_FRAGS 3
+
+/* Maximum delay, ms, to begin synchronisation */
+#define MAX_SYNCHRONISE_WAIT_MS 2
+
+/* How long, at most, to spend synchronising */
+#define SYNCHRONISE_PERIOD_NS 250000
+
+/* How often to update the shared memory time */
+#define SYNCHRONISATION_GRANULARITY_NS 200
+
+/* Minimum permitted length of a (corrected) synchronisation time */
+#define DEFAULT_MIN_SYNCHRONISATION_NS 120
+
+/* Maximum permitted length of a (corrected) synchronisation time */
+#define MAX_SYNCHRONISATION_NS 1000
+
+/* How many (MC) receive events can be queued */
+#define MAX_RECEIVE_EVENTS 8
+
+/* Length of (modified) moving average. */
+#define AVERAGE_LENGTH 16
+
+/* How long an unmatched event or packet can be held */
+#define PKT_EVENT_LIFETIME_MS 10
+
+/* Offsets into PTP packet for identification. These offsets are from the
+ * start of the IP header, not the MAC header. Note that neither PTP V1 nor
+ * PTP V2 permits the use of IPv4 options.
+ */
+#define PTP_DPORT_OFFSET 22
+
+#define PTP_V1_VERSION_LENGTH 2
+#define PTP_V1_VERSION_OFFSET 28
+
+#define PTP_V1_UUID_LENGTH 6
+#define PTP_V1_UUID_OFFSET 50
+
+#define PTP_V1_SEQUENCE_LENGTH 2
+#define PTP_V1_SEQUENCE_OFFSET 58
+
+/* The minimum length of a PTP V1 packet for offsets, etc. to be valid:
+ * includes IP header.
+ */
+#define PTP_V1_MIN_LENGTH 64
+
+#define PTP_V2_VERSION_LENGTH 1
+#define PTP_V2_VERSION_OFFSET 29
+
+#define PTP_V2_UUID_LENGTH 8
+#define PTP_V2_UUID_OFFSET 48
+
+/* Although PTP V2 UUIDs are composed of a ClockIdentity (8) and PortNumber (2),
+ * the MC only captures the last six bytes of the clock identity. These values
+ * reflect those, not the ones used in the standard. The standard permits
+ * mapping of V1 UUIDs to V2 UUIDs with these same values.
+ */
+#define PTP_V2_MC_UUID_LENGTH 6
+#define PTP_V2_MC_UUID_OFFSET 50
+
+#define PTP_V2_SEQUENCE_LENGTH 2
+#define PTP_V2_SEQUENCE_OFFSET 58
+
+/* The minimum length of a PTP V2 packet for offsets, etc. to be valid:
+ * includes IP header.
+ */
+#define PTP_V2_MIN_LENGTH 63
+
+#define PTP_MIN_LENGTH 63
+
+#define PTP_ADDRESS 0xe0000181 /* 224.0.1.129 */
+#define PTP_EVENT_PORT 319
+#define PTP_GENERAL_PORT 320
+
+/* Annoyingly the format of the version numbers is different between
+ * versions 1 and 2 so it isn't possible to simply look for 1 or 2.
+ */
+#define PTP_VERSION_V1 1
+
+#define PTP_VERSION_V2 2
+#define PTP_VERSION_V2_MASK 0x0f
+
+enum ptp_packet_state {
+ PTP_PACKET_STATE_UNMATCHED = 0,
+ PTP_PACKET_STATE_MATCHED,
+ PTP_PACKET_STATE_TIMED_OUT,
+ PTP_PACKET_STATE_MATCH_UNWANTED
+};
+
+/* NIC synchronised with single word of time only comprising
+ * partial seconds and full nanoseconds: 10^9 ~ 2^30 so 2 bits for seconds.
+ */
+#define MC_NANOSECOND_BITS 30
+#define MC_NANOSECOND_MASK ((1 << MC_NANOSECOND_BITS) - 1)
+#define MC_SECOND_MASK ((1 << (32 - MC_NANOSECOND_BITS)) - 1)
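+
+/* Example: in efx_ptp_send_times() below, a host time of 2s + 123ns is
+ * packed into a single 32-bit word as (2 << 30) | 123; only the low two
+ * bits of the seconds value fit, which is enough to detect a seconds
+ * rollover within the short synchronisation window.
+ */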
+
+/* Maximum parts-per-billion adjustment that is acceptable */
+#define MAX_PPB 1000000
+
+/* Number of bits required to hold the above */
+#define MAX_PPB_BITS 20
+
+/* Number of extra bits allowed when calculating fractional ns.
+ * EXTRA_BITS + MC_CMD_PTP_IN_ADJUST_BITS + MAX_PPB_BITS should
+ * be less than 63.
+ */
+#define PPB_EXTRA_BITS 2
+
+/* Precalculate scale word to avoid long long division at runtime */
+#define PPB_SCALE_WORD ((1LL << (PPB_EXTRA_BITS + MC_CMD_PTP_IN_ADJUST_BITS +\
+ MAX_PPB_BITS)) / 1000000000LL)
+
+#define PTP_SYNC_ATTEMPTS 4
+
+/**
+ * struct efx_ptp_match - Matching structure, stored in sk_buff's cb area.
+ * @words: UUID and (partial) sequence number
+ * @expiry: Time after which the packet should be delivered irrespective of
+ * event arrival.
+ * @state: The state of the packet - whether it is ready for processing or
+ * whether that is of no interest.
+ */
+struct efx_ptp_match {
+ u32 words[DIV_ROUND_UP(PTP_V1_UUID_LENGTH, 4)];
+ unsigned long expiry;
+ enum ptp_packet_state state;
+};
+
+/**
+ * struct efx_ptp_event_rx - A PTP receive event (from MC)
+ * @link: List node for the pending/free event lists
+ * @seq0: First part of (PTP) UUID
+ * @seq1: Second part of (PTP) UUID and sequence number
+ * @hwtimestamp: Event timestamp
+ * @expiry: Time after which the event should be dropped if still unmatched
+ */
+struct efx_ptp_event_rx {
+ struct list_head link;
+ u32 seq0;
+ u32 seq1;
+ ktime_t hwtimestamp;
+ unsigned long expiry;
+};
+
+/**
+ * struct efx_ptp_timeset - Synchronisation between host and MC
+ * @host_start: Host time immediately before hardware timestamp taken
+ * @major: Hardware timestamp, major
+ * @minor: Hardware timestamp, minor
+ * @host_end: Host time immediately after hardware timestamp taken
+ * @wait: Number of NIC clock ticks between hardware timestamp being read and
+ * host end time being seen
+ * @window: Difference of host_end and host_start
+ */
+struct efx_ptp_timeset {
+ u32 host_start;
+ u32 major;
+ u32 minor;
+ u32 host_end;
+ u32 wait;
+ u32 window; /* Derived: end - start, allowing for wrap */
+};
+
+/**
+ * struct efx_ptp_data - Precision Time Protocol (PTP) state
+ * @efx: The NIC context
+ * @channel: The PTP channel (Siena only)
+ * @rx_ts_inline: Flag for whether RX timestamps are inline (else they are
+ * separate events)
+ * @rxq: Receive queue (awaiting timestamps)
+ * @txq: Transmit queue
+ * @evt_list: List of MC receive events awaiting packets
+ * @evt_free_list: List of free events
+ * @evt_lock: Lock for manipulating evt_list and evt_free_list
+ * @rx_evts: Instantiated events (on evt_list and evt_free_list)
+ * @workwq: Work queue for processing pending PTP operations
+ * @work: Work task
+ * @reset_required: A serious error has occurred and the PTP task needs to be
+ * reset (disable, enable).
+ * @rxfilter_event: Receive filter for the PTP event port (when operating)
+ * @rxfilter_general: Receive filter for the PTP general port (when operating)
+ * @rxfilter_installed: Flag: the above receive filters are currently installed
+ * @config: Current timestamp configuration
+ * @enabled: PTP operation enabled
+ * @mode: Mode in which PTP operating (PTP version)
+ * @time_format: Time format supported by this NIC
+ * @ns_to_nic_time: Function to convert from scalar nanoseconds to NIC time
+ * @nic_to_kernel_time: Function to convert from NIC to kernel time
+ * @min_synchronisation_ns: Minimum acceptable corrected sync window
+ * @ts_corrections.tx: Required driver correction of transmit timestamps
+ * @ts_corrections.rx: Required driver correction of receive timestamps
+ * @ts_corrections.pps_out: PPS output error (information only)
+ * @ts_corrections.pps_in: Required driver correction of PPS input timestamps
+ * @evt_frags: Partly assembled PTP events
+ * @evt_frag_idx: Current fragment number
+ * @evt_code: Last event code
+ * @start: Address at which MC indicates ready for synchronisation
+ * @host_time_pps: Host time at last PPS
+ * @current_adjfreq: Current ppb adjustment.
+ * @phc_clock: Pointer to registered phc device (if primary function)
+ * @phc_clock_info: Registration structure for phc device
+ * @pps_work: pps work task for handling pps events
+ * @pps_workwq: pps work queue
+ * @nic_ts_enabled: Flag indicating if NIC generated TS events are handled
+ * @txbuf: Buffer for use when transmitting (PTP) packets to MC (avoids
+ * allocations in main data path).
+ * @good_syncs: Number of successful synchronisations.
+ * @fast_syncs: Number of synchronisations requiring short delay
+ * @bad_syncs: Number of failed synchronisations.
+ * @sync_timeouts: Number of synchronisation timeouts
+ * @no_time_syncs: Number of synchronisations with no good times.
+ * @invalid_sync_windows: Number of sync windows with bad durations.
+ * @undersize_sync_windows: Number of corrected sync windows that are too small
+ * @oversize_sync_windows: Number of corrected sync windows that are too large
+ * @rx_no_timestamp: Number of packets received without a timestamp.
+ * @timeset: Last set of synchronisation statistics.
+ */
+struct efx_ptp_data {
+ struct efx_nic *efx;
+ struct efx_channel *channel;
+ bool rx_ts_inline;
+ struct sk_buff_head rxq;
+ struct sk_buff_head txq;
+ struct list_head evt_list;
+ struct list_head evt_free_list;
+ spinlock_t evt_lock;
+ struct efx_ptp_event_rx rx_evts[MAX_RECEIVE_EVENTS];
+ struct workqueue_struct *workwq;
+ struct work_struct work;
+ bool reset_required;
+ u32 rxfilter_event;
+ u32 rxfilter_general;
+ bool rxfilter_installed;
+ struct hwtstamp_config config;
+ bool enabled;
+ unsigned int mode;
+ unsigned int time_format;
+ void (*ns_to_nic_time)(s64 ns, u32 *nic_major, u32 *nic_minor);
+ ktime_t (*nic_to_kernel_time)(u32 nic_major, u32 nic_minor,
+ s32 correction);
+ unsigned int min_synchronisation_ns;
+ struct {
+ s32 tx;
+ s32 rx;
+ s32 pps_out;
+ s32 pps_in;
+ } ts_corrections;
+ efx_qword_t evt_frags[MAX_EVENT_FRAGS];
+ int evt_frag_idx;
+ int evt_code;
+ struct efx_buffer start;
+ struct pps_event_time host_time_pps;
+ s64 current_adjfreq;
+ struct ptp_clock *phc_clock;
+ struct ptp_clock_info phc_clock_info;
+ struct work_struct pps_work;
+ struct workqueue_struct *pps_workwq;
+ bool nic_ts_enabled;
+ MCDI_DECLARE_BUF(txbuf, MC_CMD_PTP_IN_TRANSMIT_LENMAX);
+
+ unsigned int good_syncs;
+ unsigned int fast_syncs;
+ unsigned int bad_syncs;
+ unsigned int sync_timeouts;
+ unsigned int no_time_syncs;
+ unsigned int invalid_sync_windows;
+ unsigned int undersize_sync_windows;
+ unsigned int oversize_sync_windows;
+ unsigned int rx_no_timestamp;
+ struct efx_ptp_timeset
+ timeset[MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM];
+};
+
+static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta);
+static int efx_phc_adjtime(struct ptp_clock_info *ptp, s64 delta);
+static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts);
+static int efx_phc_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *e_ts);
+static int efx_phc_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *request, int on);
+
+#define PTP_SW_STAT(ext_name, field_name) \
+ { #ext_name, 0, offsetof(struct efx_ptp_data, field_name) }
+#define PTP_MC_STAT(ext_name, mcdi_name) \
+ { #ext_name, 32, MC_CMD_PTP_OUT_STATUS_STATS_ ## mcdi_name ## _OFST }
+static const struct efx_hw_stat_desc efx_ptp_stat_desc[] = {
+ PTP_SW_STAT(ptp_good_syncs, good_syncs),
+ PTP_SW_STAT(ptp_fast_syncs, fast_syncs),
+ PTP_SW_STAT(ptp_bad_syncs, bad_syncs),
+ PTP_SW_STAT(ptp_sync_timeouts, sync_timeouts),
+ PTP_SW_STAT(ptp_no_time_syncs, no_time_syncs),
+ PTP_SW_STAT(ptp_invalid_sync_windows, invalid_sync_windows),
+ PTP_SW_STAT(ptp_undersize_sync_windows, undersize_sync_windows),
+ PTP_SW_STAT(ptp_oversize_sync_windows, oversize_sync_windows),
+ PTP_SW_STAT(ptp_rx_no_timestamp, rx_no_timestamp),
+ PTP_MC_STAT(ptp_tx_timestamp_packets, TX),
+ PTP_MC_STAT(ptp_rx_timestamp_packets, RX),
+ PTP_MC_STAT(ptp_timestamp_packets, TS),
+ PTP_MC_STAT(ptp_filter_matches, FM),
+ PTP_MC_STAT(ptp_non_filter_matches, NFM),
+};
+#define PTP_STAT_COUNT ARRAY_SIZE(efx_ptp_stat_desc)
+static const unsigned long efx_ptp_stat_mask[] = {
+ [0 ... BITS_TO_LONGS(PTP_STAT_COUNT) - 1] = ~0UL,
+};
+
+size_t efx_ptp_describe_stats(struct efx_nic *efx, u8 *strings)
+{
+ if (!efx->ptp_data)
+ return 0;
+
+ return efx_nic_describe_stats(efx_ptp_stat_desc, PTP_STAT_COUNT,
+ efx_ptp_stat_mask, strings);
+}
+
+size_t efx_ptp_update_stats(struct efx_nic *efx, u64 *stats)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_STATUS_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_STATUS_LEN);
+ size_t i;
+ int rc;
+
+ if (!efx->ptp_data)
+ return 0;
+
+ /* Copy software statistics */
+ for (i = 0; i < PTP_STAT_COUNT; i++) {
+ if (efx_ptp_stat_desc[i].dma_width)
+ continue;
+ stats[i] = *(unsigned int *)((char *)efx->ptp_data +
+ efx_ptp_stat_desc[i].offset);
+ }
+
+ /* Fetch MC statistics. We *must* fill in all statistics or
+ * risk leaking kernel memory to userland, so if the MCDI
+ * request fails we pretend we got zeroes.
+ */
+ MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_STATUS);
+ MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
+ rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), NULL);
+ if (rc) {
+ netif_err(efx, hw, efx->net_dev,
+ "MC_CMD_PTP_OP_STATUS failed (%d)\n", rc);
+ memset(outbuf, 0, sizeof(outbuf));
+ }
+ efx_nic_update_stats(efx_ptp_stat_desc, PTP_STAT_COUNT,
+ efx_ptp_stat_mask,
+ stats, _MCDI_PTR(outbuf, 0), false);
+
+ return PTP_STAT_COUNT;
+}
+
+/* For Siena platforms NIC time is s and ns */
+static void efx_ptp_ns_to_s_ns(s64 ns, u32 *nic_major, u32 *nic_minor)
+{
+ struct timespec ts = ns_to_timespec(ns);
+ *nic_major = ts.tv_sec;
+ *nic_minor = ts.tv_nsec;
+}
+
+static ktime_t efx_ptp_s_ns_to_ktime_correction(u32 nic_major, u32 nic_minor,
+ s32 correction)
+{
+ ktime_t kt = ktime_set(nic_major, nic_minor);
+ if (correction >= 0)
+ kt = ktime_add_ns(kt, (u64)correction);
+ else
+ kt = ktime_sub_ns(kt, (u64)-correction);
+ return kt;
+}
+
+/* To convert from s27 format to ns we multiply then divide by a power of 2.
+ * For the conversion from ns to s27, the operation is also converted to a
+ * multiply and shift.
+ */
+#define S27_TO_NS_SHIFT (27)
+#define NS_TO_S27_MULT (((1ULL << 63) + NSEC_PER_SEC / 2) / NSEC_PER_SEC)
+#define NS_TO_S27_SHIFT (63 - S27_TO_NS_SHIFT)
+#define S27_MINOR_MAX (1 << S27_TO_NS_SHIFT)
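+
+/* For example, half a second is represented in s27 format as a minor value
+ * of 1 << 26 (0.5 * 2^27); the conversion helpers below recover
+ * 500000000 ns from it.
+ */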
+
+/* For Huntington platforms NIC time is in seconds and fractions of a second
+ * where the minor register only uses 27 bits in units of 2^-27s.
+ */
+static void efx_ptp_ns_to_s27(s64 ns, u32 *nic_major, u32 *nic_minor)
+{
+ struct timespec ts = ns_to_timespec(ns);
+ u32 maj = ts.tv_sec;
+ u32 min = (u32)(((u64)ts.tv_nsec * NS_TO_S27_MULT +
+ (1ULL << (NS_TO_S27_SHIFT - 1))) >> NS_TO_S27_SHIFT);
+
+ /* The conversion can result in the minor value exceeding the maximum.
+ * In this case, round up to the next second.
+ */
+ if (min >= S27_MINOR_MAX) {
+ min -= S27_MINOR_MAX;
+ maj++;
+ }
+
+ *nic_major = maj;
+ *nic_minor = min;
+}
+
+static inline ktime_t efx_ptp_s27_to_ktime(u32 nic_major, u32 nic_minor)
+{
+ u32 ns = (u32)(((u64)nic_minor * NSEC_PER_SEC +
+ (1ULL << (S27_TO_NS_SHIFT - 1))) >> S27_TO_NS_SHIFT);
+ return ktime_set(nic_major, ns);
+}
+
+static ktime_t efx_ptp_s27_to_ktime_correction(u32 nic_major, u32 nic_minor,
+ s32 correction)
+{
+ /* Apply the correction and deal with carry */
+ nic_minor += correction;
+ if ((s32)nic_minor < 0) {
+ nic_minor += S27_MINOR_MAX;
+ nic_major--;
+ } else if (nic_minor >= S27_MINOR_MAX) {
+ nic_minor -= S27_MINOR_MAX;
+ nic_major++;
+ }
+
+ return efx_ptp_s27_to_ktime(nic_major, nic_minor);
+}
+
+/* Get PTP attributes and set up time conversions */
+static int efx_ptp_get_attributes(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_GET_ATTRIBUTES_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN);
+ struct efx_ptp_data *ptp = efx->ptp_data;
+ int rc;
+ u32 fmt;
+ size_t out_len;
+
+ /* Get the PTP attributes. If the NIC doesn't support the operation we
+ * use the default format for compatibility with older NICs i.e.
+ * seconds and nanoseconds.
+ */
+ MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_GET_ATTRIBUTES);
+ MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
+ rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &out_len);
+ if (rc == 0)
+ fmt = MCDI_DWORD(outbuf, PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT);
+ else if (rc == -EINVAL)
+ fmt = MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS;
+ else
+ return rc;
+
+ if (fmt == MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_27FRACTION) {
+ ptp->ns_to_nic_time = efx_ptp_ns_to_s27;
+ ptp->nic_to_kernel_time = efx_ptp_s27_to_ktime_correction;
+ } else if (fmt == MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS) {
+ ptp->ns_to_nic_time = efx_ptp_ns_to_s_ns;
+ ptp->nic_to_kernel_time = efx_ptp_s_ns_to_ktime_correction;
+ } else {
+ return -ERANGE;
+ }
+
+ ptp->time_format = fmt;
+
+ /* MC_CMD_PTP_OP_GET_ATTRIBUTES is an extended version of an older
+ * operation MC_CMD_PTP_OP_GET_TIME_FORMAT that also returns a value
+ * to use for the minimum acceptable corrected synchronization window.
+ * If we have the extra information store it. For older firmware that
+ * does not implement the extended command use the default value.
+ */
+ if (rc == 0 && out_len >= MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN)
+ ptp->min_synchronisation_ns =
+ MCDI_DWORD(outbuf,
+ PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN);
+ else
+ ptp->min_synchronisation_ns = DEFAULT_MIN_SYNCHRONISATION_NS;
+
+ return 0;
+}
+
+/* Get PTP timestamp corrections */
+static int efx_ptp_get_timestamp_corrections(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_LEN);
+ int rc;
+
+ /* Get the timestamp corrections from the NIC. If this operation is
+ * not supported (older NICs) then no correction is required.
+ */
+ MCDI_SET_DWORD(inbuf, PTP_IN_OP,
+ MC_CMD_PTP_OP_GET_TIMESTAMP_CORRECTIONS);
+ MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), NULL);
+ if (rc == 0) {
+ efx->ptp_data->ts_corrections.tx = MCDI_DWORD(outbuf,
+ PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT);
+ efx->ptp_data->ts_corrections.rx = MCDI_DWORD(outbuf,
+ PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE);
+ efx->ptp_data->ts_corrections.pps_out = MCDI_DWORD(outbuf,
+ PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT);
+ efx->ptp_data->ts_corrections.pps_in = MCDI_DWORD(outbuf,
+ PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN);
+ } else if (rc == -EINVAL) {
+ efx->ptp_data->ts_corrections.tx = 0;
+ efx->ptp_data->ts_corrections.rx = 0;
+ efx->ptp_data->ts_corrections.pps_out = 0;
+ efx->ptp_data->ts_corrections.pps_in = 0;
+ } else {
+ return rc;
+ }
+
+ return 0;
+}
+
+/* Enable MCDI PTP support. */
+static int efx_ptp_enable(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_ENABLE_LEN);
+ MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, 0);
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ENABLE);
+ MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
+ MCDI_SET_DWORD(inbuf, PTP_IN_ENABLE_QUEUE,
+ efx->ptp_data->channel ?
+ efx->ptp_data->channel->channel : 0);
+ MCDI_SET_DWORD(inbuf, PTP_IN_ENABLE_MODE, efx->ptp_data->mode);
+
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), NULL);
+ rc = (rc == -EALREADY) ? 0 : rc;
+ if (rc)
+ efx_mcdi_display_error(efx, MC_CMD_PTP,
+ MC_CMD_PTP_IN_ENABLE_LEN,
+ outbuf, sizeof(outbuf), rc);
+ return rc;
+}
+
+/* Disable MCDI PTP support.
+ *
+ * Note that this function should never rely on the presence of ptp_data -
+ * may be called before that exists.
+ */
+static int efx_ptp_disable(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_DISABLE_LEN);
+ MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, 0);
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_DISABLE);
+ MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), NULL);
+ rc = (rc == -EALREADY) ? 0 : rc;
+ if (rc)
+ efx_mcdi_display_error(efx, MC_CMD_PTP,
+ MC_CMD_PTP_IN_DISABLE_LEN,
+ outbuf, sizeof(outbuf), rc);
+ return rc;
+}
+
+static void efx_ptp_deliver_rx_queue(struct sk_buff_head *q)
+{
+ struct sk_buff *skb;
+
+ while ((skb = skb_dequeue(q))) {
+ local_bh_disable();
+ netif_receive_skb(skb);
+ local_bh_enable();
+ }
+}
+
+static void efx_ptp_handle_no_channel(struct efx_nic *efx)
+{
+ netif_err(efx, drv, efx->net_dev,
+ "ERROR: PTP requires MSI-X and 1 additional interrupt"
+ "vector. PTP disabled\n");
+}
+
+/* Repeatedly send the host time to the MC which will capture the hardware
+ * time.
+ */
+static void efx_ptp_send_times(struct efx_nic *efx,
+ struct pps_event_time *last_time)
+{
+ struct pps_event_time now;
+ struct timespec limit;
+ struct efx_ptp_data *ptp = efx->ptp_data;
+ struct timespec start;
+ int *mc_running = ptp->start.addr;
+
+ pps_get_ts(&now);
+ start = now.ts_real;
+ limit = now.ts_real;
+ timespec_add_ns(&limit, SYNCHRONISE_PERIOD_NS);
+
+ /* Write host time for specified period or until MC is done */
+ while ((timespec_compare(&now.ts_real, &limit) < 0) &&
+ ACCESS_ONCE(*mc_running)) {
+ struct timespec update_time;
+ unsigned int host_time;
+
+ /* Don't update continuously to avoid saturating the PCIe bus */
+ update_time = now.ts_real;
+ timespec_add_ns(&update_time, SYNCHRONISATION_GRANULARITY_NS);
+ do {
+ pps_get_ts(&now);
+ } while ((timespec_compare(&now.ts_real, &update_time) < 0) &&
+ ACCESS_ONCE(*mc_running));
+
+ /* Synchronise NIC with single word of time only */
+ host_time = (now.ts_real.tv_sec << MC_NANOSECOND_BITS |
+ now.ts_real.tv_nsec);
+ /* Update host time in NIC memory */
+ efx->type->ptp_write_host_time(efx, host_time);
+ }
+ *last_time = now;
+}
+
+/* Read a timeset from the MC's results and partial process. */
+static void efx_ptp_read_timeset(MCDI_DECLARE_STRUCT_PTR(data),
+ struct efx_ptp_timeset *timeset)
+{
+ unsigned start_ns, end_ns;
+
+ timeset->host_start = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_HOSTSTART);
+ timeset->major = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_MAJOR);
+ timeset->minor = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_MINOR);
+ timeset->host_end = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_HOSTEND);
+ timeset->wait = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_WAITNS);
+
+ /* Ignore seconds */
+ start_ns = timeset->host_start & MC_NANOSECOND_MASK;
+ end_ns = timeset->host_end & MC_NANOSECOND_MASK;
+ /* Allow for rollover */
+ if (end_ns < start_ns)
+ end_ns += NSEC_PER_SEC;
+ /* Determine duration of operation */
+ timeset->window = end_ns - start_ns;
+}
+
+/* Process times received from MC.
+ *
+ * Extract times from returned results, and establish the minimum value
+ * seen. The minimum value represents the "best" possible time and events
+ * too much greater than this are rejected - the machine is, perhaps, too
+ * busy. A number of readings are taken so that, hopefully, at least one good
+ * synchronisation will be seen in the results.
+ */
+static int
+efx_ptp_process_times(struct efx_nic *efx, MCDI_DECLARE_STRUCT_PTR(synch_buf),
+ size_t response_length,
+ const struct pps_event_time *last_time)
+{
+ unsigned number_readings =
+ MCDI_VAR_ARRAY_LEN(response_length,
+ PTP_OUT_SYNCHRONIZE_TIMESET);
+ unsigned i;
+ unsigned ngood = 0;
+ unsigned last_good = 0;
+ struct efx_ptp_data *ptp = efx->ptp_data;
+ u32 last_sec;
+ u32 start_sec;
+ struct timespec delta;
+ ktime_t mc_time;
+
+ if (number_readings == 0)
+ return -EAGAIN;
+
+ /* Read the set of results and find the last good host-MC
+ * synchronization result. The MC times when it finishes reading the
+ * host time so the corrected window time should be fairly constant
+ * for a given platform. Increment stats for any results that appear
+ * to be erroneous.
+ */
+ for (i = 0; i < number_readings; i++) {
+ s32 window, corrected;
+ struct timespec wait;
+
+ efx_ptp_read_timeset(
+ MCDI_ARRAY_STRUCT_PTR(synch_buf,
+ PTP_OUT_SYNCHRONIZE_TIMESET, i),
+ &ptp->timeset[i]);
+
+ wait = ktime_to_timespec(
+ ptp->nic_to_kernel_time(0, ptp->timeset[i].wait, 0));
+ window = ptp->timeset[i].window;
+ corrected = window - wait.tv_nsec;
+
+ /* We expect the uncorrected synchronization window to be at
+ * least as large as the interval between host start and end
+ * times. If it is smaller than this then it is most likely
+ * to be a consequence of the host's time being adjusted.
+ * Check that the corrected sync window is in a reasonable
+ * range. If it is out of range it is likely to be because an
+ * interrupt or other delay occurred between reading the system
+ * time and writing it to MC memory.
+ */
+ if (window < SYNCHRONISATION_GRANULARITY_NS) {
+ ++ptp->invalid_sync_windows;
+ } else if (corrected >= MAX_SYNCHRONISATION_NS) {
+ ++ptp->oversize_sync_windows;
+ } else if (corrected < ptp->min_synchronisation_ns) {
+ ++ptp->undersize_sync_windows;
+ } else {
+ ngood++;
+ last_good = i;
+ }
+ }
+
+ if (ngood == 0) {
+ netif_warn(efx, drv, efx->net_dev,
+ "PTP no suitable synchronisations\n");
+ return -EAGAIN;
+ }
+
+ /* Calculate delay from last good sync (host time) to last_time.
+ * It is possible that the seconds rolled over between taking
+ * the start reading and the last value written by the host. The
+ * timescales are such that a gap of more than one second is never
+ * expected. delta is *not* normalised.
+ */
+ start_sec = ptp->timeset[last_good].host_start >> MC_NANOSECOND_BITS;
+ last_sec = last_time->ts_real.tv_sec & MC_SECOND_MASK;
+ if (start_sec != last_sec &&
+ ((start_sec + 1) & MC_SECOND_MASK) != last_sec) {
+ netif_warn(efx, hw, efx->net_dev,
+ "PTP bad synchronisation seconds\n");
+ return -EAGAIN;
+ }
+ delta.tv_sec = (last_sec - start_sec) & 1;
+ delta.tv_nsec =
+ last_time->ts_real.tv_nsec -
+ (ptp->timeset[last_good].host_start & MC_NANOSECOND_MASK);
+
+ /* Convert the NIC time at last good sync into kernel time.
+ * No correction is required - this time is the output of a
+ * firmware process.
+ */
+ mc_time = ptp->nic_to_kernel_time(ptp->timeset[last_good].major,
+ ptp->timeset[last_good].minor, 0);
+
+ /* Calculate delay from NIC top of second to last_time */
+ delta.tv_nsec += ktime_to_timespec(mc_time).tv_nsec;
+
+ /* Set PPS timestamp to match NIC top of second */
+ ptp->host_time_pps = *last_time;
+ pps_sub_ts(&ptp->host_time_pps, delta);
+
+ return 0;
+}
+
+/* Synchronize times between the host and the MC */
+static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
+{
+ struct efx_ptp_data *ptp = efx->ptp_data;
+ MCDI_DECLARE_BUF(synch_buf, MC_CMD_PTP_OUT_SYNCHRONIZE_LENMAX);
+ size_t response_length;
+ int rc;
+ unsigned long timeout;
+ struct pps_event_time last_time = {};
+ unsigned int loops = 0;
+ int *start = ptp->start.addr;
+
+ MCDI_SET_DWORD(synch_buf, PTP_IN_OP, MC_CMD_PTP_OP_SYNCHRONIZE);
+ MCDI_SET_DWORD(synch_buf, PTP_IN_PERIPH_ID, 0);
+ MCDI_SET_DWORD(synch_buf, PTP_IN_SYNCHRONIZE_NUMTIMESETS,
+ num_readings);
+ MCDI_SET_QWORD(synch_buf, PTP_IN_SYNCHRONIZE_START_ADDR,
+ ptp->start.dma_addr);
+
+ /* Clear flag that signals MC ready */
+ ACCESS_ONCE(*start) = 0;
+ rc = efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
+ MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
+ EFX_BUG_ON_PARANOID(rc);
+
+ /* Wait for start from MCDI (or timeout) */
+ timeout = jiffies + msecs_to_jiffies(MAX_SYNCHRONISE_WAIT_MS);
+ while (!ACCESS_ONCE(*start) && (time_before(jiffies, timeout))) {
+ udelay(20); /* Usually start MCDI execution quickly */
+ loops++;
+ }
+
+ if (loops <= 1)
+ ++ptp->fast_syncs;
+ if (!time_before(jiffies, timeout))
+ ++ptp->sync_timeouts;
+
+ if (ACCESS_ONCE(*start))
+ efx_ptp_send_times(efx, &last_time);
+
+ /* Collect results */
+ rc = efx_mcdi_rpc_finish(efx, MC_CMD_PTP,
+ MC_CMD_PTP_IN_SYNCHRONIZE_LEN,
+ synch_buf, sizeof(synch_buf),
+ &response_length);
+ if (rc == 0) {
+ rc = efx_ptp_process_times(efx, synch_buf, response_length,
+ &last_time);
+ if (rc == 0)
+ ++ptp->good_syncs;
+ else
+ ++ptp->no_time_syncs;
+ }
+
+ /* Increment the bad syncs counter if the synchronize fails, whatever
+ * the reason.
+ */
+ if (rc != 0)
+ ++ptp->bad_syncs;
+
+ return rc;
+}
+
+/* Transmit a PTP packet, via the MCDI interface, to the wire. */
+static int efx_ptp_xmit_skb(struct efx_nic *efx, struct sk_buff *skb)
+{
+ struct efx_ptp_data *ptp_data = efx->ptp_data;
+ struct skb_shared_hwtstamps timestamps;
+ int rc = -EIO;
+ MCDI_DECLARE_BUF(txtime, MC_CMD_PTP_OUT_TRANSMIT_LEN);
+ size_t len;
+
+ MCDI_SET_DWORD(ptp_data->txbuf, PTP_IN_OP, MC_CMD_PTP_OP_TRANSMIT);
+ MCDI_SET_DWORD(ptp_data->txbuf, PTP_IN_PERIPH_ID, 0);
+ MCDI_SET_DWORD(ptp_data->txbuf, PTP_IN_TRANSMIT_LENGTH, skb->len);
+ if (skb_shinfo(skb)->nr_frags != 0) {
+ rc = skb_linearize(skb);
+ if (rc != 0)
+ goto fail;
+ }
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ rc = skb_checksum_help(skb);
+ if (rc != 0)
+ goto fail;
+ }
+ skb_copy_from_linear_data(skb,
+ MCDI_PTR(ptp_data->txbuf,
+ PTP_IN_TRANSMIT_PACKET),
+ skb->len);
+ rc = efx_mcdi_rpc(efx, MC_CMD_PTP,
+ ptp_data->txbuf, MC_CMD_PTP_IN_TRANSMIT_LEN(skb->len),
+ txtime, sizeof(txtime), &len);
+ if (rc != 0)
+ goto fail;
+
+ memset(&timestamps, 0, sizeof(timestamps));
+ timestamps.hwtstamp = ptp_data->nic_to_kernel_time(
+ MCDI_DWORD(txtime, PTP_OUT_TRANSMIT_MAJOR),
+ MCDI_DWORD(txtime, PTP_OUT_TRANSMIT_MINOR),
+ ptp_data->ts_corrections.tx);
+
+ skb_tstamp_tx(skb, &timestamps);
+
+ rc = 0;
+
+fail:
+ dev_kfree_skb(skb);
+
+ return rc;
+}
+
+static void efx_ptp_drop_time_expired_events(struct efx_nic *efx)
+{
+ struct efx_ptp_data *ptp = efx->ptp_data;
+ struct list_head *cursor;
+ struct list_head *next;
+
+ if (ptp->rx_ts_inline)
+ return;
+
+ /* Drop time-expired events */
+ spin_lock_bh(&ptp->evt_lock);
+ if (!list_empty(&ptp->evt_list)) {
+ list_for_each_safe(cursor, next, &ptp->evt_list) {
+ struct efx_ptp_event_rx *evt;
+
+ evt = list_entry(cursor, struct efx_ptp_event_rx,
+ link);
+ if (time_after(jiffies, evt->expiry)) {
+ list_move(&evt->link, &ptp->evt_free_list);
+ netif_warn(efx, hw, efx->net_dev,
+ "PTP rx event dropped\n");
+ }
+ }
+ }
+ spin_unlock_bh(&ptp->evt_lock);
+}
+
+static enum ptp_packet_state efx_ptp_match_rx(struct efx_nic *efx,
+ struct sk_buff *skb)
+{
+ struct efx_ptp_data *ptp = efx->ptp_data;
+ bool evts_waiting;
+ struct list_head *cursor;
+ struct list_head *next;
+ struct efx_ptp_match *match;
+ enum ptp_packet_state rc = PTP_PACKET_STATE_UNMATCHED;
+
+ WARN_ON_ONCE(ptp->rx_ts_inline);
+
+ spin_lock_bh(&ptp->evt_lock);
+ evts_waiting = !list_empty(&ptp->evt_list);
+ spin_unlock_bh(&ptp->evt_lock);
+
+ if (!evts_waiting)
+ return PTP_PACKET_STATE_UNMATCHED;
+
+ match = (struct efx_ptp_match *)skb->cb;
+ /* Look for a matching timestamp in the event queue */
+ spin_lock_bh(&ptp->evt_lock);
+ list_for_each_safe(cursor, next, &ptp->evt_list) {
+ struct efx_ptp_event_rx *evt;
+
+ evt = list_entry(cursor, struct efx_ptp_event_rx, link);
+ if ((evt->seq0 == match->words[0]) &&
+ (evt->seq1 == match->words[1])) {
+ struct skb_shared_hwtstamps *timestamps;
+
+ /* Match - add in hardware timestamp */
+ timestamps = skb_hwtstamps(skb);
+ timestamps->hwtstamp = evt->hwtimestamp;
+
+ match->state = PTP_PACKET_STATE_MATCHED;
+ rc = PTP_PACKET_STATE_MATCHED;
+ list_move(&evt->link, &ptp->evt_free_list);
+ break;
+ }
+ }
+ spin_unlock_bh(&ptp->evt_lock);
+
+ return rc;
+}
+
+/* Process any queued receive events and corresponding packets
+ *
+ * q is returned with all the packets that are ready for delivery.
+ */
+static void efx_ptp_process_events(struct efx_nic *efx, struct sk_buff_head *q)
+{
+ struct efx_ptp_data *ptp = efx->ptp_data;
+ struct sk_buff *skb;
+
+ while ((skb = skb_dequeue(&ptp->rxq))) {
+ struct efx_ptp_match *match;
+
+ match = (struct efx_ptp_match *)skb->cb;
+ if (match->state == PTP_PACKET_STATE_MATCH_UNWANTED) {
+ __skb_queue_tail(q, skb);
+ } else if (efx_ptp_match_rx(efx, skb) ==
+ PTP_PACKET_STATE_MATCHED) {
+ __skb_queue_tail(q, skb);
+ } else if (time_after(jiffies, match->expiry)) {
+ match->state = PTP_PACKET_STATE_TIMED_OUT;
+ ++ptp->rx_no_timestamp;
+ __skb_queue_tail(q, skb);
+ } else {
+ /* Replace unprocessed entry and stop */
+ skb_queue_head(&ptp->rxq, skb);
+ break;
+ }
+ }
+}
+
+/* Complete processing of a received packet */
+static inline void efx_ptp_process_rx(struct efx_nic *efx, struct sk_buff *skb)
+{
+ local_bh_disable();
+ netif_receive_skb(skb);
+ local_bh_enable();
+}
+
+static void efx_ptp_remove_multicast_filters(struct efx_nic *efx)
+{
+ struct efx_ptp_data *ptp = efx->ptp_data;
+
+ if (ptp->rxfilter_installed) {
+ efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
+ ptp->rxfilter_general);
+ efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
+ ptp->rxfilter_event);
+ ptp->rxfilter_installed = false;
+ }
+}
+
+static int efx_ptp_insert_multicast_filters(struct efx_nic *efx)
+{
+ struct efx_ptp_data *ptp = efx->ptp_data;
+ struct efx_filter_spec rxfilter;
+ int rc;
+
+ if (!ptp->channel || ptp->rxfilter_installed)
+ return 0;
+
+ /* Must filter on both event and general ports to ensure
+ * that there is no packet re-ordering.
+ */
+ efx_filter_init_rx(&rxfilter, EFX_FILTER_PRI_REQUIRED, 0,
+ efx_rx_queue_index(
+ efx_channel_get_rx_queue(ptp->channel)));
+ rc = efx_filter_set_ipv4_local(&rxfilter, IPPROTO_UDP,
+ htonl(PTP_ADDRESS),
+ htons(PTP_EVENT_PORT));
+ if (rc != 0)
+ return rc;
+
+ rc = efx_filter_insert_filter(efx, &rxfilter, true);
+ if (rc < 0)
+ return rc;
+ ptp->rxfilter_event = rc;
+
+ efx_filter_init_rx(&rxfilter, EFX_FILTER_PRI_REQUIRED, 0,
+ efx_rx_queue_index(
+ efx_channel_get_rx_queue(ptp->channel)));
+ rc = efx_filter_set_ipv4_local(&rxfilter, IPPROTO_UDP,
+ htonl(PTP_ADDRESS),
+ htons(PTP_GENERAL_PORT));
+ if (rc != 0)
+ goto fail;
+
+ rc = efx_filter_insert_filter(efx, &rxfilter, true);
+ if (rc < 0)
+ goto fail;
+ ptp->rxfilter_general = rc;
+
+ ptp->rxfilter_installed = true;
+ return 0;
+
+fail:
+ efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
+ ptp->rxfilter_event);
+ return rc;
+}
+
+static int efx_ptp_start(struct efx_nic *efx)
+{
+ struct efx_ptp_data *ptp = efx->ptp_data;
+ int rc;
+
+ ptp->reset_required = false;
+
+ rc = efx_ptp_insert_multicast_filters(efx);
+ if (rc)
+ return rc;
+
+ rc = efx_ptp_enable(efx);
+ if (rc != 0)
+ goto fail;
+
+ ptp->evt_frag_idx = 0;
+ ptp->current_adjfreq = 0;
+
+ return 0;
+
+fail:
+ efx_ptp_remove_multicast_filters(efx);
+ return rc;
+}
+
+static int efx_ptp_stop(struct efx_nic *efx)
+{
+ struct efx_ptp_data *ptp = efx->ptp_data;
+ struct list_head *cursor;
+ struct list_head *next;
+ int rc;
+
+ if (ptp == NULL)
+ return 0;
+
+ rc = efx_ptp_disable(efx);
+
+ efx_ptp_remove_multicast_filters(efx);
+
+ /* Make sure RX packets are really delivered */
+ efx_ptp_deliver_rx_queue(&efx->ptp_data->rxq);
+ skb_queue_purge(&efx->ptp_data->txq);
+
+ /* Drop any pending receive events */
+ spin_lock_bh(&efx->ptp_data->evt_lock);
+ list_for_each_safe(cursor, next, &efx->ptp_data->evt_list) {
+ list_move(cursor, &efx->ptp_data->evt_free_list);
+ }
+ spin_unlock_bh(&efx->ptp_data->evt_lock);
+
+ return rc;
+}
+
+static int efx_ptp_restart(struct efx_nic *efx)
+{
+ if (efx->ptp_data && efx->ptp_data->enabled)
+ return efx_ptp_start(efx);
+ return 0;
+}
+
+static void efx_ptp_pps_worker(struct work_struct *work)
+{
+ struct efx_ptp_data *ptp =
+ container_of(work, struct efx_ptp_data, pps_work);
+ struct efx_nic *efx = ptp->efx;
+ struct ptp_clock_event ptp_evt;
+
+ if (efx_ptp_synchronize(efx, PTP_SYNC_ATTEMPTS))
+ return;
+
+ ptp_evt.type = PTP_CLOCK_PPSUSR;
+ ptp_evt.pps_times = ptp->host_time_pps;
+ ptp_clock_event(ptp->phc_clock, &ptp_evt);
+}
+
+static void efx_ptp_worker(struct work_struct *work)
+{
+ struct efx_ptp_data *ptp_data =
+ container_of(work, struct efx_ptp_data, work);
+ struct efx_nic *efx = ptp_data->efx;
+ struct sk_buff *skb;
+ struct sk_buff_head tempq;
+
+ if (ptp_data->reset_required) {
+ efx_ptp_stop(efx);
+ efx_ptp_start(efx);
+ return;
+ }
+
+ efx_ptp_drop_time_expired_events(efx);
+
+ __skb_queue_head_init(&tempq);
+ efx_ptp_process_events(efx, &tempq);
+
+ while ((skb = skb_dequeue(&ptp_data->txq)))
+ efx_ptp_xmit_skb(efx, skb);
+
+ while ((skb = __skb_dequeue(&tempq)))
+ efx_ptp_process_rx(efx, skb);
+}
+
+static const struct ptp_clock_info efx_phc_clock_info = {
+ .owner = THIS_MODULE,
+ .name = "sfc",
+ .max_adj = MAX_PPB,
+ .n_alarm = 0,
+ .n_ext_ts = 0,
+ .n_per_out = 0,
+ .n_pins = 0,
+ .pps = 1,
+ .adjfreq = efx_phc_adjfreq,
+ .adjtime = efx_phc_adjtime,
+ .gettime64 = efx_phc_gettime,
+ .settime64 = efx_phc_settime,
+ .enable = efx_phc_enable,
+};
+
+/* Initialise PTP state. */
+int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel)
+{
+ struct efx_ptp_data *ptp;
+ int rc = 0;
+ unsigned int pos;
+
+ ptp = kzalloc(sizeof(struct efx_ptp_data), GFP_KERNEL);
+ efx->ptp_data = ptp;
+ if (!efx->ptp_data)
+ return -ENOMEM;
+
+ ptp->efx = efx;
+ ptp->channel = channel;
+ ptp->rx_ts_inline = efx_nic_rev(efx) >= EFX_REV_HUNT_A0;
+
+ rc = efx_nic_alloc_buffer(efx, &ptp->start, sizeof(int), GFP_KERNEL);
+ if (rc != 0)
+ goto fail1;
+
+ skb_queue_head_init(&ptp->rxq);
+ skb_queue_head_init(&ptp->txq);
+ ptp->workwq = create_singlethread_workqueue("sfc_ptp");
+ if (!ptp->workwq) {
+ rc = -ENOMEM;
+ goto fail2;
+ }
+
+ INIT_WORK(&ptp->work, efx_ptp_worker);
+ ptp->config.flags = 0;
+ ptp->config.tx_type = HWTSTAMP_TX_OFF;
+ ptp->config.rx_filter = HWTSTAMP_FILTER_NONE;
+ INIT_LIST_HEAD(&ptp->evt_list);
+ INIT_LIST_HEAD(&ptp->evt_free_list);
+ spin_lock_init(&ptp->evt_lock);
+ for (pos = 0; pos < MAX_RECEIVE_EVENTS; pos++)
+ list_add(&ptp->rx_evts[pos].link, &ptp->evt_free_list);
+
+ /* Get the NIC PTP attributes and set up time conversions */
+ rc = efx_ptp_get_attributes(efx);
+ if (rc < 0)
+ goto fail3;
+
+ /* Get the timestamp corrections */
+ rc = efx_ptp_get_timestamp_corrections(efx);
+ if (rc < 0)
+ goto fail3;
+
+ if (efx->mcdi->fn_flags &
+ (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY)) {
+ ptp->phc_clock_info = efx_phc_clock_info;
+ ptp->phc_clock = ptp_clock_register(&ptp->phc_clock_info,
+ &efx->pci_dev->dev);
+ if (IS_ERR(ptp->phc_clock)) {
+ rc = PTR_ERR(ptp->phc_clock);
+ goto fail3;
+ }
+
+ INIT_WORK(&ptp->pps_work, efx_ptp_pps_worker);
+ ptp->pps_workwq = create_singlethread_workqueue("sfc_pps");
+ if (!ptp->pps_workwq) {
+ rc = -ENOMEM;
+ goto fail4;
+ }
+ }
+ ptp->nic_ts_enabled = false;
+
+ return 0;
+fail4:
+ ptp_clock_unregister(efx->ptp_data->phc_clock);
+
+fail3:
+ destroy_workqueue(efx->ptp_data->workwq);
+
+fail2:
+ efx_nic_free_buffer(efx, &ptp->start);
+
+fail1:
+ kfree(efx->ptp_data);
+ efx->ptp_data = NULL;
+
+ return rc;
+}
+
+/* Initialise PTP channel.
+ *
+ * Setting core_index to zero causes the queue to be initialised; it doesn't
+ * clash with 'rxq0' because ptp.c doesn't use skb_record_rx_queue.
+ */
+static int efx_ptp_probe_channel(struct efx_channel *channel)
+{
+ struct efx_nic *efx = channel->efx;
+
+ channel->irq_moderation = 0;
+ channel->rx_queue.core_index = 0;
+
+ return efx_ptp_probe(efx, channel);
+}
+
+void efx_ptp_remove(struct efx_nic *efx)
+{
+ if (!efx->ptp_data)
+ return;
+
+ (void)efx_ptp_disable(efx);
+
+ cancel_work_sync(&efx->ptp_data->work);
+ cancel_work_sync(&efx->ptp_data->pps_work);
+
+ skb_queue_purge(&efx->ptp_data->rxq);
+ skb_queue_purge(&efx->ptp_data->txq);
+
+ if (efx->ptp_data->phc_clock) {
+ destroy_workqueue(efx->ptp_data->pps_workwq);
+ ptp_clock_unregister(efx->ptp_data->phc_clock);
+ }
+
+ destroy_workqueue(efx->ptp_data->workwq);
+
+ efx_nic_free_buffer(efx, &efx->ptp_data->start);
+ kfree(efx->ptp_data);
+}
+
+static void efx_ptp_remove_channel(struct efx_channel *channel)
+{
+ efx_ptp_remove(channel->efx);
+}
+
+static void efx_ptp_get_channel_name(struct efx_channel *channel,
+ char *buf, size_t len)
+{
+ snprintf(buf, len, "%s-ptp", channel->efx->name);
+}
+
+/* Determine whether this packet should be processed by the PTP module
+ * or transmitted conventionally.
+ */
+bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb)
+{
+ return efx->ptp_data &&
+ efx->ptp_data->enabled &&
+ skb->len >= PTP_MIN_LENGTH &&
+ skb->len <= MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM &&
+ likely(skb->protocol == htons(ETH_P_IP)) &&
+ skb_transport_header_was_set(skb) &&
+ skb_network_header_len(skb) >= sizeof(struct iphdr) &&
+ ip_hdr(skb)->protocol == IPPROTO_UDP &&
+ skb_headlen(skb) >=
+ skb_transport_offset(skb) + sizeof(struct udphdr) &&
+ udp_hdr(skb)->dest == htons(PTP_EVENT_PORT);
+}
+
+/* Receive a PTP packet. Packets are queued until the arrival of
+ * the receive timestamp from the MC - this will probably occur after the
+ * packet arrival because of the processing in the MC.
+ */
+static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
+{
+ struct efx_nic *efx = channel->efx;
+ struct efx_ptp_data *ptp = efx->ptp_data;
+ struct efx_ptp_match *match = (struct efx_ptp_match *)skb->cb;
+ u8 *match_data_012, *match_data_345;
+ unsigned int version;
+ u8 *data;
+
+ match->expiry = jiffies + msecs_to_jiffies(PKT_EVENT_LIFETIME_MS);
+
+ /* Correct version? */
+ if (ptp->mode == MC_CMD_PTP_MODE_V1) {
+ if (!pskb_may_pull(skb, PTP_V1_MIN_LENGTH)) {
+ return false;
+ }
+ data = skb->data;
+ version = ntohs(*(__be16 *)&data[PTP_V1_VERSION_OFFSET]);
+ if (version != PTP_VERSION_V1) {
+ return false;
+ }
+
+ /* PTP V1 uses all six bytes of the UUID to match the packet
+ * to the timestamp
+ */
+ match_data_012 = data + PTP_V1_UUID_OFFSET;
+ match_data_345 = data + PTP_V1_UUID_OFFSET + 3;
+ } else {
+ if (!pskb_may_pull(skb, PTP_V2_MIN_LENGTH)) {
+ return false;
+ }
+ data = skb->data;
+ version = data[PTP_V2_VERSION_OFFSET];
+ if ((version & PTP_VERSION_V2_MASK) != PTP_VERSION_V2) {
+ return false;
+ }
+
+ /* The original V2 implementation uses bytes 2-7 of
+ * the UUID to match the packet to the timestamp. This
+ * discards two of the bytes of the MAC address used
+ * to create the UUID (SF bug 33070). The PTP V2
+ * enhanced mode fixes this issue and uses bytes 0-2
+ * and bytes 5-7 of the UUID.
+ */
+ match_data_345 = data + PTP_V2_UUID_OFFSET + 5;
+ if (ptp->mode == MC_CMD_PTP_MODE_V2) {
+ match_data_012 = data + PTP_V2_UUID_OFFSET + 2;
+ } else {
+ match_data_012 = data + PTP_V2_UUID_OFFSET + 0;
+ BUG_ON(ptp->mode != MC_CMD_PTP_MODE_V2_ENHANCED);
+ }
+ }
+
+ /* Does this packet require timestamping? */
+ if (ntohs(*(__be16 *)&data[PTP_DPORT_OFFSET]) == PTP_EVENT_PORT) {
+ match->state = PTP_PACKET_STATE_UNMATCHED;
+
+ /* We expect the sequence number to be in the same position in
+ * the packet for PTP V1 and V2
+ */
+ BUILD_BUG_ON(PTP_V1_SEQUENCE_OFFSET != PTP_V2_SEQUENCE_OFFSET);
+ BUILD_BUG_ON(PTP_V1_SEQUENCE_LENGTH != PTP_V2_SEQUENCE_LENGTH);
+
+ /* Extract UUID/Sequence information */
+ match->words[0] = (match_data_012[0] |
+ (match_data_012[1] << 8) |
+ (match_data_012[2] << 16) |
+ (match_data_345[0] << 24));
+ match->words[1] = (match_data_345[1] |
+ (match_data_345[2] << 8) |
+ (data[PTP_V1_SEQUENCE_OFFSET +
+ PTP_V1_SEQUENCE_LENGTH - 1] <<
+ 16));
+ } else {
+ match->state = PTP_PACKET_STATE_MATCH_UNWANTED;
+ }
+
+ skb_queue_tail(&ptp->rxq, skb);
+ queue_work(ptp->workwq, &ptp->work);
+
+ return true;
+}
+
+/* Transmit a PTP packet. The packet has to be sent by the MC
+ * itself, through an MCDI call. MCDI calls aren't permitted
+ * in the transmit path, so defer the actual transmission to a suitable worker.
+ */
+int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb)
+{
+ struct efx_ptp_data *ptp = efx->ptp_data;
+
+ skb_queue_tail(&ptp->txq, skb);
+
+ if ((udp_hdr(skb)->dest == htons(PTP_EVENT_PORT)) &&
+ (skb->len <= MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM))
+ efx_xmit_hwtstamp_pending(skb);
+ queue_work(ptp->workwq, &ptp->work);
+
+ return NETDEV_TX_OK;
+}
+
+int efx_ptp_get_mode(struct efx_nic *efx)
+{
+ return efx->ptp_data->mode;
+}
+
+int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
+ unsigned int new_mode)
+{
+ if ((enable_wanted != efx->ptp_data->enabled) ||
+ (enable_wanted && (efx->ptp_data->mode != new_mode))) {
+ int rc = 0;
+
+ if (enable_wanted) {
+ /* Change of mode requires disable */
+ if (efx->ptp_data->enabled &&
+ (efx->ptp_data->mode != new_mode)) {
+ efx->ptp_data->enabled = false;
+ rc = efx_ptp_stop(efx);
+ if (rc != 0)
+ return rc;
+ }
+
+ /* Set new operating mode and establish
+ * baseline synchronisation, which must
+ * succeed.
+ */
+ efx->ptp_data->mode = new_mode;
+ if (netif_running(efx->net_dev))
+ rc = efx_ptp_start(efx);
+ if (rc == 0) {
+ rc = efx_ptp_synchronize(efx,
+ PTP_SYNC_ATTEMPTS * 2);
+ if (rc != 0)
+ efx_ptp_stop(efx);
+ }
+ } else {
+ rc = efx_ptp_stop(efx);
+ }
+
+ if (rc != 0)
+ return rc;
+
+ efx->ptp_data->enabled = enable_wanted;
+ }
+
+ return 0;
+}
+
+static int efx_ptp_ts_init(struct efx_nic *efx, struct hwtstamp_config *init)
+{
+ int rc;
+
+ if (init->flags)
+ return -EINVAL;
+
+ if ((init->tx_type != HWTSTAMP_TX_OFF) &&
+ (init->tx_type != HWTSTAMP_TX_ON))
+ return -ERANGE;
+
+ rc = efx->type->ptp_set_ts_config(efx, init);
+ if (rc)
+ return rc;
+
+ efx->ptp_data->config = *init;
+ return 0;
+}
+
+void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info)
+{
+ struct efx_ptp_data *ptp = efx->ptp_data;
+ struct efx_nic *primary = efx->primary;
+
+ ASSERT_RTNL();
+
+ if (!ptp)
+ return;
+
+ ts_info->so_timestamping |= (SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE);
+ if (primary && primary->ptp_data && primary->ptp_data->phc_clock)
+ ts_info->phc_index =
+ ptp_clock_index(primary->ptp_data->phc_clock);
+ ts_info->tx_types = 1 << HWTSTAMP_TX_OFF | 1 << HWTSTAMP_TX_ON;
+ ts_info->rx_filters = ptp->efx->type->hwtstamp_filters;
+}
+
+int efx_ptp_set_ts_config(struct efx_nic *efx, struct ifreq *ifr)
+{
+ struct hwtstamp_config config;
+ int rc;
+
+ /* Not a PTP enabled port */
+ if (!efx->ptp_data)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ rc = efx_ptp_ts_init(efx, &config);
+ if (rc != 0)
+ return rc;
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config))
+ ? -EFAULT : 0;
+}
+
+int efx_ptp_get_ts_config(struct efx_nic *efx, struct ifreq *ifr)
+{
+ if (!efx->ptp_data)
+ return -EOPNOTSUPP;
+
+ return copy_to_user(ifr->ifr_data, &efx->ptp_data->config,
+ sizeof(efx->ptp_data->config)) ? -EFAULT : 0;
+}
+
+static void ptp_event_failure(struct efx_nic *efx, int expected_frag_len)
+{
+ struct efx_ptp_data *ptp = efx->ptp_data;
+
+ netif_err(efx, hw, efx->net_dev,
+ "PTP unexpected event length: got %d expected %d\n",
+ ptp->evt_frag_idx, expected_frag_len);
+ ptp->reset_required = true;
+ queue_work(ptp->workwq, &ptp->work);
+}
+
+/* Process a completed receive event. Put it on the event queue and
+ * start the worker thread. This is required because events and their
+ * corresponding packets may arrive in either order.
+ */
+static void ptp_event_rx(struct efx_nic *efx, struct efx_ptp_data *ptp)
+{
+ struct efx_ptp_event_rx *evt = NULL;
+
+ if (WARN_ON_ONCE(ptp->rx_ts_inline))
+ return;
+
+ if (ptp->evt_frag_idx != 3) {
+ ptp_event_failure(efx, 3);
+ return;
+ }
+
+ spin_lock_bh(&ptp->evt_lock);
+ if (!list_empty(&ptp->evt_free_list)) {
+ evt = list_first_entry(&ptp->evt_free_list,
+ struct efx_ptp_event_rx, link);
+ list_del(&evt->link);
+
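+ /* Reassemble the match data for this timestamp from the three
+ * event fragments, convert the hardware timestamp, and queue the
+ * event so the worker can pair it with its packet.
+ */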
+ evt->seq0 = EFX_QWORD_FIELD(ptp->evt_frags[2], MCDI_EVENT_DATA);
+ evt->seq1 = (EFX_QWORD_FIELD(ptp->evt_frags[2],
+ MCDI_EVENT_SRC) |
+ (EFX_QWORD_FIELD(ptp->evt_frags[1],
+ MCDI_EVENT_SRC) << 8) |
+ (EFX_QWORD_FIELD(ptp->evt_frags[0],
+ MCDI_EVENT_SRC) << 16));
+ evt->hwtimestamp = efx->ptp_data->nic_to_kernel_time(
+ EFX_QWORD_FIELD(ptp->evt_frags[0], MCDI_EVENT_DATA),
+ EFX_QWORD_FIELD(ptp->evt_frags[1], MCDI_EVENT_DATA),
+ ptp->ts_corrections.rx);
+ evt->expiry = jiffies + msecs_to_jiffies(PKT_EVENT_LIFETIME_MS);
+ list_add_tail(&evt->link, &ptp->evt_list);
+
+ queue_work(ptp->workwq, &ptp->work);
+ } else if (net_ratelimit()) {
+ /* Log a rate-limited error message. */
+ netif_err(efx, rx_err, efx->net_dev, "PTP event queue overflow\n");
+ }
+ spin_unlock_bh(&ptp->evt_lock);
+}
+
+static void ptp_event_fault(struct efx_nic *efx, struct efx_ptp_data *ptp)
+{
+ int code = EFX_QWORD_FIELD(ptp->evt_frags[0], MCDI_EVENT_DATA);
+ if (ptp->evt_frag_idx != 1) {
+ ptp_event_failure(efx, 1);
+ return;
+ }
+
+ netif_err(efx, hw, efx->net_dev, "PTP error %d\n", code);
+}
+
+static void ptp_event_pps(struct efx_nic *efx, struct efx_ptp_data *ptp)
+{
+ if (ptp->nic_ts_enabled)
+ queue_work(ptp->pps_workwq, &ptp->pps_work);
+}
+
+void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev)
+{
+ struct efx_ptp_data *ptp = efx->ptp_data;
+ int code = EFX_QWORD_FIELD(*ev, MCDI_EVENT_CODE);
+
+ if (!ptp) {
+ if (net_ratelimit())
+ netif_warn(efx, drv, efx->net_dev,
+ "Received PTP event but PTP not set up\n");
+ return;
+ }
+
+ if (!ptp->enabled)
+ return;
+
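+ /* MC PTP events can span multiple event-queue entries; buffer the
+ * fragments in evt_frags[] until one arrives without the CONT flag,
+ * then handle the completed event.
+ */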
+ if (ptp->evt_frag_idx == 0) {
+ ptp->evt_code = code;
+ } else if (ptp->evt_code != code) {
+ netif_err(efx, hw, efx->net_dev,
+ "PTP out of sequence event %d\n", code);
+ ptp->evt_frag_idx = 0;
+ }
+
+ ptp->evt_frags[ptp->evt_frag_idx++] = *ev;
+ if (!MCDI_EVENT_FIELD(*ev, CONT)) {
+ /* Process resulting event */
+ switch (code) {
+ case MCDI_EVENT_CODE_PTP_RX:
+ ptp_event_rx(efx, ptp);
+ break;
+ case MCDI_EVENT_CODE_PTP_FAULT:
+ ptp_event_fault(efx, ptp);
+ break;
+ case MCDI_EVENT_CODE_PTP_PPS:
+ ptp_event_pps(efx, ptp);
+ break;
+ default:
+ netif_err(efx, hw, efx->net_dev,
+ "PTP unknown event %d\n", code);
+ break;
+ }
+ ptp->evt_frag_idx = 0;
+ } else if (MAX_EVENT_FRAGS == ptp->evt_frag_idx) {
+ netif_err(efx, hw, efx->net_dev,
+ "PTP too many event fragments\n");
+ ptp->evt_frag_idx = 0;
+ }
+}
+
+void efx_time_sync_event(struct efx_channel *channel, efx_qword_t *ev)
+{
+ channel->sync_timestamp_major = MCDI_EVENT_FIELD(*ev, PTP_TIME_MAJOR);
+ channel->sync_timestamp_minor =
+ MCDI_EVENT_FIELD(*ev, PTP_TIME_MINOR_26_19) << 19;
+ /* If sync events have been disabled then we want to silently ignore
+ * this event, so throw away the result.
+ */
+ (void) cmpxchg(&channel->sync_events_state, SYNC_EVENTS_REQUESTED,
+ SYNC_EVENTS_VALID);
+}
+
+/* make some assumptions about the time representation rather than abstract it,
+ * since we currently only support one type of inline timestamping and only on
+ * EF10.
+ */
+#define MINOR_TICKS_PER_SECOND 0x8000000
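+/* 0x8000000 is 2^27, so a minor tick is 1e9 / 2^27 ns (just under 7.5 ns)
+ * and the minor field wraps once per second.
+ */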
+/* Fuzz factor for sync events to be out of order with RX events */
+#define FUZZ (MINOR_TICKS_PER_SECOND / 10)
+#define EXPECTED_SYNC_EVENTS_PER_SECOND 4
+
+static inline u32 efx_rx_buf_timestamp_minor(struct efx_nic *efx, const u8 *eh)
+{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+ return __le32_to_cpup((const __le32 *)(eh + efx->rx_packet_ts_offset));
+#else
+ const u8 *data = eh + efx->rx_packet_ts_offset;
+ return (u32)data[0] |
+ (u32)data[1] << 8 |
+ (u32)data[2] << 16 |
+ (u32)data[3] << 24;
+#endif
+}
+
+void __efx_rx_skb_attach_timestamp(struct efx_channel *channel,
+ struct sk_buff *skb)
+{
+ struct efx_nic *efx = channel->efx;
+ u32 pkt_timestamp_major, pkt_timestamp_minor;
+ u32 diff, carry;
+ struct skb_shared_hwtstamps *timestamps;
+
+ pkt_timestamp_minor = (efx_rx_buf_timestamp_minor(efx,
+ skb_mac_header(skb)) +
+ (u32) efx->ptp_data->ts_corrections.rx) &
+ (MINOR_TICKS_PER_SECOND - 1);
+
+ /* get the difference between the packet and sync timestamps,
+ * modulo one second
+ */
+ diff = (pkt_timestamp_minor - channel->sync_timestamp_minor) &
+ (MINOR_TICKS_PER_SECOND - 1);
+ /* do we roll over a second boundary and need to carry the one? */
+ carry = channel->sync_timestamp_minor + diff > MINOR_TICKS_PER_SECOND ?
+ 1 : 0;
+
+ if (diff <= MINOR_TICKS_PER_SECOND / EXPECTED_SYNC_EVENTS_PER_SECOND +
+ FUZZ) {
+ /* packet is ahead of the sync event by a quarter of a second or
+ * less (allowing for fuzz)
+ */
+ pkt_timestamp_major = channel->sync_timestamp_major + carry;
+ } else if (diff >= MINOR_TICKS_PER_SECOND - FUZZ) {
+ /* packet is behind the sync event but within the fuzz factor.
+ * This means the RX packet and sync event crossed as they were
+ * placed on the event queue, which can sometimes happen.
+ */
+ pkt_timestamp_major = channel->sync_timestamp_major - 1 + carry;
+ } else {
+ /* it's outside tolerance in both directions. this might be
+ * indicative of us missing sync events for some reason, so
+ * we'll call it an error rather than risk giving a bogus
+ * timestamp.
+ */
+ netif_vdbg(efx, drv, efx->net_dev,
+ "packet timestamp %x too far from sync event %x:%x\n",
+ pkt_timestamp_minor, channel->sync_timestamp_major,
+ channel->sync_timestamp_minor);
+ return;
+ }
+
+ /* attach the timestamps to the skb */
+ timestamps = skb_hwtstamps(skb);
+ timestamps->hwtstamp =
+ efx_ptp_s27_to_ktime(pkt_timestamp_major, pkt_timestamp_minor);
+}
+
+static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
+{
+ struct efx_ptp_data *ptp_data = container_of(ptp,
+ struct efx_ptp_data,
+ phc_clock_info);
+ struct efx_nic *efx = ptp_data->efx;
+ MCDI_DECLARE_BUF(inadj, MC_CMD_PTP_IN_ADJUST_LEN);
+ s64 adjustment_ns;
+ int rc;
+
+ if (delta > MAX_PPB)
+ delta = MAX_PPB;
+ else if (delta < -MAX_PPB)
+ delta = -MAX_PPB;
+
+ /* Convert ppb to fixed point ns. */
+ adjustment_ns = (((s64)delta * PPB_SCALE_WORD) >>
+ (PPB_EXTRA_BITS + MAX_PPB_BITS));
+
+ MCDI_SET_DWORD(inadj, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST);
+ MCDI_SET_DWORD(inadj, PTP_IN_PERIPH_ID, 0);
+ MCDI_SET_QWORD(inadj, PTP_IN_ADJUST_FREQ, adjustment_ns);
+ MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_SECONDS, 0);
+ MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_NANOSECONDS, 0);
+ rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inadj, sizeof(inadj),
+ NULL, 0, NULL);
+ if (rc != 0)
+ return rc;
+
+ ptp_data->current_adjfreq = adjustment_ns;
+ return 0;
+}
+
+static int efx_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ u32 nic_major, nic_minor;
+ struct efx_ptp_data *ptp_data = container_of(ptp,
+ struct efx_ptp_data,
+ phc_clock_info);
+ struct efx_nic *efx = ptp_data->efx;
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_ADJUST_LEN);
+
+ efx->ptp_data->ns_to_nic_time(delta, &nic_major, &nic_minor);
+
+ MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST);
+ MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
+ MCDI_SET_QWORD(inbuf, PTP_IN_ADJUST_FREQ, ptp_data->current_adjfreq);
+ MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_MAJOR, nic_major);
+ MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_MINOR, nic_minor);
+ return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+}
+
+static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+{
+ struct efx_ptp_data *ptp_data = container_of(ptp,
+ struct efx_ptp_data,
+ phc_clock_info);
+ struct efx_nic *efx = ptp_data->efx;
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_READ_NIC_TIME_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_READ_NIC_TIME_LEN);
+ int rc;
+ ktime_t kt;
+
+ MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_READ_NIC_TIME);
+ MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), NULL);
+ if (rc != 0)
+ return rc;
+
+ kt = ptp_data->nic_to_kernel_time(
+ MCDI_DWORD(outbuf, PTP_OUT_READ_NIC_TIME_MAJOR),
+ MCDI_DWORD(outbuf, PTP_OUT_READ_NIC_TIME_MINOR), 0);
+ *ts = ktime_to_timespec64(kt);
+ return 0;
+}
+
+static int efx_phc_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *e_ts)
+{
+ /* Get the current NIC time via efx_phc_gettime, subtract it from
+ * the desired time to get the offset, then call efx_phc_adjtime
+ * with that offset.
+ */
+ int rc;
+ struct timespec64 time_now;
+ struct timespec64 delta;
+
+ rc = efx_phc_gettime(ptp, &time_now);
+ if (rc != 0)
+ return rc;
+
+ delta = timespec64_sub(*e_ts, time_now);
+
+ rc = efx_phc_adjtime(ptp, timespec64_to_ns(&delta));
+ if (rc != 0)
+ return rc;
+
+ return 0;
+}
+
+static int efx_phc_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *request,
+ int enable)
+{
+ struct efx_ptp_data *ptp_data = container_of(ptp,
+ struct efx_ptp_data,
+ phc_clock_info);
+ if (request->type != PTP_CLK_REQ_PPS)
+ return -EOPNOTSUPP;
+
+ ptp_data->nic_ts_enabled = !!enable;
+ return 0;
+}
+
+static const struct efx_channel_type efx_ptp_channel_type = {
+ .handle_no_channel = efx_ptp_handle_no_channel,
+ .pre_probe = efx_ptp_probe_channel,
+ .post_remove = efx_ptp_remove_channel,
+ .get_name = efx_ptp_get_channel_name,
+ /* no copy operation; there is no need to reallocate this channel */
+ .receive_skb = efx_ptp_rx,
+ .keep_eventq = false,
+};
+
+void efx_ptp_defer_probe_with_channel(struct efx_nic *efx)
+{
+ /* Check whether PTP is implemented on this NIC. The DISABLE
+ * operation will succeed if and only if it is implemented.
+ */
+ if (efx_ptp_disable(efx) == 0)
+ efx->extra_channel_type[EFX_EXTRA_CHANNEL_PTP] =
+ &efx_ptp_channel_type;
+}
+
+void efx_ptp_start_datapath(struct efx_nic *efx)
+{
+ if (efx_ptp_restart(efx))
+ netif_err(efx, drv, efx->net_dev, "Failed to restart PTP.\n");
+ /* re-enable timestamping if it was previously enabled */
+ if (efx->type->ptp_set_ts_sync_events)
+ efx->type->ptp_set_ts_sync_events(efx, true, true);
+}
+
+void efx_ptp_stop_datapath(struct efx_nic *efx)
+{
+ /* temporarily disable timestamping */
+ if (efx->type->ptp_set_ts_sync_events)
+ efx->type->ptp_set_ts_sync_events(efx, false, true);
+ efx_ptp_stop(efx);
+}
diff --git a/kernel/drivers/net/ethernet/sfc/qt202x_phy.c b/kernel/drivers/net/ethernet/sfc/qt202x_phy.c
new file mode 100644
index 000000000..efa3612af
--- /dev/null
+++ b/kernel/drivers/net/ethernet/sfc/qt202x_phy.c
@@ -0,0 +1,495 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2006-2012 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+/*
+ * Driver for AMCC QT202x SFP+ and XFP adapters; see www.amcc.com for details
+ */
+
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include "efx.h"
+#include "mdio_10g.h"
+#include "phy.h"
+#include "nic.h"
+
+#define QT202X_REQUIRED_DEVS (MDIO_DEVS_PCS | \
+ MDIO_DEVS_PMAPMD | \
+ MDIO_DEVS_PHYXS)
+
+#define QT202X_LOOPBACKS ((1 << LOOPBACK_PCS) | \
+ (1 << LOOPBACK_PMAPMD) | \
+ (1 << LOOPBACK_PHYXS_WS))
+
+/****************************************************************************/
+/* Quake-specific MDIO registers */
+#define MDIO_QUAKE_LED0_REG (0xD006)
+
+/* QT2025C only */
+#define PCS_FW_HEARTBEAT_REG 0xd7ee
+#define PCS_FW_HEARTB_LBN 0
+#define PCS_FW_HEARTB_WIDTH 8
+#define PCS_FW_PRODUCT_CODE_1 0xd7f0
+#define PCS_FW_VERSION_1 0xd7f3
+#define PCS_FW_BUILD_1 0xd7f6
+#define PCS_UC8051_STATUS_REG 0xd7fd
+#define PCS_UC_STATUS_LBN 0
+#define PCS_UC_STATUS_WIDTH 8
+#define PCS_UC_STATUS_FW_SAVE 0x20
+#define PMA_PMD_MODE_REG 0xc301
+#define PMA_PMD_RXIN_SEL_LBN 6
+#define PMA_PMD_FTX_CTRL2_REG 0xc309
+#define PMA_PMD_FTX_STATIC_LBN 13
+#define PMA_PMD_VEND1_REG 0xc001
+#define PMA_PMD_VEND1_LBTXD_LBN 15
+#define PCS_VEND1_REG 0xc000
+#define PCS_VEND1_LBTXD_LBN 5
+
+void falcon_qt202x_set_led(struct efx_nic *p, int led, int mode)
+{
+ int addr = MDIO_QUAKE_LED0_REG + led;
+ efx_mdio_write(p, MDIO_MMD_PMAPMD, addr, mode);
+}
+
+struct qt202x_phy_data {
+ enum efx_phy_mode phy_mode;
+ bool bug17190_in_bad_state;
+ unsigned long bug17190_timer;
+ u32 firmware_ver;
+};
+
+#define QT2022C2_MAX_RESET_TIME 500
+#define QT2022C2_RESET_WAIT 10
+
+#define QT2025C_MAX_HEARTB_TIME (5 * HZ)
+#define QT2025C_HEARTB_WAIT 100
+#define QT2025C_MAX_FWSTART_TIME (25 * HZ / 10)
+#define QT2025C_FWSTART_WAIT 100
+
+#define BUG17190_INTERVAL (2 * HZ)
+
+static int qt2025c_wait_heartbeat(struct efx_nic *efx)
+{
+ unsigned long timeout = jiffies + QT2025C_MAX_HEARTB_TIME;
+ int reg, old_counter = 0;
+
+ /* Wait for firmware heartbeat to start */
+ for (;;) {
+ int counter;
+ reg = efx_mdio_read(efx, MDIO_MMD_PCS, PCS_FW_HEARTBEAT_REG);
+ if (reg < 0)
+ return reg;
+ counter = ((reg >> PCS_FW_HEARTB_LBN) &
+ ((1 << PCS_FW_HEARTB_WIDTH) - 1));
+ if (old_counter == 0)
+ old_counter = counter;
+ else if (counter != old_counter)
+ break;
+ if (time_after(jiffies, timeout)) {
+ /* Some cables have EEPROMs that conflict with the
+ * PHY's on-board EEPROM so it cannot load firmware */
+ netif_err(efx, hw, efx->net_dev,
+ "If an SFP+ direct attach cable is"
+ " connected, please check that it complies"
+ " with the SFP+ specification\n");
+ return -ETIMEDOUT;
+ }
+ msleep(QT2025C_HEARTB_WAIT);
+ }
+
+ return 0;
+}
+
+static int qt2025c_wait_fw_status_good(struct efx_nic *efx)
+{
+ unsigned long timeout = jiffies + QT2025C_MAX_FWSTART_TIME;
+ int reg;
+
+ /* Wait for firmware status to look good */
+ for (;;) {
+ reg = efx_mdio_read(efx, MDIO_MMD_PCS, PCS_UC8051_STATUS_REG);
+ if (reg < 0)
+ return reg;
+ if ((reg &
+ ((1 << PCS_UC_STATUS_WIDTH) - 1) << PCS_UC_STATUS_LBN) >=
+ PCS_UC_STATUS_FW_SAVE)
+ break;
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+ msleep(QT2025C_FWSTART_WAIT);
+ }
+
+ return 0;
+}
+
+static void qt2025c_restart_firmware(struct efx_nic *efx)
+{
+ /* Restart microcontroller execution of firmware from RAM */
+ efx_mdio_write(efx, 3, 0xe854, 0x00c0);
+ efx_mdio_write(efx, 3, 0xe854, 0x0040);
+ msleep(50);
+}
+
+static int qt2025c_wait_reset(struct efx_nic *efx)
+{
+ int rc;
+
+ rc = qt2025c_wait_heartbeat(efx);
+ if (rc != 0)
+ return rc;
+
+ rc = qt2025c_wait_fw_status_good(efx);
+ if (rc == -ETIMEDOUT) {
+ /* Bug 17689: occasionally heartbeat starts but firmware status
+ * code never progresses beyond 0x00. Try again, once, after
+ * restarting execution of the firmware image. */
+ netif_dbg(efx, hw, efx->net_dev,
+ "bashing QT2025C microcontroller\n");
+ qt2025c_restart_firmware(efx);
+ rc = qt2025c_wait_heartbeat(efx);
+ if (rc != 0)
+ return rc;
+ rc = qt2025c_wait_fw_status_good(efx);
+ }
+
+ return rc;
+}
+
+static void qt2025c_firmware_id(struct efx_nic *efx)
+{
+ struct qt202x_phy_data *phy_data = efx->phy_data;
+ u8 firmware_id[9];
+ size_t i;
+
+ for (i = 0; i < sizeof(firmware_id); i++)
+ firmware_id[i] = efx_mdio_read(efx, MDIO_MMD_PCS,
+ PCS_FW_PRODUCT_CODE_1 + i);
+ netif_info(efx, probe, efx->net_dev,
+ "QT2025C firmware %xr%d v%d.%d.%d.%d [20%02d-%02d-%02d]\n",
+ (firmware_id[0] << 8) | firmware_id[1], firmware_id[2],
+ firmware_id[3] >> 4, firmware_id[3] & 0xf,
+ firmware_id[4], firmware_id[5],
+ firmware_id[6], firmware_id[7], firmware_id[8]);
+ phy_data->firmware_ver = ((firmware_id[3] & 0xf0) << 20) |
+ ((firmware_id[3] & 0x0f) << 16) |
+ (firmware_id[4] << 8) | firmware_id[5];
+}
+
+static void qt2025c_bug17190_workaround(struct efx_nic *efx)
+{
+ struct qt202x_phy_data *phy_data = efx->phy_data;
+
+ /* The PHY can get stuck in a state where it reports PHY_XS and PMA/PMD
+ * layers up, but PCS down (no block_lock). If we notice this state
+ * persisting for a couple of seconds, we switch PMA/PMD loopback
+ * briefly on and then off again, which is normally sufficient to
+ * recover it.
+ */
+ if (efx->link_state.up ||
+ !efx_mdio_links_ok(efx, MDIO_DEVS_PMAPMD | MDIO_DEVS_PHYXS)) {
+ phy_data->bug17190_in_bad_state = false;
+ return;
+ }
+
+ if (!phy_data->bug17190_in_bad_state) {
+ phy_data->bug17190_in_bad_state = true;
+ phy_data->bug17190_timer = jiffies + BUG17190_INTERVAL;
+ return;
+ }
+
+ if (time_after_eq(jiffies, phy_data->bug17190_timer)) {
+ netif_dbg(efx, hw, efx->net_dev, "bashing QT2025C PMA/PMD\n");
+ efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_CTRL1,
+ MDIO_PMA_CTRL1_LOOPBACK, true);
+ msleep(100);
+ efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_CTRL1,
+ MDIO_PMA_CTRL1_LOOPBACK, false);
+ phy_data->bug17190_timer = jiffies + BUG17190_INTERVAL;
+ }
+}
+
+static int qt2025c_select_phy_mode(struct efx_nic *efx)
+{
+ struct qt202x_phy_data *phy_data = efx->phy_data;
+ struct falcon_board *board = falcon_board(efx);
+ int reg, rc, i;
+ uint16_t phy_op_mode;
+
+ /* Only 2.0.1.0+ PHY firmware supports the more optimal SFP+
+ * Self-Configure mode. Don't attempt any switching if we encounter
+ * older firmware. */
+ if (phy_data->firmware_ver < 0x02000100)
+ return 0;
+
+ /* In general we will get optimal behaviour in "SFP+ Self-Configure"
+ * mode; however, that powers down most of the PHY when no module is
+ * present, so we must use a different mode (any fixed mode will do)
+ * to be sure that loopbacks will work. */
+ phy_op_mode = (efx->loopback_mode == LOOPBACK_NONE) ? 0x0038 : 0x0020;
+
+ /* Only change mode if really necessary */
+ reg = efx_mdio_read(efx, 1, 0xc319);
+ if ((reg & 0x0038) == phy_op_mode)
+ return 0;
+ netif_dbg(efx, hw, efx->net_dev, "Switching PHY to mode 0x%04x\n",
+ phy_op_mode);
+
+ /* This sequence replicates the register writes configured in the boot
+ * EEPROM (including the differences between board revisions), except
+ * that the operating mode is changed, and the PHY is prevented from
+ * unnecessarily reloading the main firmware image again. */
+ efx_mdio_write(efx, 1, 0xc300, 0x0000);
+ /* (Note: this portion of the boot EEPROM sequence, which bit-bashes 9
+ * STOPs onto the firmware/module I2C bus to reset it, varies across
+ * board revisions, as the bus is connected to different GPIO/LED
+ * outputs on the PHY.) */
+ if (board->major == 0 && board->minor < 2) {
+ efx_mdio_write(efx, 1, 0xc303, 0x4498);
+ for (i = 0; i < 9; i++) {
+ efx_mdio_write(efx, 1, 0xc303, 0x4488);
+ efx_mdio_write(efx, 1, 0xc303, 0x4480);
+ efx_mdio_write(efx, 1, 0xc303, 0x4490);
+ efx_mdio_write(efx, 1, 0xc303, 0x4498);
+ }
+ } else {
+ efx_mdio_write(efx, 1, 0xc303, 0x0920);
+ efx_mdio_write(efx, 1, 0xd008, 0x0004);
+ for (i = 0; i < 9; i++) {
+ efx_mdio_write(efx, 1, 0xc303, 0x0900);
+ efx_mdio_write(efx, 1, 0xd008, 0x0005);
+ efx_mdio_write(efx, 1, 0xc303, 0x0920);
+ efx_mdio_write(efx, 1, 0xd008, 0x0004);
+ }
+ efx_mdio_write(efx, 1, 0xc303, 0x4900);
+ }
+ efx_mdio_write(efx, 1, 0xc303, 0x4900);
+ efx_mdio_write(efx, 1, 0xc302, 0x0004);
+ efx_mdio_write(efx, 1, 0xc316, 0x0013);
+ efx_mdio_write(efx, 1, 0xc318, 0x0054);
+ efx_mdio_write(efx, 1, 0xc319, phy_op_mode);
+ efx_mdio_write(efx, 1, 0xc31a, 0x0098);
+ efx_mdio_write(efx, 3, 0x0026, 0x0e00);
+ efx_mdio_write(efx, 3, 0x0027, 0x0013);
+ efx_mdio_write(efx, 3, 0x0028, 0xa528);
+ efx_mdio_write(efx, 1, 0xd006, 0x000a);
+ efx_mdio_write(efx, 1, 0xd007, 0x0009);
+ efx_mdio_write(efx, 1, 0xd008, 0x0004);
+ /* This additional write is not present in the boot EEPROM. It
+ * prevents the PHY's internal boot ROM doing another pointless (and
+ * slow) reload of the firmware image (the microcontroller's code
+ * memory is not affected by the microcontroller reset). */
+ efx_mdio_write(efx, 1, 0xc317, 0x00ff);
+ /* PMA/PMD loopback sets RXIN to inverse polarity and the firmware
+ * restart doesn't reset it. We need to do that ourselves. */
+ efx_mdio_set_flag(efx, 1, PMA_PMD_MODE_REG,
+ 1 << PMA_PMD_RXIN_SEL_LBN, false);
+ efx_mdio_write(efx, 1, 0xc300, 0x0002);
+ msleep(20);
+
+ /* Restart microcontroller execution of firmware from RAM */
+ qt2025c_restart_firmware(efx);
+
+ /* Wait for the microcontroller to be ready again */
+ rc = qt2025c_wait_reset(efx);
+ if (rc < 0) {
+ netif_err(efx, hw, efx->net_dev,
+ "PHY microcontroller reset during mode switch "
+ "timed out\n");
+ return rc;
+ }
+
+ return 0;
+}
+
+static int qt202x_reset_phy(struct efx_nic *efx)
+{
+ int rc;
+
+ if (efx->phy_type == PHY_TYPE_QT2025C) {
+ /* Wait for the reset triggered by falcon_reset_hw()
+ * to complete */
+ rc = qt2025c_wait_reset(efx);
+ if (rc < 0)
+ goto fail;
+ } else {
+ /* Reset the PHYXS MMD. This is documented as doing
+ * a complete soft reset. */
+ rc = efx_mdio_reset_mmd(efx, MDIO_MMD_PHYXS,
+ QT2022C2_MAX_RESET_TIME /
+ QT2022C2_RESET_WAIT,
+ QT2022C2_RESET_WAIT);
+ if (rc < 0)
+ goto fail;
+ }
+
+ /* Wait 250ms for the PHY to complete bootup */
+ msleep(250);
+
+ falcon_board(efx)->type->init_phy(efx);
+
+ return 0;
+
+ fail:
+ netif_err(efx, hw, efx->net_dev, "PHY reset timed out\n");
+ return rc;
+}
+
+static int qt202x_phy_probe(struct efx_nic *efx)
+{
+ struct qt202x_phy_data *phy_data;
+
+ phy_data = kzalloc(sizeof(struct qt202x_phy_data), GFP_KERNEL);
+ if (!phy_data)
+ return -ENOMEM;
+ efx->phy_data = phy_data;
+ phy_data->phy_mode = efx->phy_mode;
+ phy_data->bug17190_in_bad_state = false;
+ phy_data->bug17190_timer = 0;
+
+ efx->mdio.mmds = QT202X_REQUIRED_DEVS;
+ efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
+ efx->loopback_modes = QT202X_LOOPBACKS | FALCON_XMAC_LOOPBACKS;
+ return 0;
+}
+
+static int qt202x_phy_init(struct efx_nic *efx)
+{
+ u32 devid;
+ int rc;
+
+ rc = qt202x_reset_phy(efx);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev, "PHY init failed\n");
+ return rc;
+ }
+
+ devid = efx_mdio_read_id(efx, MDIO_MMD_PHYXS);
+ netif_info(efx, probe, efx->net_dev,
+ "PHY ID reg %x (OUI %06x model %02x revision %x)\n",
+ devid, efx_mdio_id_oui(devid), efx_mdio_id_model(devid),
+ efx_mdio_id_rev(devid));
+
+ if (efx->phy_type == PHY_TYPE_QT2025C)
+ qt2025c_firmware_id(efx);
+
+ return 0;
+}
+
+static int qt202x_link_ok(struct efx_nic *efx)
+{
+ return efx_mdio_links_ok(efx, QT202X_REQUIRED_DEVS);
+}
+
+static bool qt202x_phy_poll(struct efx_nic *efx)
+{
+ bool was_up = efx->link_state.up;
+
+ efx->link_state.up = qt202x_link_ok(efx);
+ efx->link_state.speed = 10000;
+ efx->link_state.fd = true;
+ efx->link_state.fc = efx->wanted_fc;
+
+ if (efx->phy_type == PHY_TYPE_QT2025C)
+ qt2025c_bug17190_workaround(efx);
+
+ return efx->link_state.up != was_up;
+}
+
+static int qt202x_phy_reconfigure(struct efx_nic *efx)
+{
+ struct qt202x_phy_data *phy_data = efx->phy_data;
+
+ if (efx->phy_type == PHY_TYPE_QT2025C) {
+ int rc = qt2025c_select_phy_mode(efx);
+ if (rc)
+ return rc;
+
+ /* There are several different register bits which can
+ * disable TX (and save power) on direct-attach cables
+ * or optical transceivers, varying somewhat between
+ * firmware versions. Only 'static mode' appears to
+ * cover everything. */
+ mdio_set_flag(
+ &efx->mdio, efx->mdio.prtad, MDIO_MMD_PMAPMD,
+ PMA_PMD_FTX_CTRL2_REG, 1 << PMA_PMD_FTX_STATIC_LBN,
+ efx->phy_mode & PHY_MODE_TX_DISABLED ||
+ efx->phy_mode & PHY_MODE_LOW_POWER ||
+ efx->loopback_mode == LOOPBACK_PCS ||
+ efx->loopback_mode == LOOPBACK_PMAPMD);
+ } else {
+ /* Reset the PHY when moving from tx off to tx on */
+ if (!(efx->phy_mode & PHY_MODE_TX_DISABLED) &&
+ (phy_data->phy_mode & PHY_MODE_TX_DISABLED))
+ qt202x_reset_phy(efx);
+
+ efx_mdio_transmit_disable(efx);
+ }
+
+ efx_mdio_phy_reconfigure(efx);
+
+ phy_data->phy_mode = efx->phy_mode;
+
+ return 0;
+}
+
+static void qt202x_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
+{
+ mdio45_ethtool_gset(&efx->mdio, ecmd);
+}
+
+static void qt202x_phy_remove(struct efx_nic *efx)
+{
+ /* Free the context block */
+ kfree(efx->phy_data);
+ efx->phy_data = NULL;
+}
+
+static int qt202x_phy_get_module_info(struct efx_nic *efx,
+ struct ethtool_modinfo *modinfo)
+{
+ modinfo->type = ETH_MODULE_SFF_8079;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
+ return 0;
+}
+
+static int qt202x_phy_get_module_eeprom(struct efx_nic *efx,
+ struct ethtool_eeprom *ee, u8 *data)
+{
+ int mmd, reg_base, rc, i;
+
+ if (efx->phy_type == PHY_TYPE_QT2025C) {
+ mmd = MDIO_MMD_PCS;
+ reg_base = 0xd000;
+ } else {
+ mmd = MDIO_MMD_PMAPMD;
+ reg_base = 0x8007;
+ }
+
+ for (i = 0; i < ee->len; i++) {
+ rc = efx_mdio_read(efx, mmd, reg_base + ee->offset + i);
+ if (rc < 0)
+ return rc;
+ data[i] = rc;
+ }
+
+ return 0;
+}
+
+const struct efx_phy_operations falcon_qt202x_phy_ops = {
+ .probe = qt202x_phy_probe,
+ .init = qt202x_phy_init,
+ .reconfigure = qt202x_phy_reconfigure,
+ .poll = qt202x_phy_poll,
+ .fini = efx_port_dummy_op_void,
+ .remove = qt202x_phy_remove,
+ .get_settings = qt202x_phy_get_settings,
+ .set_settings = efx_mdio_set_settings,
+ .test_alive = efx_mdio_test_alive,
+ .get_module_eeprom = qt202x_phy_get_module_eeprom,
+ .get_module_info = qt202x_phy_get_module_info,
+};
diff --git a/kernel/drivers/net/ethernet/sfc/rx.c b/kernel/drivers/net/ethernet/sfc/rx.c
new file mode 100644
index 000000000..809ea4610
--- /dev/null
+++ b/kernel/drivers/net/ethernet/sfc/rx.c
@@ -0,0 +1,997 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2005-2013 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/socket.h>
+#include <linux/in.h>
+#include <linux/slab.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/prefetch.h>
+#include <linux/moduleparam.h>
+#include <linux/iommu.h>
+#include <net/ip.h>
+#include <net/checksum.h>
+#include "net_driver.h"
+#include "efx.h"
+#include "filter.h"
+#include "nic.h"
+#include "selftest.h"
+#include "workarounds.h"
+
+/* Preferred number of descriptors to fill at once */
+#define EFX_RX_PREFERRED_BATCH 8U
+
+/* Number of RX buffers to recycle pages for. When creating the RX page recycle
+ * ring, this number is divided by the number of buffers per page to calculate
+ * the number of pages to store in the RX page recycle ring.
+ */
+#define EFX_RECYCLE_RING_SIZE_IOMMU 4096
+#define EFX_RECYCLE_RING_SIZE_NOIOMMU (2 * EFX_RX_PREFERRED_BATCH)
+
+/* Size of buffer allocated for skb header area. */
+#define EFX_SKB_HEADERS 128u
+
+/* This is the percentage fill level below which new RX descriptors
+ * will be added to the RX descriptor ring.
+ */
+static unsigned int rx_refill_threshold;
+
+/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */
+#define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \
+ EFX_RX_USR_BUF_SIZE)
+
+/*
+ * RX maximum head room required.
+ *
+ * This must be at least 1 to prevent overflow, plus one packet-worth
+ * to allow pipelined receives.
+ */
+#define EFX_RXD_HEAD_ROOM (1 + EFX_RX_MAX_FRAGS)
+
+static inline u8 *efx_rx_buf_va(struct efx_rx_buffer *buf)
+{
+ return page_address(buf->page) + buf->page_offset;
+}
+
+static inline u32 efx_rx_buf_hash(struct efx_nic *efx, const u8 *eh)
+{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+ return __le32_to_cpup((const __le32 *)(eh + efx->rx_packet_hash_offset));
+#else
+ const u8 *data = eh + efx->rx_packet_hash_offset;
+ return (u32)data[0] |
+ (u32)data[1] << 8 |
+ (u32)data[2] << 16 |
+ (u32)data[3] << 24;
+#endif
+}
+
+static inline struct efx_rx_buffer *
+efx_rx_buf_next(struct efx_rx_queue *rx_queue, struct efx_rx_buffer *rx_buf)
+{
+ if (unlikely(rx_buf == efx_rx_buffer(rx_queue, rx_queue->ptr_mask)))
+ return efx_rx_buffer(rx_queue, 0);
+ else
+ return rx_buf + 1;
+}
+
+static inline void efx_sync_rx_buffer(struct efx_nic *efx,
+ struct efx_rx_buffer *rx_buf,
+ unsigned int len)
+{
+ dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr, len,
+ DMA_FROM_DEVICE);
+}
+
+void efx_rx_config_page_split(struct efx_nic *efx)
+{
+ efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align,
+ EFX_RX_BUF_ALIGNMENT);
+ efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
+ ((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
+ efx->rx_page_buf_step);
+ efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
+ efx->rx_bufs_per_page;
+ efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH,
+ efx->rx_bufs_per_page);
+}
+
+/* Check the RX page recycle ring for a page that can be reused. */
+static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
+{
+ struct efx_nic *efx = rx_queue->efx;
+ struct page *page;
+ struct efx_rx_page_state *state;
+ unsigned index;
+
+ index = rx_queue->page_remove & rx_queue->page_ptr_mask;
+ page = rx_queue->page_ring[index];
+ if (page == NULL)
+ return NULL;
+
+ rx_queue->page_ring[index] = NULL;
+ /* page_remove cannot exceed page_add. */
+ if (rx_queue->page_remove != rx_queue->page_add)
+ ++rx_queue->page_remove;
+
+ /* If page_count is 1 then we hold the only reference to this page. */
+ if (page_count(page) == 1) {
+ ++rx_queue->page_recycle_count;
+ return page;
+ } else {
+ state = page_address(page);
+ dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
+ PAGE_SIZE << efx->rx_buffer_order,
+ DMA_FROM_DEVICE);
+ put_page(page);
+ ++rx_queue->page_recycle_failed;
+ }
+
+ return NULL;
+}
+
+/**
+ * efx_init_rx_buffers - create a batch of page-based RX buffers
+ *
+ * @rx_queue: Efx RX queue
+ * @atomic: if true, perform non-blocking (GFP_ATOMIC) allocations
+ *
+ * This allocates a batch of pages, maps them for DMA, and populates
+ * a struct efx_rx_buffer for each one. Return a negative error code or
+ * 0 on success. If a single page can be used for multiple buffers,
+ * then the page will either be inserted fully, or not at all.
+ */
+static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
+{
+ struct efx_nic *efx = rx_queue->efx;
+ struct efx_rx_buffer *rx_buf;
+ struct page *page;
+ unsigned int page_offset;
+ struct efx_rx_page_state *state;
+ dma_addr_t dma_addr;
+ unsigned index, count;
+
+ count = 0;
+ do {
+ page = efx_reuse_page(rx_queue);
+ if (page == NULL) {
+ page = alloc_pages(__GFP_COLD | __GFP_COMP |
+ (atomic ? GFP_ATOMIC : GFP_KERNEL),
+ efx->rx_buffer_order);
+ if (unlikely(page == NULL))
+ return -ENOMEM;
+ dma_addr =
+ dma_map_page(&efx->pci_dev->dev, page, 0,
+ PAGE_SIZE << efx->rx_buffer_order,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
+ dma_addr))) {
+ __free_pages(page, efx->rx_buffer_order);
+ return -EIO;
+ }
+ state = page_address(page);
+ state->dma_addr = dma_addr;
+ } else {
+ state = page_address(page);
+ dma_addr = state->dma_addr;
+ }
+
+ dma_addr += sizeof(struct efx_rx_page_state);
+ page_offset = sizeof(struct efx_rx_page_state);
+
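+ /* Carve the rest of the page into rx_page_buf_step sized
+ * buffers, taking an extra page reference for each, until no
+ * more whole buffers fit in the page.
+ */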
+ do {
+ index = rx_queue->added_count & rx_queue->ptr_mask;
+ rx_buf = efx_rx_buffer(rx_queue, index);
+ rx_buf->dma_addr = dma_addr + efx->rx_ip_align;
+ rx_buf->page = page;
+ rx_buf->page_offset = page_offset + efx->rx_ip_align;
+ rx_buf->len = efx->rx_dma_len;
+ rx_buf->flags = 0;
+ ++rx_queue->added_count;
+ get_page(page);
+ dma_addr += efx->rx_page_buf_step;
+ page_offset += efx->rx_page_buf_step;
+ } while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);
+
+ rx_buf->flags = EFX_RX_BUF_LAST_IN_PAGE;
+ } while (++count < efx->rx_pages_per_batch);
+
+ return 0;
+}
+
+/* Unmap a DMA-mapped page. This function is only called for the final RX
+ * buffer in a page.
+ */
+static void efx_unmap_rx_buffer(struct efx_nic *efx,
+ struct efx_rx_buffer *rx_buf)
+{
+ struct page *page = rx_buf->page;
+
+ if (page) {
+ struct efx_rx_page_state *state = page_address(page);
+ dma_unmap_page(&efx->pci_dev->dev,
+ state->dma_addr,
+ PAGE_SIZE << efx->rx_buffer_order,
+ DMA_FROM_DEVICE);
+ }
+}
+
+static void efx_free_rx_buffers(struct efx_rx_queue *rx_queue,
+ struct efx_rx_buffer *rx_buf,
+ unsigned int num_bufs)
+{
+ do {
+ if (rx_buf->page) {
+ put_page(rx_buf->page);
+ rx_buf->page = NULL;
+ }
+ rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
+ } while (--num_bufs);
+}
+
+/* Attempt to recycle the page if there is an RX recycle ring; the page can
+ * only be added if this is the final RX buffer, to prevent pages being used in
+ * the descriptor ring and appearing in the recycle ring simultaneously.
+ */
+static void efx_recycle_rx_page(struct efx_channel *channel,
+ struct efx_rx_buffer *rx_buf)
+{
+ struct page *page = rx_buf->page;
+ struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
+ struct efx_nic *efx = rx_queue->efx;
+ unsigned index;
+
+ /* Only recycle the page after processing the final buffer. */
+ if (!(rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE))
+ return;
+
+ index = rx_queue->page_add & rx_queue->page_ptr_mask;
+ if (rx_queue->page_ring[index] == NULL) {
+ unsigned read_index = rx_queue->page_remove &
+ rx_queue->page_ptr_mask;
+
+ /* The next slot in the recycle ring is available, but
+ * increment page_remove if the read pointer currently
+ * points here.
+ */
+ if (read_index == index)
+ ++rx_queue->page_remove;
+ rx_queue->page_ring[index] = page;
+ ++rx_queue->page_add;
+ return;
+ }
+ ++rx_queue->page_recycle_full;
+ efx_unmap_rx_buffer(efx, rx_buf);
+ put_page(rx_buf->page);
+}
+
+static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
+ struct efx_rx_buffer *rx_buf)
+{
+ /* Release the page reference we hold for the buffer. */
+ if (rx_buf->page)
+ put_page(rx_buf->page);
+
+ /* If this is the last buffer in a page, unmap and free it. */
+ if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) {
+ efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
+ efx_free_rx_buffers(rx_queue, rx_buf, 1);
+ }
+ rx_buf->page = NULL;
+}
+
+/* Recycle the pages that are used by buffers that have just been received. */
+static void efx_recycle_rx_pages(struct efx_channel *channel,
+ struct efx_rx_buffer *rx_buf,
+ unsigned int n_frags)
+{
+ struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
+
+ do {
+ efx_recycle_rx_page(channel, rx_buf);
+ rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
+ } while (--n_frags);
+}
+
+static void efx_discard_rx_packet(struct efx_channel *channel,
+ struct efx_rx_buffer *rx_buf,
+ unsigned int n_frags)
+{
+ struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
+
+ efx_recycle_rx_pages(channel, rx_buf, n_frags);
+
+ efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
+}
+
+/**
+ * efx_fast_push_rx_descriptors - push new RX descriptors quickly
+ * @rx_queue: RX descriptor queue
+ * @atomic: if true, only perform non-blocking (GFP_ATOMIC) allocations
+ *
+ * This will aim to fill the RX descriptor queue up to
+ * @rx_queue->max_fill. If there is insufficient atomic
+ * memory to do so, a slow fill will be scheduled.
+ *
+ * The caller must provide serialisation (none is used here). In practice,
+ * this means this function must run from the NAPI handler, or be called
+ * when NAPI is disabled.
+ */
+void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic)
+{
+ struct efx_nic *efx = rx_queue->efx;
+ unsigned int fill_level, batch_size;
+ int space, rc = 0;
+
+ if (!rx_queue->refill_enabled)
+ return;
+
+ /* Calculate current fill level, and exit if we don't need to fill */
+ fill_level = (rx_queue->added_count - rx_queue->removed_count);
+ EFX_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries);
+ if (fill_level >= rx_queue->fast_fill_trigger)
+ goto out;
+
+ /* Record minimum fill level */
+ if (unlikely(fill_level < rx_queue->min_fill)) {
+ if (fill_level)
+ rx_queue->min_fill = fill_level;
+ }
+
+ batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
+ space = rx_queue->max_fill - fill_level;
+ EFX_BUG_ON_PARANOID(space < batch_size);
+
+ netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
+ "RX queue %d fast-filling descriptor ring from"
+ " level %d to level %d\n",
+ efx_rx_queue_index(rx_queue), fill_level,
+ rx_queue->max_fill);
+
+
+ do {
+ rc = efx_init_rx_buffers(rx_queue, atomic);
+ if (unlikely(rc)) {
+ /* Ensure that we don't leave the rx queue empty */
+ if (rx_queue->added_count == rx_queue->removed_count)
+ efx_schedule_slow_fill(rx_queue);
+ goto out;
+ }
+ } while ((space -= batch_size) >= batch_size);
+
+ netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
+ "RX queue %d fast-filled descriptor ring "
+ "to level %d\n", efx_rx_queue_index(rx_queue),
+ rx_queue->added_count - rx_queue->removed_count);
+
+ out:
+ if (rx_queue->notified_count != rx_queue->added_count)
+ efx_nic_notify_rx_desc(rx_queue);
+}
+
+void efx_rx_slow_fill(unsigned long context)
+{
+ struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context;
+
+ /* Post an event to cause NAPI to run and refill the queue */
+ efx_nic_generate_fill_event(rx_queue);
+ ++rx_queue->slow_fill_count;
+}
+
+static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
+ struct efx_rx_buffer *rx_buf,
+ int len)
+{
+ struct efx_nic *efx = rx_queue->efx;
+ unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;
+
+ if (likely(len <= max_len))
+ return;
+
+ /* The packet must be discarded, but this is only a fatal error
+ * if the caller indicated it was
+ */
+ rx_buf->flags |= EFX_RX_PKT_DISCARD;
+
+ if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
+ if (net_ratelimit())
+ netif_err(efx, rx_err, efx->net_dev,
+ " RX queue %d seriously overlength "
+ "RX event (0x%x > 0x%x+0x%x). Leaking\n",
+ efx_rx_queue_index(rx_queue), len, max_len,
+ efx->type->rx_buffer_padding);
+ efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
+ } else {
+ if (net_ratelimit())
+ netif_err(efx, rx_err, efx->net_dev,
+ " RX queue %d overlength RX event "
+ "(0x%x > 0x%x)\n",
+ efx_rx_queue_index(rx_queue), len, max_len);
+ }
+
+ efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
+}
+
+/* Pass a received packet up through GRO. GRO can handle pages
+ * regardless of checksum state and skbs with a good checksum.
+ */
+static void
+efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
+ unsigned int n_frags, u8 *eh)
+{
+ struct napi_struct *napi = &channel->napi_str;
+ gro_result_t gro_result;
+ struct efx_nic *efx = channel->efx;
+ struct sk_buff *skb;
+
+ skb = napi_get_frags(napi);
+ if (unlikely(!skb)) {
+ struct efx_rx_queue *rx_queue;
+
+ rx_queue = efx_channel_get_rx_queue(channel);
+ efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
+ return;
+ }
+
+ if (efx->net_dev->features & NETIF_F_RXHASH)
+ skb_set_hash(skb, efx_rx_buf_hash(efx, eh),
+ PKT_HASH_TYPE_L3);
+ skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
+ CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
+
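+ /* Attach each RX buffer's page as a fragment of the skb until
+ * all n_frags fragments have been consumed.
+ */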
+ for (;;) {
+ skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
+ rx_buf->page, rx_buf->page_offset,
+ rx_buf->len);
+ rx_buf->page = NULL;
+ skb->len += rx_buf->len;
+ if (skb_shinfo(skb)->nr_frags == n_frags)
+ break;
+
+ rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
+ }
+
+ skb->data_len = skb->len;
+ skb->truesize += n_frags * efx->rx_buffer_truesize;
+
+ skb_record_rx_queue(skb, channel->rx_queue.core_index);
+
+ skb_mark_napi_id(skb, &channel->napi_str);
+ gro_result = napi_gro_frags(napi);
+ if (gro_result != GRO_DROP)
+ channel->irq_mod_score += 2;
+}
+
+/* Allocate and construct an SKB around page fragments */
+static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel,
+ struct efx_rx_buffer *rx_buf,
+ unsigned int n_frags,
+ u8 *eh, int hdr_len)
+{
+ struct efx_nic *efx = channel->efx;
+ struct sk_buff *skb;
+
+ /* Allocate an SKB to store the headers */
+ skb = netdev_alloc_skb(efx->net_dev,
+ efx->rx_ip_align + efx->rx_prefix_size +
+ hdr_len);
+ if (unlikely(skb == NULL)) {
+ atomic_inc(&efx->n_rx_noskb_drops);
+ return NULL;
+ }
+
+ EFX_BUG_ON_PARANOID(rx_buf->len < hdr_len);
+
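+ /* Copy the RX prefix and packet headers into the skb's linear
+ * area, then hide the prefix again with skb_reserve() so that
+ * skb->data points at the Ethernet header.
+ */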
+ memcpy(skb->data + efx->rx_ip_align, eh - efx->rx_prefix_size,
+ efx->rx_prefix_size + hdr_len);
+ skb_reserve(skb, efx->rx_ip_align + efx->rx_prefix_size);
+ __skb_put(skb, hdr_len);
+
+ /* Append the remaining page(s) onto the frag list */
+ if (rx_buf->len > hdr_len) {
+ rx_buf->page_offset += hdr_len;
+ rx_buf->len -= hdr_len;
+
+ for (;;) {
+ skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
+ rx_buf->page, rx_buf->page_offset,
+ rx_buf->len);
+ rx_buf->page = NULL;
+ skb->len += rx_buf->len;
+ skb->data_len += rx_buf->len;
+ if (skb_shinfo(skb)->nr_frags == n_frags)
+ break;
+
+ rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
+ }
+ } else {
+ __free_pages(rx_buf->page, efx->rx_buffer_order);
+ rx_buf->page = NULL;
+ n_frags = 0;
+ }
+
+ skb->truesize += n_frags * efx->rx_buffer_truesize;
+
+ /* Move past the ethernet header */
+ skb->protocol = eth_type_trans(skb, efx->net_dev);
+
+ skb_mark_napi_id(skb, &channel->napi_str);
+
+ return skb;
+}
+
+void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
+ unsigned int n_frags, unsigned int len, u16 flags)
+{
+ struct efx_nic *efx = rx_queue->efx;
+ struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
+ struct efx_rx_buffer *rx_buf;
+
+ rx_queue->rx_packets++;
+
+ rx_buf = efx_rx_buffer(rx_queue, index);
+ rx_buf->flags |= flags;
+
+ /* Validate the number of fragments and completed length */
+ if (n_frags == 1) {
+ if (!(flags & EFX_RX_PKT_PREFIX_LEN))
+ efx_rx_packet__check_len(rx_queue, rx_buf, len);
+ } else if (unlikely(n_frags > EFX_RX_MAX_FRAGS) ||
+ unlikely(len <= (n_frags - 1) * efx->rx_dma_len) ||
+ unlikely(len > n_frags * efx->rx_dma_len) ||
+ unlikely(!efx->rx_scatter)) {
+ /* If this isn't an explicit discard request, either
+ * the hardware or the driver is broken.
+ */
+ WARN_ON(!(len == 0 && rx_buf->flags & EFX_RX_PKT_DISCARD));
+ rx_buf->flags |= EFX_RX_PKT_DISCARD;
+ }
+
+ netif_vdbg(efx, rx_status, efx->net_dev,
+ "RX queue %d received ids %x-%x len %d %s%s\n",
+ efx_rx_queue_index(rx_queue), index,
+ (index + n_frags - 1) & rx_queue->ptr_mask, len,
+ (rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
+ (rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : "");
+
+ /* Discard packet, if instructed to do so. Process the
+ * previous receive first.
+ */
+ if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
+ efx_rx_flush_packet(channel);
+ efx_discard_rx_packet(channel, rx_buf, n_frags);
+ return;
+ }
+
+ if (n_frags == 1 && !(flags & EFX_RX_PKT_PREFIX_LEN))
+ rx_buf->len = len;
+
+ /* Release and/or sync the DMA mapping - assumes all RX buffers
+ * consumed in-order per RX queue.
+ */
+ efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
+
+ /* Prefetch nice and early so data will (hopefully) be in cache by
+ * the time we look at it.
+ */
+ prefetch(efx_rx_buf_va(rx_buf));
+
+ rx_buf->page_offset += efx->rx_prefix_size;
+ rx_buf->len -= efx->rx_prefix_size;
+
+ if (n_frags > 1) {
+ /* Release/sync DMA mapping for additional fragments.
+ * Fix length for last fragment.
+ */
+ unsigned int tail_frags = n_frags - 1;
+
+ for (;;) {
+ rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
+ if (--tail_frags == 0)
+ break;
+ efx_sync_rx_buffer(efx, rx_buf, efx->rx_dma_len);
+ }
+ rx_buf->len = len - (n_frags - 1) * efx->rx_dma_len;
+ efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
+ }
+
+ /* All fragments have been DMA-synced, so recycle pages. */
+ rx_buf = efx_rx_buffer(rx_queue, index);
+ efx_recycle_rx_pages(channel, rx_buf, n_frags);
+
+ /* Pipeline receives so that we give time for packet headers to be
+ * prefetched into cache.
+ */
+ efx_rx_flush_packet(channel);
+ channel->rx_pkt_n_frags = n_frags;
+ channel->rx_pkt_index = index;
+}
+
+static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
+ struct efx_rx_buffer *rx_buf,
+ unsigned int n_frags)
+{
+ struct sk_buff *skb;
+ u16 hdr_len = min_t(u16, rx_buf->len, EFX_SKB_HEADERS);
+
+ skb = efx_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len);
+ if (unlikely(skb == NULL)) {
+ struct efx_rx_queue *rx_queue;
+
+ rx_queue = efx_channel_get_rx_queue(channel);
+ efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
+ return;
+ }
+ skb_record_rx_queue(skb, channel->rx_queue.core_index);
+
+ /* Set the SKB flags */
+ skb_checksum_none_assert(skb);
+ if (likely(rx_buf->flags & EFX_RX_PKT_CSUMMED))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ efx_rx_skb_attach_timestamp(channel, skb);
+
+ if (channel->type->receive_skb)
+ if (channel->type->receive_skb(channel, skb))
+ return;
+
+ /* Pass the packet up */
+ netif_receive_skb(skb);
+}
+
+/* Handle a received packet. Second half: Touches packet payload. */
+void __efx_rx_packet(struct efx_channel *channel)
+{
+ struct efx_nic *efx = channel->efx;
+ struct efx_rx_buffer *rx_buf =
+ efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
+ u8 *eh = efx_rx_buf_va(rx_buf);
+
+ /* Read length from the prefix if necessary. This already
+ * excludes the length of the prefix itself.
+ */
+ if (rx_buf->flags & EFX_RX_PKT_PREFIX_LEN)
+ rx_buf->len = le16_to_cpup((__le16 *)
+ (eh + efx->rx_packet_len_offset));
+
+ /* If we're in loopback test, then pass the packet directly to the
+ * loopback layer, and free the rx_buf here
+ */
+ if (unlikely(efx->loopback_selftest)) {
+ struct efx_rx_queue *rx_queue;
+
+ efx_loopback_rx_packet(efx, eh, rx_buf->len);
+ rx_queue = efx_channel_get_rx_queue(channel);
+ efx_free_rx_buffers(rx_queue, rx_buf,
+ channel->rx_pkt_n_frags);
+ goto out;
+ }
+
+ if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
+ rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;
+
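+	/* Hand TCP packets to GRO unless the channel type consumes skbs
+	 * itself or the channel is being busy-polled; otherwise build an
+	 * skb and deliver it in the normal way.
+	 */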
+ if ((rx_buf->flags & EFX_RX_PKT_TCP) && !channel->type->receive_skb &&
+ !efx_channel_busy_polling(channel))
+ efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh);
+ else
+ efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
+out:
+ channel->rx_pkt_n_frags = 0;
+}
+
+int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
+{
+ struct efx_nic *efx = rx_queue->efx;
+ unsigned int entries;
+ int rc;
+
+	/* Create the smallest power-of-two ring large enough to hold the
+	 * requested number of entries, subject to the hardware minimum.
+	 */
+ entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
+ EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
+ rx_queue->ptr_mask = entries - 1;
+
+ netif_dbg(efx, probe, efx->net_dev,
+ "creating RX queue %d size %#x mask %#x\n",
+ efx_rx_queue_index(rx_queue), efx->rxq_entries,
+ rx_queue->ptr_mask);
+
+ /* Allocate RX buffers */
+ rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
+ GFP_KERNEL);
+ if (!rx_queue->buffer)
+ return -ENOMEM;
+
+ rc = efx_nic_probe_rx(rx_queue);
+ if (rc) {
+ kfree(rx_queue->buffer);
+ rx_queue->buffer = NULL;
+ }
+
+ return rc;
+}
+
+static void efx_init_rx_recycle_ring(struct efx_nic *efx,
+ struct efx_rx_queue *rx_queue)
+{
+ unsigned int bufs_in_recycle_ring, page_ring_size;
+
+ /* Set the RX recycle ring size */
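+	/* The IOMMU ring size is larger: recycling pages is most valuable
+	 * when mapping them for DMA is expensive, i.e. when an IOMMU is in
+	 * use (always assumed on PPC64).
+	 */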
+#ifdef CONFIG_PPC64
+ bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
+#else
+ if (iommu_present(&pci_bus_type))
+ bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
+ else
+ bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_NOIOMMU;
+#endif /* CONFIG_PPC64 */
+
+ page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
+ efx->rx_bufs_per_page);
+ rx_queue->page_ring = kcalloc(page_ring_size,
+ sizeof(*rx_queue->page_ring), GFP_KERNEL);
+ rx_queue->page_ptr_mask = page_ring_size - 1;
+}
+
+void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
+{
+ struct efx_nic *efx = rx_queue->efx;
+ unsigned int max_fill, trigger, max_trigger;
+
+ netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
+ "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));
+
+ /* Initialise ptr fields */
+ rx_queue->added_count = 0;
+ rx_queue->notified_count = 0;
+ rx_queue->removed_count = 0;
+ rx_queue->min_fill = -1U;
+ efx_init_rx_recycle_ring(efx, rx_queue);
+
+ rx_queue->page_remove = 0;
+ rx_queue->page_add = rx_queue->page_ptr_mask + 1;
+ rx_queue->page_recycle_count = 0;
+ rx_queue->page_recycle_failed = 0;
+ rx_queue->page_recycle_full = 0;
+
+ /* Initialise limit fields */
+ max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
+ max_trigger =
+ max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page;
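+	/* fast_fill_trigger is the fill level below which the ring is
+	 * refilled: by default the maximum usable level less one refill
+	 * batch, optionally reduced via the rx_refill_threshold module
+	 * parameter (a percentage of max_fill).
+	 */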
+ if (rx_refill_threshold != 0) {
+ trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
+ if (trigger > max_trigger)
+ trigger = max_trigger;
+ } else {
+ trigger = max_trigger;
+ }
+
+ rx_queue->max_fill = max_fill;
+ rx_queue->fast_fill_trigger = trigger;
+ rx_queue->refill_enabled = true;
+
+ /* Set up RX descriptor ring */
+ efx_nic_init_rx(rx_queue);
+}
+
+void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
+{
+ int i;
+ struct efx_nic *efx = rx_queue->efx;
+ struct efx_rx_buffer *rx_buf;
+
+ netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
+ "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));
+
+ del_timer_sync(&rx_queue->slow_fill);
+
+ /* Release RX buffers from the current read ptr to the write ptr */
+ if (rx_queue->buffer) {
+ for (i = rx_queue->removed_count; i < rx_queue->added_count;
+ i++) {
+ unsigned index = i & rx_queue->ptr_mask;
+ rx_buf = efx_rx_buffer(rx_queue, index);
+ efx_fini_rx_buffer(rx_queue, rx_buf);
+ }
+ }
+
+ /* Unmap and release the pages in the recycle ring. Remove the ring. */
+ for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
+ struct page *page = rx_queue->page_ring[i];
+ struct efx_rx_page_state *state;
+
+ if (page == NULL)
+ continue;
+
+ state = page_address(page);
+ dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
+ PAGE_SIZE << efx->rx_buffer_order,
+ DMA_FROM_DEVICE);
+ put_page(page);
+ }
+ kfree(rx_queue->page_ring);
+ rx_queue->page_ring = NULL;
+}
+
+void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
+{
+ netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
+ "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));
+
+ efx_nic_remove_rx(rx_queue);
+
+ kfree(rx_queue->buffer);
+ rx_queue->buffer = NULL;
+}
+
+
+module_param(rx_refill_threshold, uint, 0444);
+MODULE_PARM_DESC(rx_refill_threshold,
+ "RX descriptor ring refill threshold (%)");
+
+#ifdef CONFIG_RFS_ACCEL
+
+int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
+ u16 rxq_index, u32 flow_id)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_channel *channel;
+ struct efx_filter_spec spec;
+ const __be16 *ports;
+ __be16 ether_type;
+ int nhoff;
+ int rc;
+
+ /* The core RPS/RFS code has already parsed and validated
+ * VLAN, IP and transport headers. We assume they are in the
+ * header area.
+ */
+
+ if (skb->protocol == htons(ETH_P_8021Q)) {
+ const struct vlan_hdr *vh =
+ (const struct vlan_hdr *)skb->data;
+
+ /* We can't filter on the IP 5-tuple and the vlan
+ * together, so just strip the vlan header and filter
+ * on the IP part.
+ */
+ EFX_BUG_ON_PARANOID(skb_headlen(skb) < sizeof(*vh));
+ ether_type = vh->h_vlan_encapsulated_proto;
+ nhoff = sizeof(struct vlan_hdr);
+ } else {
+ ether_type = skb->protocol;
+ nhoff = 0;
+ }
+
+ if (ether_type != htons(ETH_P_IP) && ether_type != htons(ETH_P_IPV6))
+ return -EPROTONOSUPPORT;
+
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT,
+ efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
+ rxq_index);
+ spec.match_flags =
+ EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
+ EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
+ spec.ether_type = ether_type;
+
+ if (ether_type == htons(ETH_P_IP)) {
+ const struct iphdr *ip =
+ (const struct iphdr *)(skb->data + nhoff);
+
+ EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
+ if (ip_is_fragment(ip))
+ return -EPROTONOSUPPORT;
+ spec.ip_proto = ip->protocol;
+ spec.rem_host[0] = ip->saddr;
+ spec.loc_host[0] = ip->daddr;
+ EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
+ ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
+ } else {
+ const struct ipv6hdr *ip6 =
+ (const struct ipv6hdr *)(skb->data + nhoff);
+
+ EFX_BUG_ON_PARANOID(skb_headlen(skb) <
+ nhoff + sizeof(*ip6) + 4);
+ spec.ip_proto = ip6->nexthdr;
+ memcpy(spec.rem_host, &ip6->saddr, sizeof(ip6->saddr));
+ memcpy(spec.loc_host, &ip6->daddr, sizeof(ip6->daddr));
+ ports = (const __be16 *)(ip6 + 1);
+ }
+
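+	/* For both TCP and UDP the source and destination ports are the
+	 * first two 16-bit fields of the transport header, so they can be
+	 * read here without knowing which of the two protocols this is.
+	 */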
+ spec.rem_port = ports[0];
+ spec.loc_port = ports[1];
+
+ rc = efx->type->filter_rfs_insert(efx, &spec);
+ if (rc < 0)
+ return rc;
+
+ /* Remember this so we can check whether to expire the filter later */
+ efx->rps_flow_id[rc] = flow_id;
+ channel = efx_get_channel(efx, skb_get_rx_queue(skb));
+ ++channel->rfs_filters_added;
+
+ if (ether_type == htons(ETH_P_IP))
+ netif_info(efx, rx_status, efx->net_dev,
+ "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
+ (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
+ spec.rem_host, ntohs(ports[0]), spec.loc_host,
+ ntohs(ports[1]), rxq_index, flow_id, rc);
+ else
+ netif_info(efx, rx_status, efx->net_dev,
+ "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n",
+ (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
+ spec.rem_host, ntohs(ports[0]), spec.loc_host,
+ ntohs(ports[1]), rxq_index, flow_id, rc);
+
+ return rc;
+}
+
+bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota)
+{
+ bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
+ unsigned int index, size;
+ u32 flow_id;
+
+ if (!spin_trylock_bh(&efx->filter_lock))
+ return false;
+
+ expire_one = efx->type->filter_rfs_expire_one;
+ index = efx->rps_expire_index;
+ size = efx->type->max_rx_ip_filters;
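+	/* Scan up to 'quota' entries of the filter table, wrapping around,
+	 * and ask the NIC-type code to remove any ARFS filters whose flows
+	 * the stack no longer considers active.
+	 */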
+ while (quota--) {
+ flow_id = efx->rps_flow_id[index];
+ if (expire_one(efx, flow_id, index))
+ netif_info(efx, rx_status, efx->net_dev,
+ "expired filter %d [flow %u]\n",
+ index, flow_id);
+ if (++index == size)
+ index = 0;
+ }
+ efx->rps_expire_index = index;
+
+ spin_unlock_bh(&efx->filter_lock);
+ return true;
+}
+
+#endif /* CONFIG_RFS_ACCEL */
+
+/**
+ * efx_filter_is_mc_recipient - test whether spec is a multicast recipient
+ * @spec: Specification to test
+ *
+ * Return: %true if the specification is a non-drop RX filter that
+ * matches a local MAC address I/G bit value of 1 or matches a local
+ * IPv4 or IPv6 address value in the respective multicast address
+ * range. Otherwise %false.
+ */
+bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec)
+{
+ if (!(spec->flags & EFX_FILTER_FLAG_RX) ||
+ spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
+ return false;
+
+ if (spec->match_flags &
+ (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG) &&
+ is_multicast_ether_addr(spec->loc_mac))
+ return true;
+
+ if ((spec->match_flags &
+ (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
+ (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
+ if (spec->ether_type == htons(ETH_P_IP) &&
+ ipv4_is_multicast(spec->loc_host[0]))
+ return true;
+ if (spec->ether_type == htons(ETH_P_IPV6) &&
+ ((const u8 *)spec->loc_host)[0] == 0xff)
+ return true;
+ }
+
+ return false;
+}
diff --git a/kernel/drivers/net/ethernet/sfc/selftest.c b/kernel/drivers/net/ethernet/sfc/selftest.c
new file mode 100644
index 000000000..b605dfd5c
--- /dev/null
+++ b/kernel/drivers/net/ethernet/sfc/selftest.c
@@ -0,0 +1,788 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2012 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/netdevice.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/kernel_stat.h>
+#include <linux/pci.h>
+#include <linux/ethtool.h>
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/udp.h>
+#include <linux/rtnetlink.h>
+#include <linux/slab.h>
+#include "net_driver.h"
+#include "efx.h"
+#include "nic.h"
+#include "selftest.h"
+#include "workarounds.h"
+
+/* IRQ latency can be enormous because:
+ * - All IRQs may be disabled on a CPU for a *long* time by e.g. a
+ * slow serial console or an old IDE driver doing error recovery
+ * - The PREEMPT_RT patches mostly deal with this, but also allow a
+ * tasklet or normal task to be given higher priority than our IRQ
+ * threads
+ * Try to avoid blaming the hardware for this.
+ */
+#define IRQ_TIMEOUT HZ
+
+/*
+ * Loopback test packet structure
+ *
+ * The self-test should stress every RSS vector, and unfortunately
+ * Falcon only performs RSS on TCP/UDP packets.
+ */
+struct efx_loopback_payload {
+ struct ethhdr header;
+ struct iphdr ip;
+ struct udphdr udp;
+ __be16 iteration;
+ char msg[64];
+} __packed;
+
+/* Loopback test source MAC address */
+static const u8 payload_source[ETH_ALEN] __aligned(2) = {
+ 0x00, 0x0f, 0x53, 0x1b, 0x1b, 0x1b,
+};
+
+static const char payload_msg[] =
+ "Hello world! This is an Efx loopback test in progress!";
+
+/* Interrupt mode names */
+static const unsigned int efx_interrupt_mode_max = EFX_INT_MODE_MAX;
+static const char *const efx_interrupt_mode_names[] = {
+ [EFX_INT_MODE_MSIX] = "MSI-X",
+ [EFX_INT_MODE_MSI] = "MSI",
+ [EFX_INT_MODE_LEGACY] = "legacy",
+};
+#define INT_MODE(efx) \
+ STRING_TABLE_LOOKUP(efx->interrupt_mode, efx_interrupt_mode)
+
+/**
+ * struct efx_loopback_state - persistent state during a loopback selftest
+ * @flush: Drop all packets in efx_loopback_rx_packet
+ * @packet_count: Number of packets being used in this test
+ * @skbs: An array of skbs transmitted
+ * @offload_csum: Checksums are being offloaded
+ * @rx_good: RX good packet count
+ * @rx_bad: RX bad packet count
+ * @payload: Payload used in tests
+ */
+struct efx_loopback_state {
+ bool flush;
+ int packet_count;
+ struct sk_buff **skbs;
+ bool offload_csum;
+ atomic_t rx_good;
+ atomic_t rx_bad;
+ struct efx_loopback_payload payload;
+};
+
+/* How long to wait for all the packets to arrive (in ms) */
+#define LOOPBACK_TIMEOUT_MS 1000
+
+/**************************************************************************
+ *
+ * MII, NVRAM and register tests
+ *
+ **************************************************************************/
+
+static int efx_test_phy_alive(struct efx_nic *efx, struct efx_self_tests *tests)
+{
+ int rc = 0;
+
+ if (efx->phy_op->test_alive) {
+ rc = efx->phy_op->test_alive(efx);
+ tests->phy_alive = rc ? -1 : 1;
+ }
+
+ return rc;
+}
+
+static int efx_test_nvram(struct efx_nic *efx, struct efx_self_tests *tests)
+{
+ int rc = 0;
+
+ if (efx->type->test_nvram) {
+ rc = efx->type->test_nvram(efx);
+ tests->nvram = rc ? -1 : 1;
+ }
+
+ return rc;
+}
+
+/**************************************************************************
+ *
+ * Interrupt and event queue testing
+ *
+ **************************************************************************/
+
+/* Test generation and receipt of interrupts */
+static int efx_test_interrupts(struct efx_nic *efx,
+ struct efx_self_tests *tests)
+{
+ unsigned long timeout, wait;
+ int cpu;
+
+ netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n");
+ tests->interrupt = -1;
+
+ efx_nic_irq_test_start(efx);
+ timeout = jiffies + IRQ_TIMEOUT;
+ wait = 1;
+
+ /* Wait for arrival of test interrupt. */
+ netif_dbg(efx, drv, efx->net_dev, "waiting for test interrupt\n");
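+	/* Poll for the interrupt, doubling the sleep interval each time,
+	 * until the timeout expires.
+	 */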
+ do {
+ schedule_timeout_uninterruptible(wait);
+ cpu = efx_nic_irq_test_irq_cpu(efx);
+ if (cpu >= 0)
+ goto success;
+ wait *= 2;
+ } while (time_before(jiffies, timeout));
+
+ netif_err(efx, drv, efx->net_dev, "timed out waiting for interrupt\n");
+ return -ETIMEDOUT;
+
+ success:
+ netif_dbg(efx, drv, efx->net_dev, "%s test interrupt seen on CPU%d\n",
+ INT_MODE(efx), cpu);
+ tests->interrupt = 1;
+ return 0;
+}
+
+/* Test generation and receipt of interrupting events */
+static int efx_test_eventq_irq(struct efx_nic *efx,
+ struct efx_self_tests *tests)
+{
+ struct efx_channel *channel;
+ unsigned int read_ptr[EFX_MAX_CHANNELS];
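+	/* Per-channel bitmaps: which channels have had NAPI run, and which
+	 * are still waiting for a DMA event or an interrupt respectively.
+	 */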
+ unsigned long napi_ran = 0, dma_pend = 0, int_pend = 0;
+ unsigned long timeout, wait;
+
+ BUILD_BUG_ON(EFX_MAX_CHANNELS > BITS_PER_LONG);
+
+ efx_for_each_channel(channel, efx) {
+ read_ptr[channel->channel] = channel->eventq_read_ptr;
+ set_bit(channel->channel, &dma_pend);
+ set_bit(channel->channel, &int_pend);
+ efx_nic_event_test_start(channel);
+ }
+
+ timeout = jiffies + IRQ_TIMEOUT;
+ wait = 1;
+
+ /* Wait for arrival of interrupts. NAPI processing may or may
+ * not complete in time, but we can cope in any case.
+ */
+ do {
+ schedule_timeout_uninterruptible(wait);
+
+ efx_for_each_channel(channel, efx) {
+ efx_stop_eventq(channel);
+ if (channel->eventq_read_ptr !=
+ read_ptr[channel->channel]) {
+ set_bit(channel->channel, &napi_ran);
+ clear_bit(channel->channel, &dma_pend);
+ clear_bit(channel->channel, &int_pend);
+ } else {
+ if (efx_nic_event_present(channel))
+ clear_bit(channel->channel, &dma_pend);
+ if (efx_nic_event_test_irq_cpu(channel) >= 0)
+ clear_bit(channel->channel, &int_pend);
+ }
+ efx_start_eventq(channel);
+ }
+
+ wait *= 2;
+ } while ((dma_pend || int_pend) && time_before(jiffies, timeout));
+
+ efx_for_each_channel(channel, efx) {
+ bool dma_seen = !test_bit(channel->channel, &dma_pend);
+ bool int_seen = !test_bit(channel->channel, &int_pend);
+
+ tests->eventq_dma[channel->channel] = dma_seen ? 1 : -1;
+ tests->eventq_int[channel->channel] = int_seen ? 1 : -1;
+
+ if (dma_seen && int_seen) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "channel %d event queue passed (with%s NAPI)\n",
+ channel->channel,
+ test_bit(channel->channel, &napi_ran) ?
+ "" : "out");
+ } else {
+ /* Report failure and whether either interrupt or DMA
+ * worked
+ */
+ netif_err(efx, drv, efx->net_dev,
+ "channel %d timed out waiting for event queue\n",
+ channel->channel);
+ if (int_seen)
+ netif_err(efx, drv, efx->net_dev,
+ "channel %d saw interrupt "
+ "during event queue test\n",
+ channel->channel);
+ if (dma_seen)
+ netif_err(efx, drv, efx->net_dev,
+ "channel %d event was generated, but "
+ "failed to trigger an interrupt\n",
+ channel->channel);
+ }
+ }
+
+ return (dma_pend || int_pend) ? -ETIMEDOUT : 0;
+}
+
+static int efx_test_phy(struct efx_nic *efx, struct efx_self_tests *tests,
+ unsigned flags)
+{
+ int rc;
+
+ if (!efx->phy_op->run_tests)
+ return 0;
+
+ mutex_lock(&efx->mac_lock);
+ rc = efx->phy_op->run_tests(efx, tests->phy_ext, flags);
+ mutex_unlock(&efx->mac_lock);
+ return rc;
+}
+
+/**************************************************************************
+ *
+ * Loopback testing
+ * NB Only one loopback test can be executing concurrently.
+ *
+ **************************************************************************/
+
+/* Loopback test RX callback
+ * This is called for each received packet during loopback testing.
+ */
+void efx_loopback_rx_packet(struct efx_nic *efx,
+ const char *buf_ptr, int pkt_len)
+{
+ struct efx_loopback_state *state = efx->loopback_selftest;
+ struct efx_loopback_payload *received;
+ struct efx_loopback_payload *payload;
+
+ BUG_ON(!buf_ptr);
+
+ /* If we are just flushing, then drop the packet */
+ if ((state == NULL) || state->flush)
+ return;
+
+ payload = &state->payload;
+
+ received = (struct efx_loopback_payload *) buf_ptr;
+ received->ip.saddr = payload->ip.saddr;
+ if (state->offload_csum)
+ received->ip.check = payload->ip.check;
+
+ /* Check that header exists */
+ if (pkt_len < sizeof(received->header)) {
+ netif_err(efx, drv, efx->net_dev,
+ "saw runt RX packet (length %d) in %s loopback "
+ "test\n", pkt_len, LOOPBACK_MODE(efx));
+ goto err;
+ }
+
+ /* Check that the ethernet header exists */
+ if (memcmp(&received->header, &payload->header, ETH_HLEN) != 0) {
+ netif_err(efx, drv, efx->net_dev,
+ "saw non-loopback RX packet in %s loopback test\n",
+ LOOPBACK_MODE(efx));
+ goto err;
+ }
+
+ /* Check packet length */
+ if (pkt_len != sizeof(*payload)) {
+ netif_err(efx, drv, efx->net_dev,
+ "saw incorrect RX packet length %d (wanted %d) in "
+ "%s loopback test\n", pkt_len, (int)sizeof(*payload),
+ LOOPBACK_MODE(efx));
+ goto err;
+ }
+
+ /* Check that IP header matches */
+ if (memcmp(&received->ip, &payload->ip, sizeof(payload->ip)) != 0) {
+ netif_err(efx, drv, efx->net_dev,
+ "saw corrupted IP header in %s loopback test\n",
+ LOOPBACK_MODE(efx));
+ goto err;
+ }
+
+ /* Check that msg and padding matches */
+ if (memcmp(&received->msg, &payload->msg, sizeof(received->msg)) != 0) {
+ netif_err(efx, drv, efx->net_dev,
+ "saw corrupted RX packet in %s loopback test\n",
+ LOOPBACK_MODE(efx));
+ goto err;
+ }
+
+ /* Check that iteration matches */
+ if (received->iteration != payload->iteration) {
+ netif_err(efx, drv, efx->net_dev,
+ "saw RX packet from iteration %d (wanted %d) in "
+ "%s loopback test\n", ntohs(received->iteration),
+ ntohs(payload->iteration), LOOPBACK_MODE(efx));
+ goto err;
+ }
+
+ /* Increase correct RX count */
+ netif_vdbg(efx, drv, efx->net_dev,
+ "got loopback RX in %s loopback test\n", LOOPBACK_MODE(efx));
+
+ atomic_inc(&state->rx_good);
+ return;
+
+ err:
+#ifdef DEBUG
+ if (atomic_read(&state->rx_bad) == 0) {
+ netif_err(efx, drv, efx->net_dev, "received packet:\n");
+ print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
+ buf_ptr, pkt_len, 0);
+ netif_err(efx, drv, efx->net_dev, "expected packet:\n");
+ print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
+ &state->payload, sizeof(state->payload), 0);
+ }
+#endif
+ atomic_inc(&state->rx_bad);
+}
+
+/* Initialise an efx_selftest_state for a new iteration */
+static void efx_iterate_state(struct efx_nic *efx)
+{
+ struct efx_loopback_state *state = efx->loopback_selftest;
+ struct net_device *net_dev = efx->net_dev;
+ struct efx_loopback_payload *payload = &state->payload;
+
+	/* Initialise the layer 2 (Ethernet) header */
+ ether_addr_copy((u8 *)&payload->header.h_dest, net_dev->dev_addr);
+ ether_addr_copy((u8 *)&payload->header.h_source, payload_source);
+ payload->header.h_proto = htons(ETH_P_IP);
+
+ /* saddr set later and used as incrementing count */
+ payload->ip.daddr = htonl(INADDR_LOOPBACK);
+ payload->ip.ihl = 5;
+ payload->ip.check = (__force __sum16) htons(0xdead);
+ payload->ip.tot_len = htons(sizeof(*payload) - sizeof(struct ethhdr));
+ payload->ip.version = IPVERSION;
+ payload->ip.protocol = IPPROTO_UDP;
+
+ /* Initialise udp header */
+ payload->udp.source = 0;
+ payload->udp.len = htons(sizeof(*payload) - sizeof(struct ethhdr) -
+ sizeof(struct iphdr));
+ payload->udp.check = 0; /* checksum ignored */
+
+ /* Fill out payload */
+ payload->iteration = htons(ntohs(payload->iteration) + 1);
+ memcpy(&payload->msg, payload_msg, sizeof(payload_msg));
+
+ /* Fill out remaining state members */
+ atomic_set(&state->rx_good, 0);
+ atomic_set(&state->rx_bad, 0);
+ smp_wmb();
+}
+
+static int efx_begin_loopback(struct efx_tx_queue *tx_queue)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ struct efx_loopback_state *state = efx->loopback_selftest;
+ struct efx_loopback_payload *payload;
+ struct sk_buff *skb;
+ int i;
+ netdev_tx_t rc;
+
+ /* Transmit N copies of buffer */
+ for (i = 0; i < state->packet_count; i++) {
+ /* Allocate an skb, holding an extra reference for
+ * transmit completion counting */
+ skb = alloc_skb(sizeof(state->payload), GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+ state->skbs[i] = skb;
+ skb_get(skb);
+
+ /* Copy the payload in, incrementing the source address to
+ * exercise the rss vectors */
+ payload = ((struct efx_loopback_payload *)
+ skb_put(skb, sizeof(state->payload)));
+ memcpy(payload, &state->payload, sizeof(state->payload));
+ payload->ip.saddr = htonl(INADDR_LOOPBACK | (i << 2));
+
+ /* Ensure everything we've written is visible to the
+ * interrupt handler. */
+ smp_wmb();
+
+ netif_tx_lock_bh(efx->net_dev);
+ rc = efx_enqueue_skb(tx_queue, skb);
+ netif_tx_unlock_bh(efx->net_dev);
+
+ if (rc != NETDEV_TX_OK) {
+ netif_err(efx, drv, efx->net_dev,
+ "TX queue %d could not transmit packet %d of "
+ "%d in %s loopback test\n", tx_queue->queue,
+ i + 1, state->packet_count,
+ LOOPBACK_MODE(efx));
+
+ /* Defer cleaning up the other skbs for the caller */
+ kfree_skb(skb);
+ return -EPIPE;
+ }
+ }
+
+ return 0;
+}
+
+static int efx_poll_loopback(struct efx_nic *efx)
+{
+ struct efx_loopback_state *state = efx->loopback_selftest;
+
+ return atomic_read(&state->rx_good) == state->packet_count;
+}
+
+static int efx_end_loopback(struct efx_tx_queue *tx_queue,
+ struct efx_loopback_self_tests *lb_tests)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ struct efx_loopback_state *state = efx->loopback_selftest;
+ struct sk_buff *skb;
+ int tx_done = 0, rx_good, rx_bad;
+ int i, rc = 0;
+
+ netif_tx_lock_bh(efx->net_dev);
+
+	/* Count the number of TX completions and drop the extra reference
+	 * taken at transmit time. Any skbs not already completed will be
+	 * freed when the queue is flushed.
+	 */
+ for (i = 0; i < state->packet_count; i++) {
+ skb = state->skbs[i];
+ if (skb && !skb_shared(skb))
+ ++tx_done;
+ dev_kfree_skb(skb);
+ }
+
+ netif_tx_unlock_bh(efx->net_dev);
+
+ /* Check TX completion and received packet counts */
+ rx_good = atomic_read(&state->rx_good);
+ rx_bad = atomic_read(&state->rx_bad);
+ if (tx_done != state->packet_count) {
+ /* Don't free the skbs; they will be picked up on TX
+ * overflow or channel teardown.
+ */
+ netif_err(efx, drv, efx->net_dev,
+ "TX queue %d saw only %d out of an expected %d "
+ "TX completion events in %s loopback test\n",
+ tx_queue->queue, tx_done, state->packet_count,
+ LOOPBACK_MODE(efx));
+ rc = -ETIMEDOUT;
+ /* Allow to fall through so we see the RX errors as well */
+ }
+
+ /* We may always be up to a flush away from our desired packet total */
+ if (rx_good != state->packet_count) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "TX queue %d saw only %d out of an expected %d "
+ "received packets in %s loopback test\n",
+ tx_queue->queue, rx_good, state->packet_count,
+ LOOPBACK_MODE(efx));
+ rc = -ETIMEDOUT;
+ /* Fall through */
+ }
+
+ /* Update loopback test structure */
+ lb_tests->tx_sent[tx_queue->queue] += state->packet_count;
+ lb_tests->tx_done[tx_queue->queue] += tx_done;
+ lb_tests->rx_good += rx_good;
+ lb_tests->rx_bad += rx_bad;
+
+ return rc;
+}
+
+static int
+efx_test_loopback(struct efx_tx_queue *tx_queue,
+ struct efx_loopback_self_tests *lb_tests)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ struct efx_loopback_state *state = efx->loopback_selftest;
+ int i, begin_rc, end_rc;
+
+ for (i = 0; i < 3; i++) {
+		/* Determine how many packets to send: 1, then 16, then 256,
+		 * capped at one third of the TX ring.
+		 */
+ state->packet_count = efx->txq_entries / 3;
+ state->packet_count = min(1 << (i << 2), state->packet_count);
+ state->skbs = kcalloc(state->packet_count,
+ sizeof(state->skbs[0]), GFP_KERNEL);
+ if (!state->skbs)
+ return -ENOMEM;
+ state->flush = false;
+
+ netif_dbg(efx, drv, efx->net_dev,
+ "TX queue %d testing %s loopback with %d packets\n",
+ tx_queue->queue, LOOPBACK_MODE(efx),
+ state->packet_count);
+
+ efx_iterate_state(efx);
+ begin_rc = efx_begin_loopback(tx_queue);
+
+ /* This will normally complete very quickly, but be
+ * prepared to wait much longer. */
+ msleep(1);
+ if (!efx_poll_loopback(efx)) {
+ msleep(LOOPBACK_TIMEOUT_MS);
+ efx_poll_loopback(efx);
+ }
+
+ end_rc = efx_end_loopback(tx_queue, lb_tests);
+ kfree(state->skbs);
+
+ if (begin_rc || end_rc) {
+ /* Wait a while to ensure there are no packets
+ * floating around after a failure. */
+ schedule_timeout_uninterruptible(HZ / 10);
+ return begin_rc ? begin_rc : end_rc;
+ }
+ }
+
+ netif_dbg(efx, drv, efx->net_dev,
+ "TX queue %d passed %s loopback test with a burst length "
+ "of %d packets\n", tx_queue->queue, LOOPBACK_MODE(efx),
+ state->packet_count);
+
+ return 0;
+}
+
+/* Wait for link up. On Falcon, we would prefer to rely on efx_monitor, but
+ * any contention on the mac lock (via e.g. efx_mac_mcast_work) causes it
+ * to delay and retry. Therefore, it's safer to just poll directly. Wait
+ * for link up and any faults to dissipate. */
+static int efx_wait_for_link(struct efx_nic *efx)
+{
+ struct efx_link_state *link_state = &efx->link_state;
+ int count, link_up_count = 0;
+ bool link_up;
+
+ for (count = 0; count < 40; count++) {
+ schedule_timeout_uninterruptible(HZ / 10);
+
+ if (efx->type->monitor != NULL) {
+ mutex_lock(&efx->mac_lock);
+ efx->type->monitor(efx);
+ mutex_unlock(&efx->mac_lock);
+ }
+
+ mutex_lock(&efx->mac_lock);
+ link_up = link_state->up;
+ if (link_up)
+ link_up = !efx->type->check_mac_fault(efx);
+ mutex_unlock(&efx->mac_lock);
+
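+		/* Require the link to be up, with no MAC fault, on two
+		 * consecutive polls before reporting success.
+		 */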
+ if (link_up) {
+ if (++link_up_count == 2)
+ return 0;
+ } else {
+ link_up_count = 0;
+ }
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
+ unsigned int loopback_modes)
+{
+ enum efx_loopback_mode mode;
+ struct efx_loopback_state *state;
+ struct efx_channel *channel =
+ efx_get_channel(efx, efx->tx_channel_offset);
+ struct efx_tx_queue *tx_queue;
+ int rc = 0;
+
+	/* Set the port loopback_selftest member. From this point on
+	 * all received packets will be dropped. Mark the state as
+	 * "flushing" so that all in-flight packets are dropped.
+	 */
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (state == NULL)
+ return -ENOMEM;
+ BUG_ON(efx->loopback_selftest);
+ state->flush = true;
+ efx->loopback_selftest = state;
+
+ /* Test all supported loopback modes */
+ for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) {
+ if (!(loopback_modes & (1 << mode)))
+ continue;
+
+ /* Move the port into the specified loopback mode. */
+ state->flush = true;
+ mutex_lock(&efx->mac_lock);
+ efx->loopback_mode = mode;
+ rc = __efx_reconfigure_port(efx);
+ mutex_unlock(&efx->mac_lock);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev,
+ "unable to move into %s loopback\n",
+ LOOPBACK_MODE(efx));
+ goto out;
+ }
+
+ rc = efx_wait_for_link(efx);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev,
+ "loopback %s never came up\n",
+ LOOPBACK_MODE(efx));
+ goto out;
+ }
+
+ /* Test all enabled types of TX queue */
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ state->offload_csum = (tx_queue->queue &
+ EFX_TXQ_TYPE_OFFLOAD);
+ rc = efx_test_loopback(tx_queue,
+ &tests->loopback[mode]);
+ if (rc)
+ goto out;
+ }
+ }
+
+ out:
+ /* Remove the flush. The caller will remove the loopback setting */
+ state->flush = true;
+ efx->loopback_selftest = NULL;
+ wmb();
+ kfree(state);
+
+ return rc;
+}
+
+/**************************************************************************
+ *
+ * Entry point
+ *
+ *************************************************************************/
+
+int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
+ unsigned flags)
+{
+ enum efx_loopback_mode loopback_mode = efx->loopback_mode;
+ int phy_mode = efx->phy_mode;
+ int rc_test = 0, rc_reset, rc;
+
+ efx_selftest_async_cancel(efx);
+
+ /* Online (i.e. non-disruptive) testing
+ * This checks interrupt generation, event delivery and PHY presence. */
+
+ rc = efx_test_phy_alive(efx, tests);
+ if (rc && !rc_test)
+ rc_test = rc;
+
+ rc = efx_test_nvram(efx, tests);
+ if (rc && !rc_test)
+ rc_test = rc;
+
+ rc = efx_test_interrupts(efx, tests);
+ if (rc && !rc_test)
+ rc_test = rc;
+
+ rc = efx_test_eventq_irq(efx, tests);
+ if (rc && !rc_test)
+ rc_test = rc;
+
+ if (rc_test)
+ return rc_test;
+
+ if (!(flags & ETH_TEST_FL_OFFLINE))
+ return efx_test_phy(efx, tests, flags);
+
+ /* Offline (i.e. disruptive) testing
+ * This checks MAC and PHY loopback on the specified port. */
+
+ /* Detach the device so the kernel doesn't transmit during the
+ * loopback test and the watchdog timeout doesn't fire.
+ */
+ efx_device_detach_sync(efx);
+
+ if (efx->type->test_chip) {
+ rc_reset = efx->type->test_chip(efx, tests);
+ if (rc_reset) {
+ netif_err(efx, hw, efx->net_dev,
+ "Unable to recover from chip test\n");
+ efx_schedule_reset(efx, RESET_TYPE_DISABLE);
+ return rc_reset;
+ }
+
+ if ((tests->memory < 0 || tests->registers < 0) && !rc_test)
+ rc_test = -EIO;
+ }
+
+	/* Ensure that the PHY is powered and out of loopback
+	 * for the BIST and loopback tests.
+	 */
+ mutex_lock(&efx->mac_lock);
+ efx->phy_mode &= ~PHY_MODE_LOW_POWER;
+ efx->loopback_mode = LOOPBACK_NONE;
+ __efx_reconfigure_port(efx);
+ mutex_unlock(&efx->mac_lock);
+
+ rc = efx_test_phy(efx, tests, flags);
+ if (rc && !rc_test)
+ rc_test = rc;
+
+ rc = efx_test_loopbacks(efx, tests, efx->loopback_modes);
+ if (rc && !rc_test)
+ rc_test = rc;
+
+ /* restore the PHY to the previous state */
+ mutex_lock(&efx->mac_lock);
+ efx->phy_mode = phy_mode;
+ efx->loopback_mode = loopback_mode;
+ __efx_reconfigure_port(efx);
+ mutex_unlock(&efx->mac_lock);
+
+ netif_device_attach(efx->net_dev);
+
+ return rc_test;
+}
+
+void efx_selftest_async_start(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ efx_for_each_channel(channel, efx)
+ efx_nic_event_test_start(channel);
+ schedule_delayed_work(&efx->selftest_work, IRQ_TIMEOUT);
+}
+
+void efx_selftest_async_cancel(struct efx_nic *efx)
+{
+ cancel_delayed_work_sync(&efx->selftest_work);
+}
+
+void efx_selftest_async_work(struct work_struct *data)
+{
+ struct efx_nic *efx = container_of(data, struct efx_nic,
+ selftest_work.work);
+ struct efx_channel *channel;
+ int cpu;
+
+ efx_for_each_channel(channel, efx) {
+ cpu = efx_nic_event_test_irq_cpu(channel);
+ if (cpu < 0)
+ netif_err(efx, ifup, efx->net_dev,
+ "channel %d failed to trigger an interrupt\n",
+ channel->channel);
+ else
+ netif_dbg(efx, ifup, efx->net_dev,
+ "channel %d triggered interrupt on CPU %d\n",
+ channel->channel, cpu);
+ }
+}
diff --git a/kernel/drivers/net/ethernet/sfc/selftest.h b/kernel/drivers/net/ethernet/sfc/selftest.h
new file mode 100644
index 000000000..009dbe88f
--- /dev/null
+++ b/kernel/drivers/net/ethernet/sfc/selftest.h
@@ -0,0 +1,55 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2012 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_SELFTEST_H
+#define EFX_SELFTEST_H
+
+#include "net_driver.h"
+
+/*
+ * Self tests
+ */
+
+struct efx_loopback_self_tests {
+ int tx_sent[EFX_TXQ_TYPES];
+ int tx_done[EFX_TXQ_TYPES];
+ int rx_good;
+ int rx_bad;
+};
+
+#define EFX_MAX_PHY_TESTS 20
+
+/* Efx self test results
+ * For fields which are not counters, 1 indicates success and -1
+ * indicates failure.
+ */
+struct efx_self_tests {
+ /* online tests */
+ int phy_alive;
+ int nvram;
+ int interrupt;
+ int eventq_dma[EFX_MAX_CHANNELS];
+ int eventq_int[EFX_MAX_CHANNELS];
+ /* offline tests */
+ int memory;
+ int registers;
+ int phy_ext[EFX_MAX_PHY_TESTS];
+ struct efx_loopback_self_tests loopback[LOOPBACK_TEST_MAX + 1];
+};
+
+void efx_loopback_rx_packet(struct efx_nic *efx, const char *buf_ptr,
+ int pkt_len);
+int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
+ unsigned flags);
+void efx_selftest_async_start(struct efx_nic *efx);
+void efx_selftest_async_cancel(struct efx_nic *efx);
+void efx_selftest_async_work(struct work_struct *data);
+
+#endif /* EFX_SELFTEST_H */
diff --git a/kernel/drivers/net/ethernet/sfc/siena.c b/kernel/drivers/net/ethernet/sfc/siena.c
new file mode 100644
index 000000000..f12c81193
--- /dev/null
+++ b/kernel/drivers/net/ethernet/sfc/siena.c
@@ -0,0 +1,1029 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2013 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include "net_driver.h"
+#include "bitfield.h"
+#include "efx.h"
+#include "nic.h"
+#include "farch_regs.h"
+#include "io.h"
+#include "phy.h"
+#include "workarounds.h"
+#include "mcdi.h"
+#include "mcdi_pcol.h"
+#include "selftest.h"
+
+/* Hardware control for SFC9000 family including SFL9021 (aka Siena). */
+
+static void siena_init_wol(struct efx_nic *efx);
+
+
+static void siena_push_irq_moderation(struct efx_channel *channel)
+{
+ efx_dword_t timer_cmd;
+
+ if (channel->irq_moderation)
+ EFX_POPULATE_DWORD_2(timer_cmd,
+ FRF_CZ_TC_TIMER_MODE,
+ FFE_CZ_TIMER_MODE_INT_HLDOFF,
+ FRF_CZ_TC_TIMER_VAL,
+ channel->irq_moderation - 1);
+ else
+ EFX_POPULATE_DWORD_2(timer_cmd,
+ FRF_CZ_TC_TIMER_MODE,
+ FFE_CZ_TIMER_MODE_DIS,
+ FRF_CZ_TC_TIMER_VAL, 0);
+ efx_writed_page_locked(channel->efx, &timer_cmd, FR_BZ_TIMER_COMMAND_P0,
+ channel->channel);
+}
+
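+/* Flow control is disabled while queues are being flushed, so that pause
+ * frames cannot prevent the flushes from completing; fc_disable counts
+ * nested flush requests and efx_mcdi_set_mac() restores the configured
+ * flow control settings once it returns to zero.
+ */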
+void siena_prepare_flush(struct efx_nic *efx)
+{
+ if (efx->fc_disable++ == 0)
+ efx_mcdi_set_mac(efx);
+}
+
+void siena_finish_flush(struct efx_nic *efx)
+{
+ if (--efx->fc_disable == 0)
+ efx_mcdi_set_mac(efx);
+}
+
+static const struct efx_farch_register_test siena_register_tests[] = {
+ { FR_AZ_ADR_REGION,
+ EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
+ { FR_CZ_USR_EV_CFG,
+ EFX_OWORD32(0x000103FF, 0x00000000, 0x00000000, 0x00000000) },
+ { FR_AZ_RX_CFG,
+ EFX_OWORD32(0xFFFFFFFE, 0xFFFFFFFF, 0x0003FFFF, 0x00000000) },
+ { FR_AZ_TX_CFG,
+ EFX_OWORD32(0x7FFF0037, 0xFFFF8000, 0xFFFFFFFF, 0x03FFFFFF) },
+ { FR_AZ_TX_RESERVED,
+ EFX_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) },
+ { FR_AZ_SRM_TX_DC_CFG,
+ EFX_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) },
+ { FR_AZ_RX_DC_CFG,
+ EFX_OWORD32(0x00000003, 0x00000000, 0x00000000, 0x00000000) },
+ { FR_AZ_RX_DC_PF_WM,
+ EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) },
+ { FR_BZ_DP_CTRL,
+ EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) },
+ { FR_BZ_RX_RSS_TKEY,
+ EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF) },
+ { FR_CZ_RX_RSS_IPV6_REG1,
+ EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF) },
+ { FR_CZ_RX_RSS_IPV6_REG2,
+ EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF) },
+ { FR_CZ_RX_RSS_IPV6_REG3,
+ EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0x00000007, 0x00000000) },
+};
+
+static int siena_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
+{
+ enum reset_type reset_method = RESET_TYPE_ALL;
+ int rc, rc2;
+
+ efx_reset_down(efx, reset_method);
+
+ /* Reset the chip immediately so that it is completely
+ * quiescent regardless of what any VF driver does.
+ */
+ rc = efx_mcdi_reset(efx, reset_method);
+ if (rc)
+ goto out;
+
+ tests->registers =
+ efx_farch_test_registers(efx, siena_register_tests,
+ ARRAY_SIZE(siena_register_tests))
+ ? -1 : 1;
+
+ rc = efx_mcdi_reset(efx, reset_method);
+out:
+ rc2 = efx_reset_up(efx, reset_method, rc == 0);
+ return rc ? rc : rc2;
+}
+
+/**************************************************************************
+ *
+ * PTP
+ *
+ **************************************************************************
+ */
+
+static void siena_ptp_write_host_time(struct efx_nic *efx, u32 host_time)
+{
+ _efx_writed(efx, cpu_to_le32(host_time),
+ FR_CZ_MC_TREG_SMEM + MC_SMEM_P0_PTP_TIME_OFST);
+}
+
+static int siena_ptp_set_ts_config(struct efx_nic *efx,
+ struct hwtstamp_config *init)
+{
+ int rc;
+
+ switch (init->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ /* if TX timestamping is still requested then leave PTP on */
+ return efx_ptp_change_mode(efx,
+ init->tx_type != HWTSTAMP_TX_OFF,
+ efx_ptp_get_mode(efx));
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ init->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ return efx_ptp_change_mode(efx, true, MC_CMD_PTP_MODE_V1);
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ init->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+ rc = efx_ptp_change_mode(efx, true,
+ MC_CMD_PTP_MODE_V2_ENHANCED);
+ /* bug 33070 - old versions of the firmware do not support the
+ * improved UUID filtering option. Similarly old versions of the
+ * application do not expect it to be enabled. If the firmware
+ * does not accept the enhanced mode, fall back to the standard
+ * PTP v2 UUID filtering. */
+ if (rc != 0)
+ rc = efx_ptp_change_mode(efx, true, MC_CMD_PTP_MODE_V2);
+ return rc;
+ default:
+ return -ERANGE;
+ }
+}
+
+/**************************************************************************
+ *
+ * Device reset
+ *
+ **************************************************************************
+ */
+
+static int siena_map_reset_flags(u32 *flags)
+{
+ enum {
+ SIENA_RESET_PORT = (ETH_RESET_DMA | ETH_RESET_FILTER |
+ ETH_RESET_OFFLOAD | ETH_RESET_MAC |
+ ETH_RESET_PHY),
+ SIENA_RESET_MC = (SIENA_RESET_PORT |
+ ETH_RESET_MGMT << ETH_RESET_SHARED_SHIFT),
+ };
+
+ if ((*flags & SIENA_RESET_MC) == SIENA_RESET_MC) {
+ *flags &= ~SIENA_RESET_MC;
+ return RESET_TYPE_WORLD;
+ }
+
+ if ((*flags & SIENA_RESET_PORT) == SIENA_RESET_PORT) {
+ *flags &= ~SIENA_RESET_PORT;
+ return RESET_TYPE_ALL;
+ }
+
+ /* no invisible reset implemented */
+
+ return -EINVAL;
+}
+
+#ifdef CONFIG_EEH
+/* When a PCI device is isolated from the bus, a subsequent MMIO read is
+ * required for the kernel's EEH mechanisms to notice. As the Solarflare driver
+ * was written to minimise MMIO reads (for latency), a periodic call to check
+ * the EEH status of the device is needed so that device recovery can happen
+ * in a timely fashion.
+ */
+static void siena_monitor(struct efx_nic *efx)
+{
+ struct eeh_dev *eehdev = pci_dev_to_eeh_dev(efx->pci_dev);
+
+ eeh_dev_check_failure(eehdev);
+}
+#endif
+
+static int siena_probe_nvconfig(struct efx_nic *efx)
+{
+ u32 caps = 0;
+ int rc;
+
+ rc = efx_mcdi_get_board_cfg(efx, efx->net_dev->perm_addr, NULL, &caps);
+
+ efx->timer_quantum_ns =
+ (caps & (1 << MC_CMD_CAPABILITIES_TURBO_ACTIVE_LBN)) ?
+ 3072 : 6144; /* 768 cycles */
+ return rc;
+}
+
+static int siena_dimension_resources(struct efx_nic *efx)
+{
+ /* Each port has a small block of internal SRAM dedicated to
+ * the buffer table and descriptor caches. In theory we can
+ * map both blocks to one port, but we don't.
+ */
+ efx_farch_dimension_resources(efx, FR_CZ_BUF_FULL_TBL_ROWS / 2);
+ return 0;
+}
+
+static unsigned int siena_mem_map_size(struct efx_nic *efx)
+{
+ return FR_CZ_MC_TREG_SMEM +
+ FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS;
+}
+
+static int siena_probe_nic(struct efx_nic *efx)
+{
+ struct siena_nic_data *nic_data;
+ efx_oword_t reg;
+ int rc;
+
+ /* Allocate storage for hardware specific data */
+ nic_data = kzalloc(sizeof(struct siena_nic_data), GFP_KERNEL);
+ if (!nic_data)
+ return -ENOMEM;
+ nic_data->efx = efx;
+ efx->nic_data = nic_data;
+
+ if (efx_farch_fpga_ver(efx) != 0) {
+ netif_err(efx, probe, efx->net_dev,
+ "Siena FPGA not supported\n");
+ rc = -ENODEV;
+ goto fail1;
+ }
+
+ efx->max_channels = EFX_MAX_CHANNELS;
+
+ efx_reado(efx, &reg, FR_AZ_CS_DEBUG);
+ efx->port_num = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1;
+
+ rc = efx_mcdi_init(efx);
+ if (rc)
+ goto fail1;
+
+ /* Now we can reset the NIC */
+ rc = efx_mcdi_reset(efx, RESET_TYPE_ALL);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n");
+ goto fail3;
+ }
+
+ siena_init_wol(efx);
+
+ /* Allocate memory for INT_KER */
+ rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t),
+ GFP_KERNEL);
+ if (rc)
+ goto fail4;
+ BUG_ON(efx->irq_status.dma_addr & 0x0f);
+
+ netif_dbg(efx, probe, efx->net_dev,
+ "INT_KER at %llx (virt %p phys %llx)\n",
+ (unsigned long long)efx->irq_status.dma_addr,
+ efx->irq_status.addr,
+ (unsigned long long)virt_to_phys(efx->irq_status.addr));
+
+ /* Read in the non-volatile configuration */
+ rc = siena_probe_nvconfig(efx);
+ if (rc == -EINVAL) {
+ netif_err(efx, probe, efx->net_dev,
+ "NVRAM is invalid therefore using defaults\n");
+ efx->phy_type = PHY_TYPE_NONE;
+ efx->mdio.prtad = MDIO_PRTAD_NONE;
+ } else if (rc) {
+ goto fail5;
+ }
+
+ rc = efx_mcdi_mon_probe(efx);
+ if (rc)
+ goto fail5;
+
+ efx_siena_sriov_probe(efx);
+ efx_ptp_defer_probe_with_channel(efx);
+
+ return 0;
+
+fail5:
+ efx_nic_free_buffer(efx, &efx->irq_status);
+fail4:
+fail3:
+ efx_mcdi_fini(efx);
+fail1:
+ kfree(efx->nic_data);
+ return rc;
+}
+
+static void siena_rx_push_rss_config(struct efx_nic *efx)
+{
+ efx_oword_t temp;
+
+ /* Set hash key for IPv4 */
+ memcpy(&temp, efx->rx_hash_key, sizeof(temp));
+ efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);
+
+ /* Enable IPv6 RSS */
+ BUILD_BUG_ON(sizeof(efx->rx_hash_key) <
+ 2 * sizeof(temp) + FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8 ||
+ FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN != 0);
+ memcpy(&temp, efx->rx_hash_key, sizeof(temp));
+ efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG1);
+ memcpy(&temp, efx->rx_hash_key + sizeof(temp), sizeof(temp));
+ efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG2);
+ EFX_POPULATE_OWORD_2(temp, FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 1,
+ FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE, 1);
+ memcpy(&temp, efx->rx_hash_key + 2 * sizeof(temp),
+ FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8);
+ efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3);
+
+ efx_farch_rx_push_indir_table(efx);
+}
+
+/* This call performs hardware-specific global initialisation, such as
+ * defining the descriptor cache sizes and number of RSS channels.
+ * It does not set up any buffers, descriptor rings or event queues.
+ */
+static int siena_init_nic(struct efx_nic *efx)
+{
+ efx_oword_t temp;
+ int rc;
+
+ /* Recover from a failed assertion post-reset */
+ rc = efx_mcdi_handle_assertion(efx);
+ if (rc)
+ return rc;
+
+ /* Squash TX of packets of 16 bytes or less */
+ efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
+ EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
+ efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
+
+ /* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16
+ * descriptors (which is bad).
+ */
+ efx_reado(efx, &temp, FR_AZ_TX_CFG);
+ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_NO_EOP_DISC_EN, 0);
+ EFX_SET_OWORD_FIELD(temp, FRF_CZ_TX_FILTER_EN_BIT, 1);
+ efx_writeo(efx, &temp, FR_AZ_TX_CFG);
+
+ efx_reado(efx, &temp, FR_AZ_RX_CFG);
+ EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_DESC_PUSH_EN, 0);
+ EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_INGR_EN, 1);
+ /* Enable hash insertion. This is broken for the 'Falcon' hash
+ * if IPv6 hashing is also enabled, so also select Toeplitz
+ * TCP/IPv4 and IPv4 hashes. */
+ EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_INSRT_HDR, 1);
+ EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_ALG, 1);
+ EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_IP_HASH, 1);
+ EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_USR_BUF_SIZE,
+ EFX_RX_USR_BUF_SIZE >> 5);
+ efx_writeo(efx, &temp, FR_AZ_RX_CFG);
+
+ siena_rx_push_rss_config(efx);
+
+ /* Enable event logging */
+ rc = efx_mcdi_log_ctrl(efx, true, false, 0);
+ if (rc)
+ return rc;
+
+ /* Set destination of both TX and RX Flush events */
+ EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0);
+ efx_writeo(efx, &temp, FR_BZ_DP_CTRL);
+
+ EFX_POPULATE_OWORD_1(temp, FRF_CZ_USREV_DIS, 1);
+ efx_writeo(efx, &temp, FR_CZ_USR_EV_CFG);
+
+ efx_farch_init_common(efx);
+ return 0;
+}
+
+static void siena_remove_nic(struct efx_nic *efx)
+{
+ efx_mcdi_mon_remove(efx);
+
+ efx_nic_free_buffer(efx, &efx->irq_status);
+
+ efx_mcdi_reset(efx, RESET_TYPE_ALL);
+
+ efx_mcdi_fini(efx);
+
+ /* Tear down the private nic state */
+ kfree(efx->nic_data);
+ efx->nic_data = NULL;
+}
+
+#define SIENA_DMA_STAT(ext_name, mcdi_name) \
+ [SIENA_STAT_ ## ext_name] = \
+ { #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
+#define SIENA_OTHER_STAT(ext_name) \
+ [SIENA_STAT_ ## ext_name] = { #ext_name, 0, 0 }
+#define GENERIC_SW_STAT(ext_name) \
+ [GENERIC_STAT_ ## ext_name] = { #ext_name, 0, 0 }
+
+static const struct efx_hw_stat_desc siena_stat_desc[SIENA_STAT_COUNT] = {
+ SIENA_DMA_STAT(tx_bytes, TX_BYTES),
+ SIENA_OTHER_STAT(tx_good_bytes),
+ SIENA_DMA_STAT(tx_bad_bytes, TX_BAD_BYTES),
+ SIENA_DMA_STAT(tx_packets, TX_PKTS),
+ SIENA_DMA_STAT(tx_bad, TX_BAD_FCS_PKTS),
+ SIENA_DMA_STAT(tx_pause, TX_PAUSE_PKTS),
+ SIENA_DMA_STAT(tx_control, TX_CONTROL_PKTS),
+ SIENA_DMA_STAT(tx_unicast, TX_UNICAST_PKTS),
+ SIENA_DMA_STAT(tx_multicast, TX_MULTICAST_PKTS),
+ SIENA_DMA_STAT(tx_broadcast, TX_BROADCAST_PKTS),
+ SIENA_DMA_STAT(tx_lt64, TX_LT64_PKTS),
+ SIENA_DMA_STAT(tx_64, TX_64_PKTS),
+ SIENA_DMA_STAT(tx_65_to_127, TX_65_TO_127_PKTS),
+ SIENA_DMA_STAT(tx_128_to_255, TX_128_TO_255_PKTS),
+ SIENA_DMA_STAT(tx_256_to_511, TX_256_TO_511_PKTS),
+ SIENA_DMA_STAT(tx_512_to_1023, TX_512_TO_1023_PKTS),
+ SIENA_DMA_STAT(tx_1024_to_15xx, TX_1024_TO_15XX_PKTS),
+ SIENA_DMA_STAT(tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS),
+ SIENA_DMA_STAT(tx_gtjumbo, TX_GTJUMBO_PKTS),
+ SIENA_OTHER_STAT(tx_collision),
+ SIENA_DMA_STAT(tx_single_collision, TX_SINGLE_COLLISION_PKTS),
+ SIENA_DMA_STAT(tx_multiple_collision, TX_MULTIPLE_COLLISION_PKTS),
+ SIENA_DMA_STAT(tx_excessive_collision, TX_EXCESSIVE_COLLISION_PKTS),
+ SIENA_DMA_STAT(tx_deferred, TX_DEFERRED_PKTS),
+ SIENA_DMA_STAT(tx_late_collision, TX_LATE_COLLISION_PKTS),
+ SIENA_DMA_STAT(tx_excessive_deferred, TX_EXCESSIVE_DEFERRED_PKTS),
+ SIENA_DMA_STAT(tx_non_tcpudp, TX_NON_TCPUDP_PKTS),
+ SIENA_DMA_STAT(tx_mac_src_error, TX_MAC_SRC_ERR_PKTS),
+ SIENA_DMA_STAT(tx_ip_src_error, TX_IP_SRC_ERR_PKTS),
+ SIENA_DMA_STAT(rx_bytes, RX_BYTES),
+ SIENA_OTHER_STAT(rx_good_bytes),
+ SIENA_DMA_STAT(rx_bad_bytes, RX_BAD_BYTES),
+ SIENA_DMA_STAT(rx_packets, RX_PKTS),
+ SIENA_DMA_STAT(rx_good, RX_GOOD_PKTS),
+ SIENA_DMA_STAT(rx_bad, RX_BAD_FCS_PKTS),
+ SIENA_DMA_STAT(rx_pause, RX_PAUSE_PKTS),
+ SIENA_DMA_STAT(rx_control, RX_CONTROL_PKTS),
+ SIENA_DMA_STAT(rx_unicast, RX_UNICAST_PKTS),
+ SIENA_DMA_STAT(rx_multicast, RX_MULTICAST_PKTS),
+ SIENA_DMA_STAT(rx_broadcast, RX_BROADCAST_PKTS),
+ SIENA_DMA_STAT(rx_lt64, RX_UNDERSIZE_PKTS),
+ SIENA_DMA_STAT(rx_64, RX_64_PKTS),
+ SIENA_DMA_STAT(rx_65_to_127, RX_65_TO_127_PKTS),
+ SIENA_DMA_STAT(rx_128_to_255, RX_128_TO_255_PKTS),
+ SIENA_DMA_STAT(rx_256_to_511, RX_256_TO_511_PKTS),
+ SIENA_DMA_STAT(rx_512_to_1023, RX_512_TO_1023_PKTS),
+ SIENA_DMA_STAT(rx_1024_to_15xx, RX_1024_TO_15XX_PKTS),
+ SIENA_DMA_STAT(rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS),
+ SIENA_DMA_STAT(rx_gtjumbo, RX_GTJUMBO_PKTS),
+ SIENA_DMA_STAT(rx_bad_gtjumbo, RX_JABBER_PKTS),
+ SIENA_DMA_STAT(rx_overflow, RX_OVERFLOW_PKTS),
+ SIENA_DMA_STAT(rx_false_carrier, RX_FALSE_CARRIER_PKTS),
+ SIENA_DMA_STAT(rx_symbol_error, RX_SYMBOL_ERROR_PKTS),
+ SIENA_DMA_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS),
+ SIENA_DMA_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS),
+ SIENA_DMA_STAT(rx_internal_error, RX_INTERNAL_ERROR_PKTS),
+ SIENA_DMA_STAT(rx_nodesc_drop_cnt, RX_NODESC_DROPS),
+ GENERIC_SW_STAT(rx_nodesc_trunc),
+ GENERIC_SW_STAT(rx_noskb_drops),
+};
+static const unsigned long siena_stat_mask[] = {
+ [0 ... BITS_TO_LONGS(SIENA_STAT_COUNT) - 1] = ~0UL,
+};
+
+static size_t siena_describe_nic_stats(struct efx_nic *efx, u8 *names)
+{
+ return efx_nic_describe_stats(siena_stat_desc, SIENA_STAT_COUNT,
+ siena_stat_mask, names);
+}
+
+static int siena_try_update_nic_stats(struct efx_nic *efx)
+{
+ struct siena_nic_data *nic_data = efx->nic_data;
+ u64 *stats = nic_data->stats;
+ __le64 *dma_stats;
+ __le64 generation_start, generation_end;
+
+ dma_stats = efx->stats_buffer.addr;
+
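+	/* The MC bumps GENERATION_START before it rewrites the DMAed
+	 * statistics and GENERATION_END when it has finished. If the two
+	 * values read either side of our copy differ, a DMA update was in
+	 * progress and the caller must retry.
+	 */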
+ generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
+ if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
+ return 0;
+ rmb();
+ efx_nic_update_stats(siena_stat_desc, SIENA_STAT_COUNT, siena_stat_mask,
+ stats, efx->stats_buffer.addr, false);
+ rmb();
+ generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
+ if (generation_end != generation_start)
+ return -EAGAIN;
+
+ /* Update derived statistics */
+ efx_nic_fix_nodesc_drop_stat(efx,
+ &stats[SIENA_STAT_rx_nodesc_drop_cnt]);
+ efx_update_diff_stat(&stats[SIENA_STAT_tx_good_bytes],
+ stats[SIENA_STAT_tx_bytes] -
+ stats[SIENA_STAT_tx_bad_bytes]);
+ stats[SIENA_STAT_tx_collision] =
+ stats[SIENA_STAT_tx_single_collision] +
+ stats[SIENA_STAT_tx_multiple_collision] +
+ stats[SIENA_STAT_tx_excessive_collision] +
+ stats[SIENA_STAT_tx_late_collision];
+ efx_update_diff_stat(&stats[SIENA_STAT_rx_good_bytes],
+ stats[SIENA_STAT_rx_bytes] -
+ stats[SIENA_STAT_rx_bad_bytes]);
+ efx_update_sw_stats(efx, stats);
+ return 0;
+}
+
+static size_t siena_update_nic_stats(struct efx_nic *efx, u64 *full_stats,
+ struct rtnl_link_stats64 *core_stats)
+{
+ struct siena_nic_data *nic_data = efx->nic_data;
+ u64 *stats = nic_data->stats;
+ int retry;
+
+	/* If we're unlucky enough to read statistics during the DMA, wait
+	 * up to 10ms for it to finish (typically takes <500us).
+	 */
+ for (retry = 0; retry < 100; ++retry) {
+ if (siena_try_update_nic_stats(efx) == 0)
+ break;
+ udelay(100);
+ }
+
+ if (full_stats)
+ memcpy(full_stats, stats, sizeof(u64) * SIENA_STAT_COUNT);
+
+ if (core_stats) {
+ core_stats->rx_packets = stats[SIENA_STAT_rx_packets];
+ core_stats->tx_packets = stats[SIENA_STAT_tx_packets];
+ core_stats->rx_bytes = stats[SIENA_STAT_rx_bytes];
+ core_stats->tx_bytes = stats[SIENA_STAT_tx_bytes];
+ core_stats->rx_dropped = stats[SIENA_STAT_rx_nodesc_drop_cnt] +
+ stats[GENERIC_STAT_rx_nodesc_trunc] +
+ stats[GENERIC_STAT_rx_noskb_drops];
+ core_stats->multicast = stats[SIENA_STAT_rx_multicast];
+ core_stats->collisions = stats[SIENA_STAT_tx_collision];
+ core_stats->rx_length_errors =
+ stats[SIENA_STAT_rx_gtjumbo] +
+ stats[SIENA_STAT_rx_length_error];
+ core_stats->rx_crc_errors = stats[SIENA_STAT_rx_bad];
+ core_stats->rx_frame_errors = stats[SIENA_STAT_rx_align_error];
+ core_stats->rx_fifo_errors = stats[SIENA_STAT_rx_overflow];
+ core_stats->tx_window_errors =
+ stats[SIENA_STAT_tx_late_collision];
+
+ core_stats->rx_errors = (core_stats->rx_length_errors +
+ core_stats->rx_crc_errors +
+ core_stats->rx_frame_errors +
+ stats[SIENA_STAT_rx_symbol_error]);
+ core_stats->tx_errors = (core_stats->tx_window_errors +
+ stats[SIENA_STAT_tx_bad]);
+ }
+
+ return SIENA_STAT_COUNT;
+}
+
+static int siena_mac_reconfigure(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_MCAST_HASH_IN_LEN);
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_SET_MCAST_HASH_IN_LEN !=
+ MC_CMD_SET_MCAST_HASH_IN_HASH0_OFST +
+ sizeof(efx->multicast_hash));
+
+ efx_farch_filter_sync_rx_mode(efx);
+
+ WARN_ON(!mutex_is_locked(&efx->mac_lock));
+
+ rc = efx_mcdi_set_mac(efx);
+ if (rc != 0)
+ return rc;
+
+ memcpy(MCDI_PTR(inbuf, SET_MCAST_HASH_IN_HASH0),
+ efx->multicast_hash.byte, sizeof(efx->multicast_hash));
+ return efx_mcdi_rpc(efx, MC_CMD_SET_MCAST_HASH,
+ inbuf, sizeof(inbuf), NULL, 0, NULL);
+}
+
+/**************************************************************************
+ *
+ * Wake on LAN
+ *
+ **************************************************************************
+ */
+
+static void siena_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol)
+{
+ struct siena_nic_data *nic_data = efx->nic_data;
+
+ wol->supported = WAKE_MAGIC;
+ if (nic_data->wol_filter_id != -1)
+ wol->wolopts = WAKE_MAGIC;
+ else
+ wol->wolopts = 0;
+ memset(&wol->sopass, 0, sizeof(wol->sopass));
+}
+
+
+static int siena_set_wol(struct efx_nic *efx, u32 type)
+{
+ struct siena_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ if (type & ~WAKE_MAGIC)
+ return -EINVAL;
+
+ if (type & WAKE_MAGIC) {
+ if (nic_data->wol_filter_id != -1)
+ efx_mcdi_wol_filter_remove(efx,
+ nic_data->wol_filter_id);
+ rc = efx_mcdi_wol_filter_set_magic(efx, efx->net_dev->dev_addr,
+ &nic_data->wol_filter_id);
+ if (rc)
+ goto fail;
+
+ pci_wake_from_d3(efx->pci_dev, true);
+ } else {
+ rc = efx_mcdi_wol_filter_reset(efx);
+ nic_data->wol_filter_id = -1;
+ pci_wake_from_d3(efx->pci_dev, false);
+ if (rc)
+ goto fail;
+ }
+
+ return 0;
+ fail:
+ netif_err(efx, hw, efx->net_dev, "%s failed: type=%d rc=%d\n",
+ __func__, type, rc);
+ return rc;
+}
+
+
+static void siena_init_wol(struct efx_nic *efx)
+{
+ struct siena_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ rc = efx_mcdi_wol_filter_get_magic(efx, &nic_data->wol_filter_id);
+
+ if (rc != 0) {
+		/* If it failed, attempt to get into a synchronised
+		 * state with the MC by resetting any WoL filters that
+		 * may already be set.
+		 */
+ efx_mcdi_wol_filter_reset(efx);
+ nic_data->wol_filter_id = -1;
+ } else if (nic_data->wol_filter_id != -1) {
+ pci_wake_from_d3(efx->pci_dev, true);
+ }
+}
+
+/**************************************************************************
+ *
+ * MCDI
+ *
+ **************************************************************************
+ */
+
+#define MCDI_PDU(efx) \
+ (efx_port_num(efx) ? MC_SMEM_P1_PDU_OFST : MC_SMEM_P0_PDU_OFST)
+#define MCDI_DOORBELL(efx) \
+ (efx_port_num(efx) ? MC_SMEM_P1_DOORBELL_OFST : MC_SMEM_P0_DOORBELL_OFST)
+#define MCDI_STATUS(efx) \
+ (efx_port_num(efx) ? MC_SMEM_P1_STATUS_OFST : MC_SMEM_P0_STATUS_OFST)
+
+static void siena_mcdi_request(struct efx_nic *efx,
+ const efx_dword_t *hdr, size_t hdr_len,
+ const efx_dword_t *sdu, size_t sdu_len)
+{
+ unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
+ unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx);
+ unsigned int i;
+ unsigned int inlen_dw = DIV_ROUND_UP(sdu_len, 4);
+
+ EFX_BUG_ON_PARANOID(hdr_len != 4);
+
+ efx_writed(efx, hdr, pdu);
+
+ for (i = 0; i < inlen_dw; i++)
+ efx_writed(efx, &sdu[i], pdu + hdr_len + 4 * i);
+
+ /* Ensure the request is written out before the doorbell */
+ wmb();
+
+ /* ring the doorbell with a distinctive value */
+ _efx_writed(efx, (__force __le32) 0x45789abc, doorbell);
+}
+
+static bool siena_mcdi_poll_response(struct efx_nic *efx)
+{
+ unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
+ efx_dword_t hdr;
+
+ efx_readd(efx, &hdr, pdu);
+
+ /* All 1's indicates that shared memory is in reset (and is
+ * not a valid hdr). Wait for it to come out reset before
+ * completing the command
+ */
+ return EFX_DWORD_FIELD(hdr, EFX_DWORD_0) != 0xffffffff &&
+ EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE);
+}
+
+static void siena_mcdi_read_response(struct efx_nic *efx, efx_dword_t *outbuf,
+ size_t offset, size_t outlen)
+{
+ unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
+ unsigned int outlen_dw = DIV_ROUND_UP(outlen, 4);
+ int i;
+
+ for (i = 0; i < outlen_dw; i++)
+ efx_readd(efx, &outbuf[i], pdu + offset + 4 * i);
+}
+
+static int siena_mcdi_poll_reboot(struct efx_nic *efx)
+{
+ struct siena_nic_data *nic_data = efx->nic_data;
+ unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_STATUS(efx);
+ efx_dword_t reg;
+ u32 value;
+
+ efx_readd(efx, &reg, addr);
+ value = EFX_DWORD_FIELD(reg, EFX_DWORD_0);
+
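+	/* A zero status word means the MC has not rebooted since this
+	 * word was last cleared below.
+	 */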
+ if (value == 0)
+ return 0;
+
+ EFX_ZERO_DWORD(reg);
+ efx_writed(efx, &reg, addr);
+
+ /* MAC statistics have been cleared on the NIC; clear the local
+ * copies that we update with efx_update_diff_stat().
+ */
+ nic_data->stats[SIENA_STAT_tx_good_bytes] = 0;
+ nic_data->stats[SIENA_STAT_rx_good_bytes] = 0;
+
+ if (value == MC_STATUS_DWORD_ASSERT)
+ return -EINTR;
+ else
+ return -EIO;
+}
+
+/**************************************************************************
+ *
+ * MTD
+ *
+ **************************************************************************
+ */
+
+#ifdef CONFIG_SFC_MTD
+
+struct siena_nvram_type_info {
+ int port;
+ const char *name;
+};
+
+static const struct siena_nvram_type_info siena_nvram_types[] = {
+ [MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO] = { 0, "sfc_dummy_phy" },
+ [MC_CMD_NVRAM_TYPE_MC_FW] = { 0, "sfc_mcfw" },
+ [MC_CMD_NVRAM_TYPE_MC_FW_BACKUP] = { 0, "sfc_mcfw_backup" },
+ [MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0] = { 0, "sfc_static_cfg" },
+ [MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1] = { 1, "sfc_static_cfg" },
+ [MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0] = { 0, "sfc_dynamic_cfg" },
+ [MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1] = { 1, "sfc_dynamic_cfg" },
+ [MC_CMD_NVRAM_TYPE_EXP_ROM] = { 0, "sfc_exp_rom" },
+ [MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0] = { 0, "sfc_exp_rom_cfg" },
+ [MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1] = { 1, "sfc_exp_rom_cfg" },
+ [MC_CMD_NVRAM_TYPE_PHY_PORT0] = { 0, "sfc_phy_fw" },
+ [MC_CMD_NVRAM_TYPE_PHY_PORT1] = { 1, "sfc_phy_fw" },
+ [MC_CMD_NVRAM_TYPE_FPGA] = { 0, "sfc_fpga" },
+};
+
+static int siena_mtd_probe_partition(struct efx_nic *efx,
+ struct efx_mcdi_mtd_partition *part,
+ unsigned int type)
+{
+ const struct siena_nvram_type_info *info;
+ size_t size, erase_size;
+ bool protected;
+ int rc;
+
+ if (type >= ARRAY_SIZE(siena_nvram_types) ||
+ siena_nvram_types[type].name == NULL)
+ return -ENODEV;
+
+ info = &siena_nvram_types[type];
+
+ if (info->port != efx_port_num(efx))
+ return -ENODEV;
+
+ rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected);
+ if (rc)
+ return rc;
+ if (protected)
+ return -ENODEV; /* hide it */
+
+ part->nvram_type = type;
+ part->common.dev_type_name = "Siena NVRAM manager";
+ part->common.type_name = info->name;
+
+ part->common.mtd.type = MTD_NORFLASH;
+ part->common.mtd.flags = MTD_CAP_NORFLASH;
+ part->common.mtd.size = size;
+ part->common.mtd.erasesize = erase_size;
+
+ return 0;
+}
+
+static int siena_mtd_get_fw_subtypes(struct efx_nic *efx,
+ struct efx_mcdi_mtd_partition *parts,
+ size_t n_parts)
+{
+ uint16_t fw_subtype_list[
+ MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM];
+ size_t i;
+ int rc;
+
+ rc = efx_mcdi_get_board_cfg(efx, NULL, fw_subtype_list, NULL);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < n_parts; i++)
+ parts[i].fw_subtype = fw_subtype_list[parts[i].nvram_type];
+
+ return 0;
+}
+
+static int siena_mtd_probe(struct efx_nic *efx)
+{
+ struct efx_mcdi_mtd_partition *parts;
+ u32 nvram_types;
+ unsigned int type;
+ size_t n_parts;
+ int rc;
+
+ ASSERT_RTNL();
+
+ rc = efx_mcdi_nvram_types(efx, &nvram_types);
+ if (rc)
+ return rc;
+
+ parts = kcalloc(hweight32(nvram_types), sizeof(*parts), GFP_KERNEL);
+ if (!parts)
+ return -ENOMEM;
+
+ type = 0;
+ n_parts = 0;
+
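+	/* nvram_types is a bitmask of partition types reported by the MC;
+	 * probe each type whose bit is set.
+	 */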
+ while (nvram_types != 0) {
+ if (nvram_types & 1) {
+ rc = siena_mtd_probe_partition(efx, &parts[n_parts],
+ type);
+ if (rc == 0)
+ n_parts++;
+ else if (rc != -ENODEV)
+ goto fail;
+ }
+ type++;
+ nvram_types >>= 1;
+ }
+
+ rc = siena_mtd_get_fw_subtypes(efx, parts, n_parts);
+ if (rc)
+ goto fail;
+
+ rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
+fail:
+ if (rc)
+ kfree(parts);
+ return rc;
+}
+
+#endif /* CONFIG_SFC_MTD */
+
+/**************************************************************************
+ *
+ * Revision-dependent attributes used by efx.c and nic.c
+ *
+ **************************************************************************
+ */
+
+const struct efx_nic_type siena_a0_nic_type = {
+ .mem_map_size = siena_mem_map_size,
+ .probe = siena_probe_nic,
+ .remove = siena_remove_nic,
+ .init = siena_init_nic,
+ .dimension_resources = siena_dimension_resources,
+ .fini = efx_port_dummy_op_void,
+#ifdef CONFIG_EEH
+ .monitor = siena_monitor,
+#else
+ .monitor = NULL,
+#endif
+ .map_reset_reason = efx_mcdi_map_reset_reason,
+ .map_reset_flags = siena_map_reset_flags,
+ .reset = efx_mcdi_reset,
+ .probe_port = efx_mcdi_port_probe,
+ .remove_port = efx_mcdi_port_remove,
+ .fini_dmaq = efx_farch_fini_dmaq,
+ .prepare_flush = siena_prepare_flush,
+ .finish_flush = siena_finish_flush,
+ .prepare_flr = efx_port_dummy_op_void,
+ .finish_flr = efx_farch_finish_flr,
+ .describe_stats = siena_describe_nic_stats,
+ .update_stats = siena_update_nic_stats,
+ .start_stats = efx_mcdi_mac_start_stats,
+ .pull_stats = efx_mcdi_mac_pull_stats,
+ .stop_stats = efx_mcdi_mac_stop_stats,
+ .set_id_led = efx_mcdi_set_id_led,
+ .push_irq_moderation = siena_push_irq_moderation,
+ .reconfigure_mac = siena_mac_reconfigure,
+ .check_mac_fault = efx_mcdi_mac_check_fault,
+ .reconfigure_port = efx_mcdi_port_reconfigure,
+ .get_wol = siena_get_wol,
+ .set_wol = siena_set_wol,
+ .resume_wol = siena_init_wol,
+ .test_chip = siena_test_chip,
+ .test_nvram = efx_mcdi_nvram_test_all,
+ .mcdi_request = siena_mcdi_request,
+ .mcdi_poll_response = siena_mcdi_poll_response,
+ .mcdi_read_response = siena_mcdi_read_response,
+ .mcdi_poll_reboot = siena_mcdi_poll_reboot,
+ .irq_enable_master = efx_farch_irq_enable_master,
+ .irq_test_generate = efx_farch_irq_test_generate,
+ .irq_disable_non_ev = efx_farch_irq_disable_master,
+ .irq_handle_msi = efx_farch_msi_interrupt,
+ .irq_handle_legacy = efx_farch_legacy_interrupt,
+ .tx_probe = efx_farch_tx_probe,
+ .tx_init = efx_farch_tx_init,
+ .tx_remove = efx_farch_tx_remove,
+ .tx_write = efx_farch_tx_write,
+ .rx_push_rss_config = siena_rx_push_rss_config,
+ .rx_probe = efx_farch_rx_probe,
+ .rx_init = efx_farch_rx_init,
+ .rx_remove = efx_farch_rx_remove,
+ .rx_write = efx_farch_rx_write,
+ .rx_defer_refill = efx_farch_rx_defer_refill,
+ .ev_probe = efx_farch_ev_probe,
+ .ev_init = efx_farch_ev_init,
+ .ev_fini = efx_farch_ev_fini,
+ .ev_remove = efx_farch_ev_remove,
+ .ev_process = efx_farch_ev_process,
+ .ev_read_ack = efx_farch_ev_read_ack,
+ .ev_test_generate = efx_farch_ev_test_generate,
+ .filter_table_probe = efx_farch_filter_table_probe,
+ .filter_table_restore = efx_farch_filter_table_restore,
+ .filter_table_remove = efx_farch_filter_table_remove,
+ .filter_update_rx_scatter = efx_farch_filter_update_rx_scatter,
+ .filter_insert = efx_farch_filter_insert,
+ .filter_remove_safe = efx_farch_filter_remove_safe,
+ .filter_get_safe = efx_farch_filter_get_safe,
+ .filter_clear_rx = efx_farch_filter_clear_rx,
+ .filter_count_rx_used = efx_farch_filter_count_rx_used,
+ .filter_get_rx_id_limit = efx_farch_filter_get_rx_id_limit,
+ .filter_get_rx_ids = efx_farch_filter_get_rx_ids,
+#ifdef CONFIG_RFS_ACCEL
+ .filter_rfs_insert = efx_farch_filter_rfs_insert,
+ .filter_rfs_expire_one = efx_farch_filter_rfs_expire_one,
+#endif
+#ifdef CONFIG_SFC_MTD
+ .mtd_probe = siena_mtd_probe,
+ .mtd_rename = efx_mcdi_mtd_rename,
+ .mtd_read = efx_mcdi_mtd_read,
+ .mtd_erase = efx_mcdi_mtd_erase,
+ .mtd_write = efx_mcdi_mtd_write,
+ .mtd_sync = efx_mcdi_mtd_sync,
+#endif
+ .ptp_write_host_time = siena_ptp_write_host_time,
+ .ptp_set_ts_config = siena_ptp_set_ts_config,
+ .sriov_init = efx_siena_sriov_init,
+ .sriov_fini = efx_siena_sriov_fini,
+ .sriov_mac_address_changed = efx_siena_sriov_mac_address_changed,
+ .sriov_wanted = efx_siena_sriov_wanted,
+ .sriov_reset = efx_siena_sriov_reset,
+
+ .revision = EFX_REV_SIENA_A0,
+ .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
+ .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
+ .buf_tbl_base = FR_BZ_BUF_FULL_TBL,
+ .evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL,
+ .evq_rptr_tbl_base = FR_BZ_EVQ_RPTR,
+ .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
+ .rx_prefix_size = FS_BZ_RX_PREFIX_SIZE,
+ .rx_hash_offset = FS_BZ_RX_PREFIX_HASH_OFST,
+ .rx_buffer_padding = 0,
+ .can_rx_scatter = true,
+ .max_interrupt_mode = EFX_INT_MODE_MSIX,
+ .timer_period_max = 1 << FRF_CZ_TC_TIMER_VAL_WIDTH,
+ .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_RXHASH | NETIF_F_NTUPLE),
+ .mcdi_max_ver = 1,
+ .max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS,
+ .hwtstamp_filters = (1 << HWTSTAMP_FILTER_NONE |
+ 1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT |
+ 1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC |
+ 1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ |
+ 1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT |
+ 1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC |
+ 1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ),
+};
diff --git a/kernel/drivers/net/ethernet/sfc/siena_sriov.c b/kernel/drivers/net/ethernet/sfc/siena_sriov.c
new file mode 100644
index 000000000..fe8343079
--- /dev/null
+++ b/kernel/drivers/net/ethernet/sfc/siena_sriov.c
@@ -0,0 +1,1668 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2010-2012 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+#include <linux/pci.h>
+#include <linux/module.h>
+#include "net_driver.h"
+#include "efx.h"
+#include "nic.h"
+#include "io.h"
+#include "mcdi.h"
+#include "filter.h"
+#include "mcdi_pcol.h"
+#include "farch_regs.h"
+#include "vfdi.h"
+
+/* Number of longs required to track all the VIs in a VF */
+#define VI_MASK_LENGTH BITS_TO_LONGS(1 << EFX_VI_SCALE_MAX)
+
+/* Maximum number of RX queues supported */
+#define VF_MAX_RX_QUEUES 63
+
+/**
+ * enum efx_vf_tx_filter_mode - TX MAC filtering behaviour
+ * @VF_TX_FILTER_OFF: Disabled
+ * @VF_TX_FILTER_AUTO: Enabled if a MAC address is assigned to the VF
+ *	and at most 2 TX queues are allowed per VF.
+ * @VF_TX_FILTER_ON: Enabled
+ */
+enum efx_vf_tx_filter_mode {
+ VF_TX_FILTER_OFF,
+ VF_TX_FILTER_AUTO,
+ VF_TX_FILTER_ON,
+};
+
+/**
+ * struct efx_vf - Back-end resource and protocol state for a PCI VF
+ * @efx: The Efx NIC owning this VF
+ * @pci_rid: The PCI requester ID for this VF
+ * @pci_name: The PCI name (formatted address) of this VF
+ * @index: Index of VF within its port and PF.
+ * @req: VFDI incoming request work item. Incoming USR_EV events are received
+ * by the NAPI handler, but must be handled by executing MCDI requests
+ * inside a work item.
+ * @req_addr: VFDI incoming request DMA address (in VF's PCI address space).
+ * @req_type: Expected next incoming (from VF) %VFDI_EV_TYPE member.
+ * @req_seqno: Expected next incoming (from VF) %VFDI_EV_SEQ member.
+ * @msg_seqno: Next %VFDI_EV_SEQ member to reply to VF. Protected by
+ * @status_lock
+ * @busy: VFDI request queued to be processed or being processed. Receiving
+ * a VFDI request when @busy is set is an error condition.
+ * @buf: Incoming VFDI requests are DMAed from the VF into this buffer.
+ * @buftbl_base: Buffer table entries for this VF start at this index.
+ * @rx_filtering: Receive filtering has been requested by the VF driver.
+ * @rx_filter_flags: The flags sent in the %VFDI_OP_INSERT_FILTER request.
+ * @rx_filter_qid: VF relative qid for RX filter requested by VF.
+ * @rx_filter_id: Receive MAC filter ID. Only one filter per VF is supported.
+ * @tx_filter_mode: Transmit MAC filtering mode.
+ * @tx_filter_id: Transmit MAC filter ID.
+ * @addr: The MAC address and outer vlan tag of the VF.
+ * @status_addr: VF DMA address of page for &struct vfdi_status updates.
+ * @status_lock: Mutex protecting @msg_seqno, @status_addr, @addr,
+ * @peer_page_addrs and @peer_page_count from simultaneous
+ * updates by the VM and consumption by
+ * efx_siena_sriov_update_vf_addr()
+ * @peer_page_addrs: Pointer to an array of guest pages for local addresses.
+ * @peer_page_count: Number of entries in @peer_page_addrs.
+ * @evq0_addrs: Array of guest pages backing evq0.
+ * @evq0_count: Number of entries in @evq0_addrs.
+ * @flush_waitq: wait queue used by %VFDI_OP_FINI_ALL_QUEUES handler
+ * to wait for flush completions.
+ * @txq_lock: Mutex for TX queue allocation.
+ * @txq_mask: Mask of initialized transmit queues.
+ * @txq_count: Number of initialized transmit queues.
+ * @rxq_mask: Mask of initialized receive queues.
+ * @rxq_count: Number of initialized receive queues.
+ * @rxq_retry_mask: Mask of receive queues that need to be flushed again
+ * due to flush failure.
+ * @rxq_retry_count: Number of receive queues in @rxq_retry_mask.
+ * @reset_work: Work item to schedule a VF reset.
+ */
+struct efx_vf {
+ struct efx_nic *efx;
+ unsigned int pci_rid;
+ char pci_name[13]; /* dddd:bb:dd.f */
+ unsigned int index;
+ struct work_struct req;
+ u64 req_addr;
+ int req_type;
+ unsigned req_seqno;
+ unsigned msg_seqno;
+ bool busy;
+ struct efx_buffer buf;
+ unsigned buftbl_base;
+ bool rx_filtering;
+ enum efx_filter_flags rx_filter_flags;
+ unsigned rx_filter_qid;
+ int rx_filter_id;
+ enum efx_vf_tx_filter_mode tx_filter_mode;
+ int tx_filter_id;
+ struct vfdi_endpoint addr;
+ u64 status_addr;
+ struct mutex status_lock;
+ u64 *peer_page_addrs;
+ unsigned peer_page_count;
+ u64 evq0_addrs[EFX_MAX_VF_EVQ_SIZE * sizeof(efx_qword_t) /
+ EFX_BUF_SIZE];
+ unsigned evq0_count;
+ wait_queue_head_t flush_waitq;
+ struct mutex txq_lock;
+ unsigned long txq_mask[VI_MASK_LENGTH];
+ unsigned txq_count;
+ unsigned long rxq_mask[VI_MASK_LENGTH];
+ unsigned rxq_count;
+ unsigned long rxq_retry_mask[VI_MASK_LENGTH];
+ atomic_t rxq_retry_count;
+ struct work_struct reset_work;
+};
+
+struct efx_memcpy_req {
+ unsigned int from_rid;
+ void *from_buf;
+ u64 from_addr;
+ unsigned int to_rid;
+ u64 to_addr;
+ unsigned length;
+};
+
+/**
+ * struct efx_local_addr - A MAC address on the vswitch without a VF.
+ *
+ * Siena does not have a switch, so VFs can't transmit data to each
+ * other. Instead the VFs must be made aware of the local addresses
+ * on the vswitch, so that they can arrange for an alternative
+ * software datapath to be used.
+ *
+ * @link: List head for insertion into efx->local_addr_list.
+ * @addr: Ethernet address
+ */
+struct efx_local_addr {
+ struct list_head link;
+ u8 addr[ETH_ALEN];
+};
+
+/**
+ * struct efx_endpoint_page - Page of vfdi_endpoint structures
+ *
+ * @link: List head for insertion into efx->local_page_list.
+ * @ptr: Pointer to page.
+ * @addr: DMA address of page.
+ */
+struct efx_endpoint_page {
+ struct list_head link;
+ void *ptr;
+ dma_addr_t addr;
+};
+
+/* Buffer table entries are reserved txq0,rxq0,evq0,txq1,rxq1,evq1 */
+#define EFX_BUFTBL_TXQ_BASE(_vf, _qid) \
+ ((_vf)->buftbl_base + EFX_VF_BUFTBL_PER_VI * (_qid))
+#define EFX_BUFTBL_RXQ_BASE(_vf, _qid) \
+ (EFX_BUFTBL_TXQ_BASE(_vf, _qid) + \
+ (EFX_MAX_DMAQ_SIZE * sizeof(efx_qword_t) / EFX_BUF_SIZE))
+#define EFX_BUFTBL_EVQ_BASE(_vf, _qid) \
+ (EFX_BUFTBL_TXQ_BASE(_vf, _qid) + \
+ (2 * EFX_MAX_DMAQ_SIZE * sizeof(efx_qword_t) / EFX_BUF_SIZE))
+
+#define EFX_FIELD_MASK(_field) \
+ ((1 << _field ## _WIDTH) - 1)
+
+/* VFs can only use this many transmit channels */
+static unsigned int vf_max_tx_channels = 2;
+module_param(vf_max_tx_channels, uint, 0444);
+MODULE_PARM_DESC(vf_max_tx_channels,
+ "Limit the number of TX channels VFs can use");
+
+static int max_vfs = -1;
+module_param(max_vfs, int, 0444);
+MODULE_PARM_DESC(max_vfs,
+ "Reduce the number of VFs initialized by the driver");
+
+/* Workqueue used by VFDI communication. We can't use the global
+ * workqueue because it may be running the VF driver's probe()
+ * routine, which will be blocked there waiting for a VFDI response.
+ */
+static struct workqueue_struct *vfdi_workqueue;
+
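+/* Map a VF-relative VI index onto the absolute VI number: VFs occupy a
+ * contiguous block of VIs starting at EFX_VI_BASE, efx_vf_size() VIs each.
+ */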
+static unsigned abs_index(struct efx_vf *vf, unsigned index)
+{
+ return EFX_VI_BASE + vf->index * efx_vf_size(vf->efx) + index;
+}
+
+static int efx_siena_sriov_cmd(struct efx_nic *efx, bool enable,
+ unsigned *vi_scale_out, unsigned *vf_total_out)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_SRIOV_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_SRIOV_OUT_LEN);
+ unsigned vi_scale, vf_total;
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, SRIOV_IN_ENABLE, enable ? 1 : 0);
+ MCDI_SET_DWORD(inbuf, SRIOV_IN_VI_BASE, EFX_VI_BASE);
+ MCDI_SET_DWORD(inbuf, SRIOV_IN_VF_COUNT, efx->vf_count);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_SRIOV, inbuf, MC_CMD_SRIOV_IN_LEN,
+ outbuf, MC_CMD_SRIOV_OUT_LEN, &outlen);
+ if (rc)
+ return rc;
+ if (outlen < MC_CMD_SRIOV_OUT_LEN)
+ return -EIO;
+
+ vf_total = MCDI_DWORD(outbuf, SRIOV_OUT_VF_TOTAL);
+ vi_scale = MCDI_DWORD(outbuf, SRIOV_OUT_VI_SCALE);
+ if (vi_scale > EFX_VI_SCALE_MAX)
+ return -EOPNOTSUPP;
+
+ if (vi_scale_out)
+ *vi_scale_out = vi_scale;
+ if (vf_total_out)
+ *vf_total_out = vf_total;
+
+ return 0;
+}
+
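+/* Enable or disable delivery of user events (VFDI doorbells from VFs),
+ * steering them to the PF's dedicated IOV channel.
+ */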
+static void efx_siena_sriov_usrev(struct efx_nic *efx, bool enabled)
+{
+ struct siena_nic_data *nic_data = efx->nic_data;
+ efx_oword_t reg;
+
+ EFX_POPULATE_OWORD_2(reg,
+ FRF_CZ_USREV_DIS, enabled ? 0 : 1,
+ FRF_CZ_DFLT_EVQ, nic_data->vfdi_channel->channel);
+ efx_writeo(efx, &reg, FR_CZ_USR_EV_CFG);
+}
+
+static int efx_siena_sriov_memcpy(struct efx_nic *efx,
+ struct efx_memcpy_req *req,
+ unsigned int count)
+{
+ MCDI_DECLARE_BUF(inbuf, MCDI_CTL_SDU_LEN_MAX_V1);
+ MCDI_DECLARE_STRUCT_PTR(record);
+ unsigned int index, used;
+ u64 from_addr;
+ u32 from_rid;
+ int rc;
+
+ mb(); /* Finish writing source/reading dest before DMA starts */
+
+ if (WARN_ON(count > MC_CMD_MEMCPY_IN_RECORD_MAXNUM))
+ return -ENOBUFS;
+ used = MC_CMD_MEMCPY_IN_LEN(count);
+
+ for (index = 0; index < count; index++) {
+ record = MCDI_ARRAY_STRUCT_PTR(inbuf, MEMCPY_IN_RECORD, index);
+ MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_NUM_RECORDS,
+ count);
+ MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_TO_RID,
+ req->to_rid);
+ MCDI_SET_QWORD(record, MEMCPY_RECORD_TYPEDEF_TO_ADDR,
+ req->to_addr);
+ if (req->from_buf == NULL) {
+ from_rid = req->from_rid;
+ from_addr = req->from_addr;
+ } else {
+ if (WARN_ON(used + req->length >
+ MCDI_CTL_SDU_LEN_MAX_V1)) {
+ rc = -ENOBUFS;
+ goto out;
+ }
+
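+			/* Inline source: embed the data in the MCDI request
+			 * itself and point the record at its offset within
+			 * the request buffer.
+			 */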
+ from_rid = MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE;
+ from_addr = used;
+ memcpy(_MCDI_PTR(inbuf, used), req->from_buf,
+ req->length);
+ used += req->length;
+ }
+
+ MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_RID, from_rid);
+ MCDI_SET_QWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_ADDR,
+ from_addr);
+ MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_LENGTH,
+ req->length);
+
+ ++req;
+ }
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_MEMCPY, inbuf, used, NULL, 0, NULL);
+out:
+ mb(); /* Don't write source/read dest before DMA is complete */
+
+ return rc;
+}
+
+/* The TX filter is entirely controlled by this driver, and is modified
+ * underneath the feet of the VF
+ */
+static void efx_siena_sriov_reset_tx_filter(struct efx_vf *vf)
+{
+ struct efx_nic *efx = vf->efx;
+ struct efx_filter_spec filter;
+ u16 vlan;
+ int rc;
+
+ if (vf->tx_filter_id != -1) {
+ efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
+ vf->tx_filter_id);
+ netif_dbg(efx, hw, efx->net_dev, "Removed vf %s tx filter %d\n",
+ vf->pci_name, vf->tx_filter_id);
+ vf->tx_filter_id = -1;
+ }
+
+ if (is_zero_ether_addr(vf->addr.mac_addr))
+ return;
+
+ /* Turn on TX filtering automatically if not explicitly
+ * enabled or disabled.
+ */
+ if (vf->tx_filter_mode == VF_TX_FILTER_AUTO && vf_max_tx_channels <= 2)
+ vf->tx_filter_mode = VF_TX_FILTER_ON;
+
+ vlan = ntohs(vf->addr.tci) & VLAN_VID_MASK;
+ efx_filter_init_tx(&filter, abs_index(vf, 0));
+ rc = efx_filter_set_eth_local(&filter,
+ vlan ? vlan : EFX_FILTER_VID_UNSPEC,
+ vf->addr.mac_addr);
+ BUG_ON(rc);
+
+ rc = efx_filter_insert_filter(efx, &filter, true);
+ if (rc < 0) {
+ netif_warn(efx, hw, efx->net_dev,
+ "Unable to migrate tx filter for vf %s\n",
+ vf->pci_name);
+ } else {
+ netif_dbg(efx, hw, efx->net_dev, "Inserted vf %s tx filter %d\n",
+ vf->pci_name, rc);
+ vf->tx_filter_id = rc;
+ }
+}
+
+/* The RX filter is managed here on behalf of the VF driver */
+static void efx_siena_sriov_reset_rx_filter(struct efx_vf *vf)
+{
+ struct efx_nic *efx = vf->efx;
+ struct efx_filter_spec filter;
+ u16 vlan;
+ int rc;
+
+ if (vf->rx_filter_id != -1) {
+ efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
+ vf->rx_filter_id);
+ netif_dbg(efx, hw, efx->net_dev, "Removed vf %s rx filter %d\n",
+ vf->pci_name, vf->rx_filter_id);
+ vf->rx_filter_id = -1;
+ }
+
+ if (!vf->rx_filtering || is_zero_ether_addr(vf->addr.mac_addr))
+ return;
+
+ vlan = ntohs(vf->addr.tci) & VLAN_VID_MASK;
+ efx_filter_init_rx(&filter, EFX_FILTER_PRI_REQUIRED,
+ vf->rx_filter_flags,
+ abs_index(vf, vf->rx_filter_qid));
+ rc = efx_filter_set_eth_local(&filter,
+ vlan ? vlan : EFX_FILTER_VID_UNSPEC,
+ vf->addr.mac_addr);
+ BUG_ON(rc);
+
+ rc = efx_filter_insert_filter(efx, &filter, true);
+ if (rc < 0) {
+ netif_warn(efx, hw, efx->net_dev,
+ "Unable to insert rx filter for vf %s\n",
+ vf->pci_name);
+ } else {
+ netif_dbg(efx, hw, efx->net_dev, "Inserted vf %s rx filter %d\n",
+ vf->pci_name, rc);
+ vf->rx_filter_id = rc;
+ }
+}
+
+static void __efx_siena_sriov_update_vf_addr(struct efx_vf *vf)
+{
+ struct efx_nic *efx = vf->efx;
+ struct siena_nic_data *nic_data = efx->nic_data;
+
+ efx_siena_sriov_reset_tx_filter(vf);
+ efx_siena_sriov_reset_rx_filter(vf);
+ queue_work(vfdi_workqueue, &nic_data->peer_work);
+}
+
+/* Push the peer list to this VF. The caller must hold status_lock to interlock
+ * with VFDI requests, and they must be serialised against manipulation of
+ * local_page_list, either by acquiring local_lock or by running from
+ * efx_siena_sriov_peer_work()
+ */
+static void __efx_siena_sriov_push_vf_status(struct efx_vf *vf)
+{
+ struct efx_nic *efx = vf->efx;
+ struct siena_nic_data *nic_data = efx->nic_data;
+ struct vfdi_status *status = nic_data->vfdi_status.addr;
+ struct efx_memcpy_req copy[4];
+ struct efx_endpoint_page *epp;
+ unsigned int pos, count;
+ unsigned data_offset;
+ efx_qword_t event;
+
+ WARN_ON(!mutex_is_locked(&vf->status_lock));
+ WARN_ON(!vf->status_addr);
+
+ status->local = vf->addr;
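+	/* Both generation counters take the new value locally; the copies
+	 * below DMA generation_start first and generation_end last so the
+	 * VF can detect a partially updated status page.
+	 */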
+ status->generation_end = ++status->generation_start;
+
+ memset(copy, '\0', sizeof(copy));
+ /* Write generation_start */
+ copy[0].from_buf = &status->generation_start;
+ copy[0].to_rid = vf->pci_rid;
+ copy[0].to_addr = vf->status_addr + offsetof(struct vfdi_status,
+ generation_start);
+ copy[0].length = sizeof(status->generation_start);
+ /* DMA the rest of the structure (excluding the generations). This
+ * assumes that the non-generation portion of vfdi_status is in
+ * one chunk starting at the version member.
+ */
+ data_offset = offsetof(struct vfdi_status, version);
+ copy[1].from_rid = efx->pci_dev->devfn;
+ copy[1].from_addr = nic_data->vfdi_status.dma_addr + data_offset;
+ copy[1].to_rid = vf->pci_rid;
+ copy[1].to_addr = vf->status_addr + data_offset;
+ copy[1].length = status->length - data_offset;
+
+ /* Copy the peer pages */
+ pos = 2;
+ count = 0;
+ list_for_each_entry(epp, &nic_data->local_page_list, link) {
+ if (count == vf->peer_page_count) {
+			/* The VF driver will know it needs to provide more
+			 * pages because peer_count is too large.
+			 */
+ break;
+ }
+ copy[pos].from_buf = NULL;
+ copy[pos].from_rid = efx->pci_dev->devfn;
+ copy[pos].from_addr = epp->addr;
+ copy[pos].to_rid = vf->pci_rid;
+ copy[pos].to_addr = vf->peer_page_addrs[count];
+ copy[pos].length = EFX_PAGE_SIZE;
+
+ if (++pos == ARRAY_SIZE(copy)) {
+ efx_siena_sriov_memcpy(efx, copy, ARRAY_SIZE(copy));
+ pos = 0;
+ }
+ ++count;
+ }
+
+ /* Write generation_end */
+ copy[pos].from_buf = &status->generation_end;
+ copy[pos].to_rid = vf->pci_rid;
+ copy[pos].to_addr = vf->status_addr + offsetof(struct vfdi_status,
+ generation_end);
+ copy[pos].length = sizeof(status->generation_end);
+ efx_siena_sriov_memcpy(efx, copy, pos + 1);
+
+ /* Notify the guest */
+ EFX_POPULATE_QWORD_3(event,
+ FSF_AZ_EV_CODE, FSE_CZ_EV_CODE_USER_EV,
+ VFDI_EV_SEQ, (vf->msg_seqno & 0xff),
+ VFDI_EV_TYPE, VFDI_EV_TYPE_STATUS);
+ ++vf->msg_seqno;
+ efx_farch_generate_event(efx,
+ EFX_VI_BASE + vf->index * efx_vf_size(efx),
+ &event);
+}
+
+static void efx_siena_sriov_bufs(struct efx_nic *efx, unsigned offset,
+ u64 *addr, unsigned count)
+{
+ efx_qword_t buf;
+ unsigned pos;
+
+ for (pos = 0; pos < count; ++pos) {
+ EFX_POPULATE_QWORD_3(buf,
+ FRF_AZ_BUF_ADR_REGION, 0,
+ FRF_AZ_BUF_ADR_FBUF,
+ addr ? addr[pos] >> 12 : 0,
+ FRF_AZ_BUF_OWNER_ID_FBUF, 0);
+ efx_sram_writeq(efx, efx->membase + FR_BZ_BUF_FULL_TBL,
+ &buf, offset + pos);
+ }
+}
+
+static bool bad_vf_index(struct efx_nic *efx, unsigned index)
+{
+ return index >= efx_vf_size(efx);
+}
+
+static bool bad_buf_count(unsigned buf_count, unsigned max_entry_count)
+{
+ unsigned max_buf_count = max_entry_count *
+ sizeof(efx_qword_t) / EFX_BUF_SIZE;
+
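+	/* buf_count must be a power of two and must not exceed the number
+	 * of buffers needed for the largest supported queue.
+	 */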
+ return ((buf_count & (buf_count - 1)) || buf_count > max_buf_count);
+}
+
+/* Check that VI specified by per-port index belongs to a VF.
+ * Optionally set VF index and VI index within the VF.
+ */
+static bool map_vi_index(struct efx_nic *efx, unsigned abs_index,
+ struct efx_vf **vf_out, unsigned *rel_index_out)
+{
+ unsigned vf_i;
+
+ if (abs_index < EFX_VI_BASE)
+ return true;
+ vf_i = (abs_index - EFX_VI_BASE) / efx_vf_size(efx);
+ if (vf_i >= efx->vf_init_count)
+ return true;
+
+ if (vf_out)
+ *vf_out = efx->vf + vf_i;
+ if (rel_index_out)
+ *rel_index_out = abs_index % efx_vf_size(efx);
+ return false;
+}
+
+static int efx_vfdi_init_evq(struct efx_vf *vf)
+{
+ struct efx_nic *efx = vf->efx;
+ struct vfdi_req *req = vf->buf.addr;
+ unsigned vf_evq = req->u.init_evq.index;
+ unsigned buf_count = req->u.init_evq.buf_count;
+ unsigned abs_evq = abs_index(vf, vf_evq);
+ unsigned buftbl = EFX_BUFTBL_EVQ_BASE(vf, vf_evq);
+ efx_oword_t reg;
+
+ if (bad_vf_index(efx, vf_evq) ||
+ bad_buf_count(buf_count, EFX_MAX_VF_EVQ_SIZE)) {
+ if (net_ratelimit())
+ netif_err(efx, hw, efx->net_dev,
+ "ERROR: Invalid INIT_EVQ from %s: evq %d bufs %d\n",
+ vf->pci_name, vf_evq, buf_count);
+ return VFDI_RC_EINVAL;
+ }
+
+ efx_siena_sriov_bufs(efx, buftbl, req->u.init_evq.addr, buf_count);
+
+ EFX_POPULATE_OWORD_3(reg,
+ FRF_CZ_TIMER_Q_EN, 1,
+ FRF_CZ_HOST_NOTIFY_MODE, 0,
+ FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
+ efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, abs_evq);
+ EFX_POPULATE_OWORD_3(reg,
+ FRF_AZ_EVQ_EN, 1,
+ FRF_AZ_EVQ_SIZE, __ffs(buf_count),
+ FRF_AZ_EVQ_BUF_BASE_ID, buftbl);
+ efx_writeo_table(efx, &reg, FR_BZ_EVQ_PTR_TBL, abs_evq);
+
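+	/* Remember the pages backing evq0 so the queue can be rebuilt and a
+	 * VFDI reset event delivered when the VF is reset (see
+	 * efx_siena_sriov_reset_vf()).
+	 */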
+ if (vf_evq == 0) {
+ memcpy(vf->evq0_addrs, req->u.init_evq.addr,
+ buf_count * sizeof(u64));
+ vf->evq0_count = buf_count;
+ }
+
+ return VFDI_RC_SUCCESS;
+}
+
+static int efx_vfdi_init_rxq(struct efx_vf *vf)
+{
+ struct efx_nic *efx = vf->efx;
+ struct vfdi_req *req = vf->buf.addr;
+ unsigned vf_rxq = req->u.init_rxq.index;
+ unsigned vf_evq = req->u.init_rxq.evq;
+ unsigned buf_count = req->u.init_rxq.buf_count;
+ unsigned buftbl = EFX_BUFTBL_RXQ_BASE(vf, vf_rxq);
+ unsigned label;
+ efx_oword_t reg;
+
+ if (bad_vf_index(efx, vf_evq) || bad_vf_index(efx, vf_rxq) ||
+ vf_rxq >= VF_MAX_RX_QUEUES ||
+ bad_buf_count(buf_count, EFX_MAX_DMAQ_SIZE)) {
+ if (net_ratelimit())
+ netif_err(efx, hw, efx->net_dev,
+ "ERROR: Invalid INIT_RXQ from %s: rxq %d evq %d "
+ "buf_count %d\n", vf->pci_name, vf_rxq,
+ vf_evq, buf_count);
+ return VFDI_RC_EINVAL;
+ }
+ if (__test_and_set_bit(req->u.init_rxq.index, vf->rxq_mask))
+ ++vf->rxq_count;
+ efx_siena_sriov_bufs(efx, buftbl, req->u.init_rxq.addr, buf_count);
+
+ label = req->u.init_rxq.label & EFX_FIELD_MASK(FRF_AZ_RX_DESCQ_LABEL);
+ EFX_POPULATE_OWORD_6(reg,
+ FRF_AZ_RX_DESCQ_BUF_BASE_ID, buftbl,
+ FRF_AZ_RX_DESCQ_EVQ_ID, abs_index(vf, vf_evq),
+ FRF_AZ_RX_DESCQ_LABEL, label,
+ FRF_AZ_RX_DESCQ_SIZE, __ffs(buf_count),
+ FRF_AZ_RX_DESCQ_JUMBO,
+ !!(req->u.init_rxq.flags &
+ VFDI_RXQ_FLAG_SCATTER_EN),
+ FRF_AZ_RX_DESCQ_EN, 1);
+ efx_writeo_table(efx, &reg, FR_BZ_RX_DESC_PTR_TBL,
+ abs_index(vf, vf_rxq));
+
+ return VFDI_RC_SUCCESS;
+}
+
+static int efx_vfdi_init_txq(struct efx_vf *vf)
+{
+ struct efx_nic *efx = vf->efx;
+ struct vfdi_req *req = vf->buf.addr;
+ unsigned vf_txq = req->u.init_txq.index;
+ unsigned vf_evq = req->u.init_txq.evq;
+ unsigned buf_count = req->u.init_txq.buf_count;
+ unsigned buftbl = EFX_BUFTBL_TXQ_BASE(vf, vf_txq);
+ unsigned label, eth_filt_en;
+ efx_oword_t reg;
+
+ if (bad_vf_index(efx, vf_evq) || bad_vf_index(efx, vf_txq) ||
+ vf_txq >= vf_max_tx_channels ||
+ bad_buf_count(buf_count, EFX_MAX_DMAQ_SIZE)) {
+ if (net_ratelimit())
+ netif_err(efx, hw, efx->net_dev,
+ "ERROR: Invalid INIT_TXQ from %s: txq %d evq %d "
+ "buf_count %d\n", vf->pci_name, vf_txq,
+ vf_evq, buf_count);
+ return VFDI_RC_EINVAL;
+ }
+
+ mutex_lock(&vf->txq_lock);
+ if (__test_and_set_bit(req->u.init_txq.index, vf->txq_mask))
+ ++vf->txq_count;
+ mutex_unlock(&vf->txq_lock);
+ efx_siena_sriov_bufs(efx, buftbl, req->u.init_txq.addr, buf_count);
+
+ eth_filt_en = vf->tx_filter_mode == VF_TX_FILTER_ON;
+
+ label = req->u.init_txq.label & EFX_FIELD_MASK(FRF_AZ_TX_DESCQ_LABEL);
+ EFX_POPULATE_OWORD_8(reg,
+ FRF_CZ_TX_DPT_Q_MASK_WIDTH, min(efx->vi_scale, 1U),
+ FRF_CZ_TX_DPT_ETH_FILT_EN, eth_filt_en,
+ FRF_AZ_TX_DESCQ_EN, 1,
+ FRF_AZ_TX_DESCQ_BUF_BASE_ID, buftbl,
+ FRF_AZ_TX_DESCQ_EVQ_ID, abs_index(vf, vf_evq),
+ FRF_AZ_TX_DESCQ_LABEL, label,
+ FRF_AZ_TX_DESCQ_SIZE, __ffs(buf_count),
+ FRF_BZ_TX_NON_IP_DROP_DIS, 1);
+ efx_writeo_table(efx, &reg, FR_BZ_TX_DESC_PTR_TBL,
+ abs_index(vf, vf_txq));
+
+ return VFDI_RC_SUCCESS;
+}
+
+/* Returns true when efx_vfdi_fini_all_queues should wake */
+static bool efx_vfdi_flush_wake(struct efx_vf *vf)
+{
+ /* Ensure that all updates are visible to efx_vfdi_fini_all_queues() */
+ smp_mb();
+
+ return (!vf->txq_count && !vf->rxq_count) ||
+ atomic_read(&vf->rxq_retry_count);
+}
+
+static void efx_vfdi_flush_clear(struct efx_vf *vf)
+{
+ memset(vf->txq_mask, 0, sizeof(vf->txq_mask));
+ vf->txq_count = 0;
+ memset(vf->rxq_mask, 0, sizeof(vf->rxq_mask));
+ vf->rxq_count = 0;
+ memset(vf->rxq_retry_mask, 0, sizeof(vf->rxq_retry_mask));
+ atomic_set(&vf->rxq_retry_count, 0);
+}
+
+static int efx_vfdi_fini_all_queues(struct efx_vf *vf)
+{
+ struct efx_nic *efx = vf->efx;
+ efx_oword_t reg;
+ unsigned count = efx_vf_size(efx);
+ unsigned vf_offset = EFX_VI_BASE + vf->index * efx_vf_size(efx);
+ unsigned timeout = HZ;
+ unsigned index, rxqs_count;
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_FLUSH_RX_QUEUES_IN_LENMAX);
+ int rc;
+
+ BUILD_BUG_ON(VF_MAX_RX_QUEUES >
+ MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM);
+
+ rtnl_lock();
+ siena_prepare_flush(efx);
+ rtnl_unlock();
+
+ /* Flush all the initialized queues */
+ rxqs_count = 0;
+ for (index = 0; index < count; ++index) {
+ if (test_bit(index, vf->txq_mask)) {
+ EFX_POPULATE_OWORD_2(reg,
+ FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
+ FRF_AZ_TX_FLUSH_DESCQ,
+ vf_offset + index);
+ efx_writeo(efx, &reg, FR_AZ_TX_FLUSH_DESCQ);
+ }
+ if (test_bit(index, vf->rxq_mask)) {
+ MCDI_SET_ARRAY_DWORD(
+ inbuf, FLUSH_RX_QUEUES_IN_QID_OFST,
+ rxqs_count, vf_offset + index);
+ rxqs_count++;
+ }
+ }
+
+ atomic_set(&vf->rxq_retry_count, 0);
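+	/* RX flushes that the MC reports as failed are recorded in
+	 * rxq_retry_mask by efx_siena_sriov_rx_flush_done() and are
+	 * reissued on the next pass of this loop.
+	 */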
+ while (timeout && (vf->rxq_count || vf->txq_count)) {
+ rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, inbuf,
+ MC_CMD_FLUSH_RX_QUEUES_IN_LEN(rxqs_count),
+ NULL, 0, NULL);
+ WARN_ON(rc < 0);
+
+ timeout = wait_event_timeout(vf->flush_waitq,
+ efx_vfdi_flush_wake(vf),
+ timeout);
+ rxqs_count = 0;
+ for (index = 0; index < count; ++index) {
+ if (test_and_clear_bit(index, vf->rxq_retry_mask)) {
+ atomic_dec(&vf->rxq_retry_count);
+ MCDI_SET_ARRAY_DWORD(
+ inbuf, FLUSH_RX_QUEUES_IN_QID_OFST,
+ rxqs_count, vf_offset + index);
+ rxqs_count++;
+ }
+ }
+ }
+
+ rtnl_lock();
+ siena_finish_flush(efx);
+ rtnl_unlock();
+
+ /* Irrespective of success/failure, fini the queues */
+ EFX_ZERO_OWORD(reg);
+ for (index = 0; index < count; ++index) {
+ efx_writeo_table(efx, &reg, FR_BZ_RX_DESC_PTR_TBL,
+ vf_offset + index);
+ efx_writeo_table(efx, &reg, FR_BZ_TX_DESC_PTR_TBL,
+ vf_offset + index);
+ efx_writeo_table(efx, &reg, FR_BZ_EVQ_PTR_TBL,
+ vf_offset + index);
+ efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL,
+ vf_offset + index);
+ }
+ efx_siena_sriov_bufs(efx, vf->buftbl_base, NULL,
+ EFX_VF_BUFTBL_PER_VI * efx_vf_size(efx));
+ efx_vfdi_flush_clear(vf);
+
+ vf->evq0_count = 0;
+
+ return timeout ? 0 : VFDI_RC_ETIMEDOUT;
+}
+
+static int efx_vfdi_insert_filter(struct efx_vf *vf)
+{
+ struct efx_nic *efx = vf->efx;
+ struct siena_nic_data *nic_data = efx->nic_data;
+ struct vfdi_req *req = vf->buf.addr;
+ unsigned vf_rxq = req->u.mac_filter.rxq;
+ unsigned flags;
+
+ if (bad_vf_index(efx, vf_rxq) || vf->rx_filtering) {
+ if (net_ratelimit())
+ netif_err(efx, hw, efx->net_dev,
+ "ERROR: Invalid INSERT_FILTER from %s: rxq %d "
+ "flags 0x%x\n", vf->pci_name, vf_rxq,
+ req->u.mac_filter.flags);
+ return VFDI_RC_EINVAL;
+ }
+
+ flags = 0;
+ if (req->u.mac_filter.flags & VFDI_MAC_FILTER_FLAG_RSS)
+ flags |= EFX_FILTER_FLAG_RX_RSS;
+ if (req->u.mac_filter.flags & VFDI_MAC_FILTER_FLAG_SCATTER)
+ flags |= EFX_FILTER_FLAG_RX_SCATTER;
+ vf->rx_filter_flags = flags;
+ vf->rx_filter_qid = vf_rxq;
+ vf->rx_filtering = true;
+
+ efx_siena_sriov_reset_rx_filter(vf);
+ queue_work(vfdi_workqueue, &nic_data->peer_work);
+
+ return VFDI_RC_SUCCESS;
+}
+
+static int efx_vfdi_remove_all_filters(struct efx_vf *vf)
+{
+ struct efx_nic *efx = vf->efx;
+ struct siena_nic_data *nic_data = efx->nic_data;
+
+ vf->rx_filtering = false;
+ efx_siena_sriov_reset_rx_filter(vf);
+ queue_work(vfdi_workqueue, &nic_data->peer_work);
+
+ return VFDI_RC_SUCCESS;
+}
+
+static int efx_vfdi_set_status_page(struct efx_vf *vf)
+{
+ struct efx_nic *efx = vf->efx;
+ struct siena_nic_data *nic_data = efx->nic_data;
+ struct vfdi_req *req = vf->buf.addr;
+ u64 page_count = req->u.set_status_page.peer_page_count;
+ u64 max_page_count =
+ (EFX_PAGE_SIZE -
+ offsetof(struct vfdi_req, u.set_status_page.peer_page_addr[0]))
+ / sizeof(req->u.set_status_page.peer_page_addr[0]);
+
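+	/* The peer page addresses arrive inline in the request page, which
+	 * bounds how many the VF may supply in a single request.
+	 */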
+ if (!req->u.set_status_page.dma_addr || page_count > max_page_count) {
+ if (net_ratelimit())
+ netif_err(efx, hw, efx->net_dev,
+ "ERROR: Invalid SET_STATUS_PAGE from %s\n",
+ vf->pci_name);
+ return VFDI_RC_EINVAL;
+ }
+
+ mutex_lock(&nic_data->local_lock);
+ mutex_lock(&vf->status_lock);
+ vf->status_addr = req->u.set_status_page.dma_addr;
+
+ kfree(vf->peer_page_addrs);
+ vf->peer_page_addrs = NULL;
+ vf->peer_page_count = 0;
+
+ if (page_count) {
+ vf->peer_page_addrs = kcalloc(page_count, sizeof(u64),
+ GFP_KERNEL);
+ if (vf->peer_page_addrs) {
+ memcpy(vf->peer_page_addrs,
+ req->u.set_status_page.peer_page_addr,
+ page_count * sizeof(u64));
+ vf->peer_page_count = page_count;
+ }
+ }
+
+ __efx_siena_sriov_push_vf_status(vf);
+ mutex_unlock(&vf->status_lock);
+ mutex_unlock(&nic_data->local_lock);
+
+ return VFDI_RC_SUCCESS;
+}
+
+static int efx_vfdi_clear_status_page(struct efx_vf *vf)
+{
+ mutex_lock(&vf->status_lock);
+ vf->status_addr = 0;
+ mutex_unlock(&vf->status_lock);
+
+ return VFDI_RC_SUCCESS;
+}
+
+typedef int (*efx_vfdi_op_t)(struct efx_vf *vf);
+
+static const efx_vfdi_op_t vfdi_ops[VFDI_OP_LIMIT] = {
+ [VFDI_OP_INIT_EVQ] = efx_vfdi_init_evq,
+ [VFDI_OP_INIT_TXQ] = efx_vfdi_init_txq,
+ [VFDI_OP_INIT_RXQ] = efx_vfdi_init_rxq,
+ [VFDI_OP_FINI_ALL_QUEUES] = efx_vfdi_fini_all_queues,
+ [VFDI_OP_INSERT_FILTER] = efx_vfdi_insert_filter,
+ [VFDI_OP_REMOVE_ALL_FILTERS] = efx_vfdi_remove_all_filters,
+ [VFDI_OP_SET_STATUS_PAGE] = efx_vfdi_set_status_page,
+ [VFDI_OP_CLEAR_STATUS_PAGE] = efx_vfdi_clear_status_page,
+};
+
+static void efx_siena_sriov_vfdi(struct work_struct *work)
+{
+ struct efx_vf *vf = container_of(work, struct efx_vf, req);
+ struct efx_nic *efx = vf->efx;
+ struct vfdi_req *req = vf->buf.addr;
+ struct efx_memcpy_req copy[2];
+ int rc;
+
+ /* Copy this page into the local address space */
+ memset(copy, '\0', sizeof(copy));
+ copy[0].from_rid = vf->pci_rid;
+ copy[0].from_addr = vf->req_addr;
+ copy[0].to_rid = efx->pci_dev->devfn;
+ copy[0].to_addr = vf->buf.dma_addr;
+ copy[0].length = EFX_PAGE_SIZE;
+ rc = efx_siena_sriov_memcpy(efx, copy, 1);
+ if (rc) {
+ /* If we can't get the request, we can't reply to the caller */
+ if (net_ratelimit())
+ netif_err(efx, hw, efx->net_dev,
+ "ERROR: Unable to fetch VFDI request from %s rc %d\n",
+ vf->pci_name, -rc);
+ vf->busy = false;
+ return;
+ }
+
+ if (req->op < VFDI_OP_LIMIT && vfdi_ops[req->op] != NULL) {
+ rc = vfdi_ops[req->op](vf);
+ if (rc == 0) {
+ netif_dbg(efx, hw, efx->net_dev,
+ "vfdi request %d from %s ok\n",
+ req->op, vf->pci_name);
+ }
+ } else {
+ netif_dbg(efx, hw, efx->net_dev,
+ "ERROR: Unrecognised request %d from VF %s addr "
+ "%llx\n", req->op, vf->pci_name,
+ (unsigned long long)vf->req_addr);
+ rc = VFDI_RC_EOPNOTSUPP;
+ }
+
+ /* Allow subsequent VF requests */
+ vf->busy = false;
+ smp_wmb();
+
+ /* Respond to the request */
+ req->rc = rc;
+ req->op = VFDI_OP_RESPONSE;
+
+ memset(copy, '\0', sizeof(copy));
+ copy[0].from_buf = &req->rc;
+ copy[0].to_rid = vf->pci_rid;
+ copy[0].to_addr = vf->req_addr + offsetof(struct vfdi_req, rc);
+ copy[0].length = sizeof(req->rc);
+ copy[1].from_buf = &req->op;
+ copy[1].to_rid = vf->pci_rid;
+ copy[1].to_addr = vf->req_addr + offsetof(struct vfdi_req, op);
+ copy[1].length = sizeof(req->op);
+
+ (void)efx_siena_sriov_memcpy(efx, copy, ARRAY_SIZE(copy));
+}
+
+
+
+/* After a reset the event queues inside the guests no longer exist. Fill the
+ * event ring in guest memory with VFDI reset events, then re-initialise the
+ * event queue to raise an interrupt. The guest driver will then recover.
+ */
+static void efx_siena_sriov_reset_vf(struct efx_vf *vf,
+ struct efx_buffer *buffer)
+{
+ struct efx_nic *efx = vf->efx;
+ struct efx_memcpy_req copy_req[4];
+ efx_qword_t event;
+ unsigned int pos, count, k, buftbl, abs_evq;
+ efx_oword_t reg;
+ efx_dword_t ptr;
+ int rc;
+
+ BUG_ON(buffer->len != EFX_PAGE_SIZE);
+
+ if (!vf->evq0_count)
+ return;
+ BUG_ON(vf->evq0_count & (vf->evq0_count - 1));
+
+ mutex_lock(&vf->status_lock);
+ EFX_POPULATE_QWORD_3(event,
+ FSF_AZ_EV_CODE, FSE_CZ_EV_CODE_USER_EV,
+ VFDI_EV_SEQ, vf->msg_seqno,
+ VFDI_EV_TYPE, VFDI_EV_TYPE_RESET);
+ vf->msg_seqno++;
+ for (pos = 0; pos < EFX_PAGE_SIZE; pos += sizeof(event))
+ memcpy(buffer->addr + pos, &event, sizeof(event));
+
+ for (pos = 0; pos < vf->evq0_count; pos += count) {
+ count = min_t(unsigned, vf->evq0_count - pos,
+ ARRAY_SIZE(copy_req));
+ for (k = 0; k < count; k++) {
+ copy_req[k].from_buf = NULL;
+ copy_req[k].from_rid = efx->pci_dev->devfn;
+ copy_req[k].from_addr = buffer->dma_addr;
+ copy_req[k].to_rid = vf->pci_rid;
+ copy_req[k].to_addr = vf->evq0_addrs[pos + k];
+ copy_req[k].length = EFX_PAGE_SIZE;
+ }
+ rc = efx_siena_sriov_memcpy(efx, copy_req, count);
+ if (rc) {
+ if (net_ratelimit())
+ netif_err(efx, hw, efx->net_dev,
+ "ERROR: Unable to notify %s of reset"
+ ": %d\n", vf->pci_name, -rc);
+ break;
+ }
+ }
+
+ /* Reinitialise, arm and trigger evq0 */
+ abs_evq = abs_index(vf, 0);
+ buftbl = EFX_BUFTBL_EVQ_BASE(vf, 0);
+ efx_siena_sriov_bufs(efx, buftbl, vf->evq0_addrs, vf->evq0_count);
+
+ EFX_POPULATE_OWORD_3(reg,
+ FRF_CZ_TIMER_Q_EN, 1,
+ FRF_CZ_HOST_NOTIFY_MODE, 0,
+ FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
+ efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, abs_evq);
+ EFX_POPULATE_OWORD_3(reg,
+ FRF_AZ_EVQ_EN, 1,
+ FRF_AZ_EVQ_SIZE, __ffs(vf->evq0_count),
+ FRF_AZ_EVQ_BUF_BASE_ID, buftbl);
+ efx_writeo_table(efx, &reg, FR_BZ_EVQ_PTR_TBL, abs_evq);
+ EFX_POPULATE_DWORD_1(ptr, FRF_AZ_EVQ_RPTR, 0);
+ efx_writed(efx, &ptr, FR_BZ_EVQ_RPTR + FR_BZ_EVQ_RPTR_STEP * abs_evq);
+
+ mutex_unlock(&vf->status_lock);
+}
+
+static void efx_siena_sriov_reset_vf_work(struct work_struct *work)
+{
+ struct efx_vf *vf = container_of(work, struct efx_vf, req);
+ struct efx_nic *efx = vf->efx;
+ struct efx_buffer buf;
+
+ if (!efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE, GFP_NOIO)) {
+ efx_siena_sriov_reset_vf(vf, &buf);
+ efx_nic_free_buffer(efx, &buf);
+ }
+}
+
+static void efx_siena_sriov_handle_no_channel(struct efx_nic *efx)
+{
+	netif_err(efx, drv, efx->net_dev,
+		  "ERROR: IOV requires MSI-X and 1 additional interrupt "
+		  "vector. IOV disabled\n");
+ efx->vf_count = 0;
+}
+
+static int efx_siena_sriov_probe_channel(struct efx_channel *channel)
+{
+ struct siena_nic_data *nic_data = channel->efx->nic_data;
+ nic_data->vfdi_channel = channel;
+
+ return 0;
+}
+
+static void
+efx_siena_sriov_get_channel_name(struct efx_channel *channel,
+ char *buf, size_t len)
+{
+ snprintf(buf, len, "%s-iov", channel->efx->name);
+}
+
+static const struct efx_channel_type efx_siena_sriov_channel_type = {
+ .handle_no_channel = efx_siena_sriov_handle_no_channel,
+ .pre_probe = efx_siena_sriov_probe_channel,
+ .post_remove = efx_channel_dummy_op_void,
+ .get_name = efx_siena_sriov_get_channel_name,
+ /* no copy operation; channel must not be reallocated */
+ .keep_eventq = true,
+};
+
+void efx_siena_sriov_probe(struct efx_nic *efx)
+{
+ unsigned count;
+
+ if (!max_vfs)
+ return;
+
+ if (efx_siena_sriov_cmd(efx, false, &efx->vi_scale, &count))
+ return;
+ if (count > 0 && count > max_vfs)
+ count = max_vfs;
+
+	/* efx_nic_dimension_resources() will reduce vf_count as appropriate */
+ efx->vf_count = count;
+
+ efx->extra_channel_type[EFX_EXTRA_CHANNEL_IOV] = &efx_siena_sriov_channel_type;
+}
+
+/* Copy the list of individual addresses into the vfdi_status.peers
+ * array and auxiliary pages, protected by %local_lock. Drop that lock
+ * and then broadcast the address list to every VF.
+ */
+static void efx_siena_sriov_peer_work(struct work_struct *data)
+{
+ struct siena_nic_data *nic_data = container_of(data,
+ struct siena_nic_data,
+ peer_work);
+ struct efx_nic *efx = nic_data->efx;
+ struct vfdi_status *vfdi_status = nic_data->vfdi_status.addr;
+ struct efx_vf *vf;
+ struct efx_local_addr *local_addr;
+ struct vfdi_endpoint *peer;
+ struct efx_endpoint_page *epp;
+ struct list_head pages;
+ unsigned int peer_space;
+ unsigned int peer_count;
+ unsigned int pos;
+
+ mutex_lock(&nic_data->local_lock);
+
+ /* Move the existing peer pages off %local_page_list */
+ INIT_LIST_HEAD(&pages);
+ list_splice_tail_init(&nic_data->local_page_list, &pages);
+
+ /* Populate the VF addresses starting from entry 1 (entry 0 is
+ * the PF address)
+ */
+ peer = vfdi_status->peers + 1;
+ peer_space = ARRAY_SIZE(vfdi_status->peers) - 1;
+ peer_count = 1;
+ for (pos = 0; pos < efx->vf_count; ++pos) {
+ vf = efx->vf + pos;
+
+ mutex_lock(&vf->status_lock);
+ if (vf->rx_filtering && !is_zero_ether_addr(vf->addr.mac_addr)) {
+ *peer++ = vf->addr;
+ ++peer_count;
+ --peer_space;
+ BUG_ON(peer_space == 0);
+ }
+ mutex_unlock(&vf->status_lock);
+ }
+
+ /* Fill the remaining addresses */
+ list_for_each_entry(local_addr, &nic_data->local_addr_list, link) {
+ ether_addr_copy(peer->mac_addr, local_addr->addr);
+ peer->tci = 0;
+ ++peer;
+ ++peer_count;
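+		/* Once the peers[] array embedded in vfdi_status fills up,
+		 * spill further addresses onto DMA-coherent endpoint pages,
+		 * which are pushed to each VF along with the status page.
+		 */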
+ if (--peer_space == 0) {
+ if (list_empty(&pages)) {
+ epp = kmalloc(sizeof(*epp), GFP_KERNEL);
+ if (!epp)
+ break;
+ epp->ptr = dma_alloc_coherent(
+ &efx->pci_dev->dev, EFX_PAGE_SIZE,
+ &epp->addr, GFP_KERNEL);
+ if (!epp->ptr) {
+ kfree(epp);
+ break;
+ }
+ } else {
+ epp = list_first_entry(
+ &pages, struct efx_endpoint_page, link);
+ list_del(&epp->link);
+ }
+
+ list_add_tail(&epp->link, &nic_data->local_page_list);
+ peer = (struct vfdi_endpoint *)epp->ptr;
+ peer_space = EFX_PAGE_SIZE / sizeof(struct vfdi_endpoint);
+ }
+ }
+ vfdi_status->peer_count = peer_count;
+ mutex_unlock(&nic_data->local_lock);
+
+ /* Free any now unused endpoint pages */
+ while (!list_empty(&pages)) {
+ epp = list_first_entry(
+ &pages, struct efx_endpoint_page, link);
+ list_del(&epp->link);
+ dma_free_coherent(&efx->pci_dev->dev, EFX_PAGE_SIZE,
+ epp->ptr, epp->addr);
+ kfree(epp);
+ }
+
+ /* Finally, push the pages */
+ for (pos = 0; pos < efx->vf_count; ++pos) {
+ vf = efx->vf + pos;
+
+ mutex_lock(&vf->status_lock);
+ if (vf->status_addr)
+ __efx_siena_sriov_push_vf_status(vf);
+ mutex_unlock(&vf->status_lock);
+ }
+}
+
+static void efx_siena_sriov_free_local(struct efx_nic *efx)
+{
+ struct siena_nic_data *nic_data = efx->nic_data;
+ struct efx_local_addr *local_addr;
+ struct efx_endpoint_page *epp;
+
+ while (!list_empty(&nic_data->local_addr_list)) {
+ local_addr = list_first_entry(&nic_data->local_addr_list,
+ struct efx_local_addr, link);
+ list_del(&local_addr->link);
+ kfree(local_addr);
+ }
+
+ while (!list_empty(&nic_data->local_page_list)) {
+ epp = list_first_entry(&nic_data->local_page_list,
+ struct efx_endpoint_page, link);
+ list_del(&epp->link);
+ dma_free_coherent(&efx->pci_dev->dev, EFX_PAGE_SIZE,
+ epp->ptr, epp->addr);
+ kfree(epp);
+ }
+}
+
+static int efx_siena_sriov_vf_alloc(struct efx_nic *efx)
+{
+ unsigned index;
+ struct efx_vf *vf;
+
+ efx->vf = kzalloc(sizeof(struct efx_vf) * efx->vf_count, GFP_KERNEL);
+ if (!efx->vf)
+ return -ENOMEM;
+
+ for (index = 0; index < efx->vf_count; ++index) {
+ vf = efx->vf + index;
+
+ vf->efx = efx;
+ vf->index = index;
+ vf->rx_filter_id = -1;
+ vf->tx_filter_mode = VF_TX_FILTER_AUTO;
+ vf->tx_filter_id = -1;
+ INIT_WORK(&vf->req, efx_siena_sriov_vfdi);
+ INIT_WORK(&vf->reset_work, efx_siena_sriov_reset_vf_work);
+ init_waitqueue_head(&vf->flush_waitq);
+ mutex_init(&vf->status_lock);
+ mutex_init(&vf->txq_lock);
+ }
+
+ return 0;
+}
+
+static void efx_siena_sriov_vfs_fini(struct efx_nic *efx)
+{
+ struct efx_vf *vf;
+ unsigned int pos;
+
+ for (pos = 0; pos < efx->vf_count; ++pos) {
+ vf = efx->vf + pos;
+
+ efx_nic_free_buffer(efx, &vf->buf);
+ kfree(vf->peer_page_addrs);
+ vf->peer_page_addrs = NULL;
+ vf->peer_page_count = 0;
+
+ vf->evq0_count = 0;
+ }
+}
+
+static int efx_siena_sriov_vfs_init(struct efx_nic *efx)
+{
+ struct pci_dev *pci_dev = efx->pci_dev;
+ struct siena_nic_data *nic_data = efx->nic_data;
+ unsigned index, devfn, sriov, buftbl_base;
+ u16 offset, stride;
+ struct efx_vf *vf;
+ int rc;
+
+ sriov = pci_find_ext_capability(pci_dev, PCI_EXT_CAP_ID_SRIOV);
+ if (!sriov)
+ return -ENOENT;
+
+ pci_read_config_word(pci_dev, sriov + PCI_SRIOV_VF_OFFSET, &offset);
+ pci_read_config_word(pci_dev, sriov + PCI_SRIOV_VF_STRIDE, &stride);
+
+ buftbl_base = nic_data->vf_buftbl_base;
+ devfn = pci_dev->devfn + offset;
+ for (index = 0; index < efx->vf_count; ++index) {
+ vf = efx->vf + index;
+
+ /* Reserve buffer entries */
+ vf->buftbl_base = buftbl_base;
+ buftbl_base += EFX_VF_BUFTBL_PER_VI * efx_vf_size(efx);
+
+ vf->pci_rid = devfn;
+ snprintf(vf->pci_name, sizeof(vf->pci_name),
+ "%04x:%02x:%02x.%d",
+ pci_domain_nr(pci_dev->bus), pci_dev->bus->number,
+ PCI_SLOT(devfn), PCI_FUNC(devfn));
+
+ rc = efx_nic_alloc_buffer(efx, &vf->buf, EFX_PAGE_SIZE,
+ GFP_KERNEL);
+ if (rc)
+ goto fail;
+
+ devfn += stride;
+ }
+
+ return 0;
+
+fail:
+ efx_siena_sriov_vfs_fini(efx);
+ return rc;
+}
+
+int efx_siena_sriov_init(struct efx_nic *efx)
+{
+ struct net_device *net_dev = efx->net_dev;
+ struct siena_nic_data *nic_data = efx->nic_data;
+ struct vfdi_status *vfdi_status;
+ int rc;
+
+ /* Ensure there's room for vf_channel */
+ BUILD_BUG_ON(EFX_MAX_CHANNELS + 1 >= EFX_VI_BASE);
+ /* Ensure that VI_BASE is aligned on VI_SCALE */
+ BUILD_BUG_ON(EFX_VI_BASE & ((1 << EFX_VI_SCALE_MAX) - 1));
+
+ if (efx->vf_count == 0)
+ return 0;
+
+ rc = efx_siena_sriov_cmd(efx, true, NULL, NULL);
+ if (rc)
+ goto fail_cmd;
+
+ rc = efx_nic_alloc_buffer(efx, &nic_data->vfdi_status,
+ sizeof(*vfdi_status), GFP_KERNEL);
+ if (rc)
+ goto fail_status;
+ vfdi_status = nic_data->vfdi_status.addr;
+ memset(vfdi_status, 0, sizeof(*vfdi_status));
+ vfdi_status->version = 1;
+ vfdi_status->length = sizeof(*vfdi_status);
+ vfdi_status->max_tx_channels = vf_max_tx_channels;
+ vfdi_status->vi_scale = efx->vi_scale;
+ vfdi_status->rss_rxq_count = efx->rss_spread;
+ vfdi_status->peer_count = 1 + efx->vf_count;
+ vfdi_status->timer_quantum_ns = efx->timer_quantum_ns;
+
+ rc = efx_siena_sriov_vf_alloc(efx);
+ if (rc)
+ goto fail_alloc;
+
+ mutex_init(&nic_data->local_lock);
+ INIT_WORK(&nic_data->peer_work, efx_siena_sriov_peer_work);
+ INIT_LIST_HEAD(&nic_data->local_addr_list);
+ INIT_LIST_HEAD(&nic_data->local_page_list);
+
+ rc = efx_siena_sriov_vfs_init(efx);
+ if (rc)
+ goto fail_vfs;
+
+ rtnl_lock();
+ ether_addr_copy(vfdi_status->peers[0].mac_addr, net_dev->dev_addr);
+ efx->vf_init_count = efx->vf_count;
+ rtnl_unlock();
+
+ efx_siena_sriov_usrev(efx, true);
+
+ /* At this point we must be ready to accept VFDI requests */
+
+ rc = pci_enable_sriov(efx->pci_dev, efx->vf_count);
+ if (rc)
+ goto fail_pci;
+
+ netif_info(efx, probe, net_dev,
+ "enabled SR-IOV for %d VFs, %d VI per VF\n",
+ efx->vf_count, efx_vf_size(efx));
+ return 0;
+
+fail_pci:
+ efx_siena_sriov_usrev(efx, false);
+ rtnl_lock();
+ efx->vf_init_count = 0;
+ rtnl_unlock();
+ efx_siena_sriov_vfs_fini(efx);
+fail_vfs:
+ cancel_work_sync(&nic_data->peer_work);
+ efx_siena_sriov_free_local(efx);
+ kfree(efx->vf);
+fail_alloc:
+ efx_nic_free_buffer(efx, &nic_data->vfdi_status);
+fail_status:
+ efx_siena_sriov_cmd(efx, false, NULL, NULL);
+fail_cmd:
+ return rc;
+}
+
+void efx_siena_sriov_fini(struct efx_nic *efx)
+{
+ struct efx_vf *vf;
+ unsigned int pos;
+ struct siena_nic_data *nic_data = efx->nic_data;
+
+ if (efx->vf_init_count == 0)
+ return;
+
+	/* Disable all interfaces that could trigger reconfiguration */
+ BUG_ON(nic_data->vfdi_channel->enabled);
+ efx_siena_sriov_usrev(efx, false);
+ rtnl_lock();
+ efx->vf_init_count = 0;
+ rtnl_unlock();
+
+ /* Flush all reconfiguration work */
+ for (pos = 0; pos < efx->vf_count; ++pos) {
+ vf = efx->vf + pos;
+ cancel_work_sync(&vf->req);
+ cancel_work_sync(&vf->reset_work);
+ }
+ cancel_work_sync(&nic_data->peer_work);
+
+ pci_disable_sriov(efx->pci_dev);
+
+ /* Tear down back-end state */
+ efx_siena_sriov_vfs_fini(efx);
+ efx_siena_sriov_free_local(efx);
+ kfree(efx->vf);
+ efx_nic_free_buffer(efx, &nic_data->vfdi_status);
+ efx_siena_sriov_cmd(efx, false, NULL, NULL);
+}
+
+void efx_siena_sriov_event(struct efx_channel *channel, efx_qword_t *event)
+{
+ struct efx_nic *efx = channel->efx;
+ struct efx_vf *vf;
+ unsigned qid, seq, type, data;
+
+ qid = EFX_QWORD_FIELD(*event, FSF_CZ_USER_QID);
+
+ /* USR_EV_REG_VALUE is dword0, so access the VFDI_EV fields directly */
+ BUILD_BUG_ON(FSF_CZ_USER_EV_REG_VALUE_LBN != 0);
+ seq = EFX_QWORD_FIELD(*event, VFDI_EV_SEQ);
+ type = EFX_QWORD_FIELD(*event, VFDI_EV_TYPE);
+ data = EFX_QWORD_FIELD(*event, VFDI_EV_DATA);
+
+ netif_vdbg(efx, hw, efx->net_dev,
+ "USR_EV event from qid %d seq 0x%x type %d data 0x%x\n",
+ qid, seq, type, data);
+
+ if (map_vi_index(efx, qid, &vf, NULL))
+ return;
+ if (vf->busy)
+ goto error;
+
+ if (type == VFDI_EV_TYPE_REQ_WORD0) {
+ /* Resynchronise */
+ vf->req_type = VFDI_EV_TYPE_REQ_WORD0;
+ vf->req_seqno = seq + 1;
+ vf->req_addr = 0;
+ } else if (seq != (vf->req_seqno++ & 0xff) || type != vf->req_type)
+ goto error;
+
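+	/* The 64-bit request address arrives 16 bits at a time in
+	 * WORD0..WORD3 events; WORD3 completes it and kicks the VFDI
+	 * work item.
+	 */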
+ switch (vf->req_type) {
+ case VFDI_EV_TYPE_REQ_WORD0:
+ case VFDI_EV_TYPE_REQ_WORD1:
+ case VFDI_EV_TYPE_REQ_WORD2:
+ vf->req_addr |= (u64)data << (vf->req_type << 4);
+ ++vf->req_type;
+ return;
+
+ case VFDI_EV_TYPE_REQ_WORD3:
+ vf->req_addr |= (u64)data << 48;
+ vf->req_type = VFDI_EV_TYPE_REQ_WORD0;
+ vf->busy = true;
+ queue_work(vfdi_workqueue, &vf->req);
+ return;
+ }
+
+error:
+ if (net_ratelimit())
+ netif_err(efx, hw, efx->net_dev,
+ "ERROR: Screaming VFDI request from %s\n",
+ vf->pci_name);
+ /* Reset the request and sequence number */
+ vf->req_type = VFDI_EV_TYPE_REQ_WORD0;
+ vf->req_seqno = seq + 1;
+}
+
+void efx_siena_sriov_flr(struct efx_nic *efx, unsigned vf_i)
+{
+ struct efx_vf *vf;
+
+ if (vf_i > efx->vf_init_count)
+ return;
+ vf = efx->vf + vf_i;
+ netif_info(efx, hw, efx->net_dev,
+ "FLR on VF %s\n", vf->pci_name);
+
+ vf->status_addr = 0;
+ efx_vfdi_remove_all_filters(vf);
+ efx_vfdi_flush_clear(vf);
+
+ vf->evq0_count = 0;
+}
+
+void efx_siena_sriov_mac_address_changed(struct efx_nic *efx)
+{
+ struct siena_nic_data *nic_data = efx->nic_data;
+ struct vfdi_status *vfdi_status = nic_data->vfdi_status.addr;
+
+ if (!efx->vf_init_count)
+ return;
+ ether_addr_copy(vfdi_status->peers[0].mac_addr,
+ efx->net_dev->dev_addr);
+ queue_work(vfdi_workqueue, &nic_data->peer_work);
+}
+
+void efx_siena_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
+{
+ struct efx_vf *vf;
+ unsigned queue, qid;
+
+ queue = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
+ if (map_vi_index(efx, queue, &vf, &qid))
+ return;
+ /* Ignore flush completions triggered by an FLR */
+ if (!test_bit(qid, vf->txq_mask))
+ return;
+
+ __clear_bit(qid, vf->txq_mask);
+ --vf->txq_count;
+
+ if (efx_vfdi_flush_wake(vf))
+ wake_up(&vf->flush_waitq);
+}
+
+void efx_siena_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
+{
+ struct efx_vf *vf;
+ unsigned ev_failed, queue, qid;
+
+ queue = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
+ ev_failed = EFX_QWORD_FIELD(*event,
+ FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
+ if (map_vi_index(efx, queue, &vf, &qid))
+ return;
+ if (!test_bit(qid, vf->rxq_mask))
+ return;
+
+ if (ev_failed) {
+ set_bit(qid, vf->rxq_retry_mask);
+ atomic_inc(&vf->rxq_retry_count);
+ } else {
+ __clear_bit(qid, vf->rxq_mask);
+ --vf->rxq_count;
+ }
+ if (efx_vfdi_flush_wake(vf))
+ wake_up(&vf->flush_waitq);
+}
+
+/* Called from napi. Schedule the reset work item */
+void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq)
+{
+ struct efx_vf *vf;
+ unsigned int rel;
+
+ if (map_vi_index(efx, dmaq, &vf, &rel))
+ return;
+
+ if (net_ratelimit())
+ netif_err(efx, hw, efx->net_dev,
+ "VF %d DMA Q %d reports descriptor fetch error.\n",
+ vf->index, rel);
+ queue_work(vfdi_workqueue, &vf->reset_work);
+}
+
+/* Reset all VFs */
+void efx_siena_sriov_reset(struct efx_nic *efx)
+{
+ unsigned int vf_i;
+ struct efx_buffer buf;
+ struct efx_vf *vf;
+
+ ASSERT_RTNL();
+
+ if (efx->vf_init_count == 0)
+ return;
+
+ efx_siena_sriov_usrev(efx, true);
+ (void)efx_siena_sriov_cmd(efx, true, NULL, NULL);
+
+ if (efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE, GFP_NOIO))
+ return;
+
+ for (vf_i = 0; vf_i < efx->vf_init_count; ++vf_i) {
+ vf = efx->vf + vf_i;
+ efx_siena_sriov_reset_vf(vf, &buf);
+ }
+
+ efx_nic_free_buffer(efx, &buf);
+}
+
+int efx_init_sriov(void)
+{
+	/* A single-threaded workqueue is sufficient. efx_siena_sriov_vfdi() and
+	 * efx_siena_sriov_peer_work() spend almost all their time sleeping for
+	 * MCDI to complete anyway.
+ */
+ vfdi_workqueue = create_singlethread_workqueue("sfc_vfdi");
+ if (!vfdi_workqueue)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void efx_fini_sriov(void)
+{
+ destroy_workqueue(vfdi_workqueue);
+}
+
+int efx_siena_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_vf *vf;
+
+ if (vf_i >= efx->vf_init_count)
+ return -EINVAL;
+ vf = efx->vf + vf_i;
+
+ mutex_lock(&vf->status_lock);
+ ether_addr_copy(vf->addr.mac_addr, mac);
+ __efx_siena_sriov_update_vf_addr(vf);
+ mutex_unlock(&vf->status_lock);
+
+ return 0;
+}
+
+int efx_siena_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i,
+ u16 vlan, u8 qos)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_vf *vf;
+ u16 tci;
+
+ if (vf_i >= efx->vf_init_count)
+ return -EINVAL;
+ vf = efx->vf + vf_i;
+
+ mutex_lock(&vf->status_lock);
+ tci = (vlan & VLAN_VID_MASK) | ((qos & 0x7) << VLAN_PRIO_SHIFT);
+ vf->addr.tci = htons(tci);
+ __efx_siena_sriov_update_vf_addr(vf);
+ mutex_unlock(&vf->status_lock);
+
+ return 0;
+}
+
+int efx_siena_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i,
+ bool spoofchk)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_vf *vf;
+ int rc;
+
+ if (vf_i >= efx->vf_init_count)
+ return -EINVAL;
+ vf = efx->vf + vf_i;
+
+ mutex_lock(&vf->txq_lock);
+ if (vf->txq_count == 0) {
+ vf->tx_filter_mode =
+ spoofchk ? VF_TX_FILTER_ON : VF_TX_FILTER_OFF;
+ rc = 0;
+ } else {
+ /* This cannot be changed while TX queues are running */
+ rc = -EBUSY;
+ }
+ mutex_unlock(&vf->txq_lock);
+ return rc;
+}
+
+int efx_siena_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
+ struct ifla_vf_info *ivi)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_vf *vf;
+ u16 tci;
+
+ if (vf_i >= efx->vf_init_count)
+ return -EINVAL;
+ vf = efx->vf + vf_i;
+
+ ivi->vf = vf_i;
+ ether_addr_copy(ivi->mac, vf->addr.mac_addr);
+ ivi->max_tx_rate = 0;
+ ivi->min_tx_rate = 0;
+ tci = ntohs(vf->addr.tci);
+ ivi->vlan = tci & VLAN_VID_MASK;
+ ivi->qos = (tci >> VLAN_PRIO_SHIFT) & 0x7;
+ ivi->spoofchk = vf->tx_filter_mode == VF_TX_FILTER_ON;
+
+ return 0;
+}
+
diff --git a/kernel/drivers/net/ethernet/sfc/tenxpress.c b/kernel/drivers/net/ethernet/sfc/tenxpress.c
new file mode 100644
index 000000000..2c90e6b31
--- /dev/null
+++ b/kernel/drivers/net/ethernet/sfc/tenxpress.c
@@ -0,0 +1,494 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2007-2011 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/delay.h>
+#include <linux/rtnetlink.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include "efx.h"
+#include "mdio_10g.h"
+#include "nic.h"
+#include "phy.h"
+#include "workarounds.h"
+
+/* We expect these MMDs to be in the package. */
+#define TENXPRESS_REQUIRED_DEVS (MDIO_DEVS_PMAPMD | \
+ MDIO_DEVS_PCS | \
+ MDIO_DEVS_PHYXS | \
+ MDIO_DEVS_AN)
+
+#define SFX7101_LOOPBACKS ((1 << LOOPBACK_PHYXS) | \
+ (1 << LOOPBACK_PCS) | \
+ (1 << LOOPBACK_PMAPMD) | \
+ (1 << LOOPBACK_PHYXS_WS))
+
+/* We complain if we fail to see the link partner as 10G capable this many
+ * times in a row (must be > 1 as sampling the autoneg. registers is racy)
+ */
+#define MAX_BAD_LP_TRIES (5)
+
+/* Extended control register */
+#define PMA_PMD_XCONTROL_REG 49152
+#define PMA_PMD_EXT_GMII_EN_LBN 1
+#define PMA_PMD_EXT_GMII_EN_WIDTH 1
+#define PMA_PMD_EXT_CLK_OUT_LBN 2
+#define PMA_PMD_EXT_CLK_OUT_WIDTH 1
+#define PMA_PMD_LNPGA_POWERDOWN_LBN 8
+#define PMA_PMD_LNPGA_POWERDOWN_WIDTH 1
+#define PMA_PMD_EXT_CLK312_WIDTH 1
+#define PMA_PMD_EXT_LPOWER_LBN 12
+#define PMA_PMD_EXT_LPOWER_WIDTH 1
+#define PMA_PMD_EXT_ROBUST_LBN 14
+#define PMA_PMD_EXT_ROBUST_WIDTH 1
+#define PMA_PMD_EXT_SSR_LBN 15
+#define PMA_PMD_EXT_SSR_WIDTH 1
+
+/* extended status register */
+#define PMA_PMD_XSTATUS_REG 49153
+#define PMA_PMD_XSTAT_MDIX_LBN 14
+#define PMA_PMD_XSTAT_FLP_LBN (12)
+
+/* LED control register */
+#define PMA_PMD_LED_CTRL_REG 49159
+#define PMA_PMA_LED_ACTIVITY_LBN (3)
+
+/* LED function override register */
+#define PMA_PMD_LED_OVERR_REG 49161
+/* Bit positions for different LEDs (there are more but not wired on SFE4001)*/
+#define PMA_PMD_LED_LINK_LBN (0)
+#define PMA_PMD_LED_SPEED_LBN (2)
+#define PMA_PMD_LED_TX_LBN (4)
+#define PMA_PMD_LED_RX_LBN (6)
+/* Override settings */
+#define PMA_PMD_LED_AUTO (0) /* H/W control */
+#define PMA_PMD_LED_ON (1)
+#define PMA_PMD_LED_OFF (2)
+#define PMA_PMD_LED_FLASH (3)
+#define PMA_PMD_LED_MASK 3
+/* Default: Green and Amber LEDs under hardware control, Red (RX) LED off */
+#define SFX7101_PMA_PMD_LED_DEFAULT (PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN)
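+
+/* Worked example (illustrative only): with the override encodings above,
+ * SFX7101_PMA_PMD_LED_DEFAULT evaluates to PMA_PMD_LED_OFF (2) <<
+ * PMA_PMD_LED_RX_LBN (6) = 0x80, i.e. only the RX (red) LED is forced off
+ * while the link, speed and TX LEDs stay in PMA_PMD_LED_AUTO mode.
+ */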
+
+#define PMA_PMD_SPEED_ENABLE_REG 49192
+#define PMA_PMD_100TX_ADV_LBN 1
+#define PMA_PMD_100TX_ADV_WIDTH 1
+#define PMA_PMD_1000T_ADV_LBN 2
+#define PMA_PMD_1000T_ADV_WIDTH 1
+#define PMA_PMD_10000T_ADV_LBN 3
+#define PMA_PMD_10000T_ADV_WIDTH 1
+#define PMA_PMD_SPEED_LBN 4
+#define PMA_PMD_SPEED_WIDTH 4
+
+/* Misc register defines */
+#define PCS_CLOCK_CTRL_REG 55297
+#define PLL312_RST_N_LBN 2
+
+#define PCS_SOFT_RST2_REG 55302
+#define SERDES_RST_N_LBN 13
+#define XGXS_RST_N_LBN 12
+
+#define PCS_TEST_SELECT_REG 55303 /* PRM 10.5.8 */
+#define CLK312_EN_LBN 3
+
+/* PHYXS registers */
+#define PHYXS_XCONTROL_REG 49152
+#define PHYXS_RESET_LBN 15
+#define PHYXS_RESET_WIDTH 1
+
+#define PHYXS_TEST1 (49162)
+#define LOOPBACK_NEAR_LBN (8)
+#define LOOPBACK_NEAR_WIDTH (1)
+
+/* Boot status register */
+#define PCS_BOOT_STATUS_REG 53248
+#define PCS_BOOT_FATAL_ERROR_LBN 0
+#define PCS_BOOT_PROGRESS_LBN 1
+#define PCS_BOOT_PROGRESS_WIDTH 2
+#define PCS_BOOT_PROGRESS_INIT 0
+#define PCS_BOOT_PROGRESS_WAIT_MDIO 1
+#define PCS_BOOT_PROGRESS_CHECKSUM 2
+#define PCS_BOOT_PROGRESS_JUMP 3
+#define PCS_BOOT_DOWNLOAD_WAIT_LBN 3
+#define PCS_BOOT_CODE_STARTED_LBN 4
+
+/* 100M/1G PHY registers */
+#define GPHY_XCONTROL_REG 49152
+#define GPHY_ISOLATE_LBN 10
+#define GPHY_ISOLATE_WIDTH 1
+#define GPHY_DUPLEX_LBN 8
+#define GPHY_DUPLEX_WIDTH 1
+#define GPHY_LOOPBACK_NEAR_LBN 14
+#define GPHY_LOOPBACK_NEAR_WIDTH 1
+
+#define C22EXT_STATUS_REG 49153
+#define C22EXT_STATUS_LINK_LBN 2
+#define C22EXT_STATUS_LINK_WIDTH 1
+
+#define C22EXT_MSTSLV_CTRL 49161
+#define C22EXT_MSTSLV_CTRL_ADV_1000_HD_LBN 8
+#define C22EXT_MSTSLV_CTRL_ADV_1000_FD_LBN 9
+
+#define C22EXT_MSTSLV_STATUS 49162
+#define C22EXT_MSTSLV_STATUS_LP_1000_HD_LBN 10
+#define C22EXT_MSTSLV_STATUS_LP_1000_FD_LBN 11
+
+/* Time to wait between powering down the LNPGA and turning off the power
+ * rails */
+#define LNPGA_PDOWN_WAIT (HZ / 5)
+
+struct tenxpress_phy_data {
+ enum efx_loopback_mode loopback_mode;
+ enum efx_phy_mode phy_mode;
+ int bad_lp_tries;
+};
+
+static int tenxpress_init(struct efx_nic *efx)
+{
+ /* Enable 312.5 MHz clock */
+ efx_mdio_write(efx, MDIO_MMD_PCS, PCS_TEST_SELECT_REG,
+ 1 << CLK312_EN_LBN);
+
+ /* Set the LEDs up as: Green = Link, Amber = Link/Act, Red = Off */
+ efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_CTRL_REG,
+ 1 << PMA_PMA_LED_ACTIVITY_LBN, true);
+ efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_OVERR_REG,
+ SFX7101_PMA_PMD_LED_DEFAULT);
+
+ return 0;
+}
+
+static int tenxpress_phy_probe(struct efx_nic *efx)
+{
+ struct tenxpress_phy_data *phy_data;
+
+ /* Allocate phy private storage */
+ phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
+ if (!phy_data)
+ return -ENOMEM;
+ efx->phy_data = phy_data;
+ phy_data->phy_mode = efx->phy_mode;
+
+ efx->mdio.mmds = TENXPRESS_REQUIRED_DEVS;
+ efx->mdio.mode_support = MDIO_SUPPORTS_C45;
+
+ efx->loopback_modes = SFX7101_LOOPBACKS | FALCON_XMAC_LOOPBACKS;
+
+ efx->link_advertising = (ADVERTISED_TP | ADVERTISED_Autoneg |
+ ADVERTISED_10000baseT_Full);
+
+ return 0;
+}
+
+static int tenxpress_phy_init(struct efx_nic *efx)
+{
+ int rc;
+
+ falcon_board(efx)->type->init_phy(efx);
+
+ if (!(efx->phy_mode & PHY_MODE_SPECIAL)) {
+ rc = efx_mdio_wait_reset_mmds(efx, TENXPRESS_REQUIRED_DEVS);
+ if (rc < 0)
+ return rc;
+
+ rc = efx_mdio_check_mmds(efx, TENXPRESS_REQUIRED_DEVS);
+ if (rc < 0)
+ return rc;
+ }
+
+ rc = tenxpress_init(efx);
+ if (rc < 0)
+ return rc;
+
+ /* Reinitialise flow control settings */
+ efx_link_set_wanted_fc(efx, efx->wanted_fc);
+ efx_mdio_an_reconfigure(efx);
+
+ schedule_timeout_uninterruptible(HZ / 5); /* 200ms */
+
+ /* Let XGXS and SerDes out of reset */
+ falcon_reset_xaui(efx);
+
+ return 0;
+}
+
+/* Perform a "special software reset" on the PHY. The caller is
+ * responsible for saving and restoring the PHY hardware registers
+ * properly, and masking/unmasking LASI */
+static int tenxpress_special_reset(struct efx_nic *efx)
+{
+ int rc, reg;
+
+ /* The XGMAC clock is driven from the SFX7101 312MHz clock, so
+ * a special software reset can glitch the XGMAC sufficiently for stats
+ * requests to fail. */
+ falcon_stop_nic_stats(efx);
+
+ /* Initiate reset */
+ reg = efx_mdio_read(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG);
+ reg |= (1 << PMA_PMD_EXT_SSR_LBN);
+ efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG, reg);
+
+ mdelay(200);
+
+ /* Wait for the blocks to come out of reset */
+ rc = efx_mdio_wait_reset_mmds(efx, TENXPRESS_REQUIRED_DEVS);
+ if (rc < 0)
+ goto out;
+
+ /* Try and reconfigure the device */
+ rc = tenxpress_init(efx);
+ if (rc < 0)
+ goto out;
+
+ /* Wait for the XGXS state machine to churn */
+ mdelay(10);
+out:
+ falcon_start_nic_stats(efx);
+ return rc;
+}
+
+static void sfx7101_check_bad_lp(struct efx_nic *efx, bool link_ok)
+{
+ struct tenxpress_phy_data *pd = efx->phy_data;
+ bool bad_lp;
+ int reg;
+
+ if (link_ok) {
+ bad_lp = false;
+ } else {
+ /* Check that AN has started but not completed. */
+ reg = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_STAT1);
+ if (!(reg & MDIO_AN_STAT1_LPABLE))
+ return; /* LP status is unknown */
+ bad_lp = !(reg & MDIO_AN_STAT1_COMPLETE);
+ if (bad_lp)
+ pd->bad_lp_tries++;
+ }
+
+ /* Nothing to do if all is well and was previously so. */
+ if (!pd->bad_lp_tries)
+ return;
+
+ /* Use the RX (red) LED as an error indicator once we've seen AN
+ * failure several times in a row, and also log a message. */
+ if (!bad_lp || pd->bad_lp_tries == MAX_BAD_LP_TRIES) {
+ reg = efx_mdio_read(efx, MDIO_MMD_PMAPMD,
+ PMA_PMD_LED_OVERR_REG);
+ reg &= ~(PMA_PMD_LED_MASK << PMA_PMD_LED_RX_LBN);
+ if (!bad_lp) {
+ reg |= PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN;
+ } else {
+ reg |= PMA_PMD_LED_FLASH << PMA_PMD_LED_RX_LBN;
+ netif_err(efx, link, efx->net_dev,
+ "appears to be plugged into a port"
+ " that is not 10GBASE-T capable. The PHY"
+ " supports 10GBASE-T ONLY, so no link can"
+ " be established\n");
+ }
+ efx_mdio_write(efx, MDIO_MMD_PMAPMD,
+ PMA_PMD_LED_OVERR_REG, reg);
+ pd->bad_lp_tries = bad_lp;
+ }
+}
+
+static bool sfx7101_link_ok(struct efx_nic *efx)
+{
+ return efx_mdio_links_ok(efx,
+ MDIO_DEVS_PMAPMD |
+ MDIO_DEVS_PCS |
+ MDIO_DEVS_PHYXS);
+}
+
+static void tenxpress_ext_loopback(struct efx_nic *efx)
+{
+ efx_mdio_set_flag(efx, MDIO_MMD_PHYXS, PHYXS_TEST1,
+ 1 << LOOPBACK_NEAR_LBN,
+ efx->loopback_mode == LOOPBACK_PHYXS);
+}
+
+static void tenxpress_low_power(struct efx_nic *efx)
+{
+ efx_mdio_set_mmds_lpower(
+ efx, !!(efx->phy_mode & PHY_MODE_LOW_POWER),
+ TENXPRESS_REQUIRED_DEVS);
+}
+
+static int tenxpress_phy_reconfigure(struct efx_nic *efx)
+{
+ struct tenxpress_phy_data *phy_data = efx->phy_data;
+ bool phy_mode_change, loop_reset;
+
+ if (efx->phy_mode & (PHY_MODE_OFF | PHY_MODE_SPECIAL)) {
+ phy_data->phy_mode = efx->phy_mode;
+ return 0;
+ }
+
+ phy_mode_change = (efx->phy_mode == PHY_MODE_NORMAL &&
+ phy_data->phy_mode != PHY_MODE_NORMAL);
+ loop_reset = (LOOPBACK_OUT_OF(phy_data, efx, LOOPBACKS_EXTERNAL(efx)) ||
+ LOOPBACK_CHANGED(phy_data, efx, 1 << LOOPBACK_GPHY));
+
+ if (loop_reset || phy_mode_change) {
+ tenxpress_special_reset(efx);
+ falcon_reset_xaui(efx);
+ }
+
+ tenxpress_low_power(efx);
+ efx_mdio_transmit_disable(efx);
+ efx_mdio_phy_reconfigure(efx);
+ tenxpress_ext_loopback(efx);
+ efx_mdio_an_reconfigure(efx);
+
+ phy_data->loopback_mode = efx->loopback_mode;
+ phy_data->phy_mode = efx->phy_mode;
+
+ return 0;
+}
+
+static void
+tenxpress_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd);
+
+/* Poll for link state changes */
+static bool tenxpress_phy_poll(struct efx_nic *efx)
+{
+ struct efx_link_state old_state = efx->link_state;
+
+ efx->link_state.up = sfx7101_link_ok(efx);
+ efx->link_state.speed = 10000;
+ efx->link_state.fd = true;
+ efx->link_state.fc = efx_mdio_get_pause(efx);
+
+ sfx7101_check_bad_lp(efx, efx->link_state.up);
+
+ return !efx_link_state_equal(&efx->link_state, &old_state);
+}
+
+static void sfx7101_phy_fini(struct efx_nic *efx)
+{
+ int reg;
+
+ /* Power down the LNPGA */
+ reg = (1 << PMA_PMD_LNPGA_POWERDOWN_LBN);
+ efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG, reg);
+
+ /* Waiting here ensures that the board fini, which can turn
+ * off the power to the PHY, won't get run until the LNPGA
+ * powerdown has been given long enough to complete. */
+ schedule_timeout_uninterruptible(LNPGA_PDOWN_WAIT); /* 200 ms */
+}
+
+static void tenxpress_phy_remove(struct efx_nic *efx)
+{
+ kfree(efx->phy_data);
+ efx->phy_data = NULL;
+}
+
+
+/* Override the RX, TX and link LEDs */
+void tenxpress_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
+{
+ int reg;
+
+ switch (mode) {
+ case EFX_LED_OFF:
+ reg = (PMA_PMD_LED_OFF << PMA_PMD_LED_TX_LBN) |
+ (PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN) |
+ (PMA_PMD_LED_OFF << PMA_PMD_LED_LINK_LBN);
+ break;
+ case EFX_LED_ON:
+ reg = (PMA_PMD_LED_ON << PMA_PMD_LED_TX_LBN) |
+ (PMA_PMD_LED_ON << PMA_PMD_LED_RX_LBN) |
+ (PMA_PMD_LED_ON << PMA_PMD_LED_LINK_LBN);
+ break;
+ default:
+ reg = SFX7101_PMA_PMD_LED_DEFAULT;
+ break;
+ }
+
+ efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_OVERR_REG, reg);
+}
+
+static const char *const sfx7101_test_names[] = {
+ "bist"
+};
+
+static const char *sfx7101_test_name(struct efx_nic *efx, unsigned int index)
+{
+ if (index < ARRAY_SIZE(sfx7101_test_names))
+ return sfx7101_test_names[index];
+ return NULL;
+}
+
+static int
+sfx7101_run_tests(struct efx_nic *efx, int *results, unsigned flags)
+{
+ int rc;
+
+ if (!(flags & ETH_TEST_FL_OFFLINE))
+ return 0;
+
+ /* BIST is automatically run after a special software reset */
+ rc = tenxpress_special_reset(efx);
+ results[0] = rc ? -1 : 1;
+
+ efx_mdio_an_reconfigure(efx);
+
+ return rc;
+}
+
+static void
+tenxpress_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
+{
+ u32 adv = 0, lpa = 0;
+ int reg;
+
+ reg = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL);
+ if (reg & MDIO_AN_10GBT_CTRL_ADV10G)
+ adv |= ADVERTISED_10000baseT_Full;
+ reg = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_AN_10GBT_STAT);
+ if (reg & MDIO_AN_10GBT_STAT_LP10G)
+ lpa |= ADVERTISED_10000baseT_Full;
+
+ mdio45_ethtool_gset_npage(&efx->mdio, ecmd, adv, lpa);
+
+ /* In loopback, the PHY automatically brings up the correct interface,
+ * but doesn't advertise the correct speed. So override it */
+ if (LOOPBACK_EXTERNAL(efx))
+ ethtool_cmd_speed_set(ecmd, SPEED_10000);
+}
+
+static int tenxpress_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
+{
+ if (!ecmd->autoneg)
+ return -EINVAL;
+
+ return efx_mdio_set_settings(efx, ecmd);
+}
+
+static void sfx7101_set_npage_adv(struct efx_nic *efx, u32 advertising)
+{
+ efx_mdio_set_flag(efx, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
+ MDIO_AN_10GBT_CTRL_ADV10G,
+ advertising & ADVERTISED_10000baseT_Full);
+}
+
+const struct efx_phy_operations falcon_sfx7101_phy_ops = {
+ .probe = tenxpress_phy_probe,
+ .init = tenxpress_phy_init,
+ .reconfigure = tenxpress_phy_reconfigure,
+ .poll = tenxpress_phy_poll,
+ .fini = sfx7101_phy_fini,
+ .remove = tenxpress_phy_remove,
+ .get_settings = tenxpress_get_settings,
+ .set_settings = tenxpress_set_settings,
+ .set_npage_adv = sfx7101_set_npage_adv,
+ .test_alive = efx_mdio_test_alive,
+ .test_name = sfx7101_test_name,
+ .run_tests = sfx7101_run_tests,
+};
diff --git a/kernel/drivers/net/ethernet/sfc/tx.c b/kernel/drivers/net/ethernet/sfc/tx.c
new file mode 100644
index 000000000..aaf298751
--- /dev/null
+++ b/kernel/drivers/net/ethernet/sfc/tx.c
@@ -0,0 +1,1332 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2005-2013 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/pci.h>
+#include <linux/tcp.h>
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/ipv6.h>
+#include <linux/slab.h>
+#include <net/ipv6.h>
+#include <linux/if_ether.h>
+#include <linux/highmem.h>
+#include <linux/cache.h>
+#include "net_driver.h"
+#include "efx.h"
+#include "io.h"
+#include "nic.h"
+#include "workarounds.h"
+#include "ef10_regs.h"
+
+#ifdef EFX_USE_PIO
+
+#define EFX_PIOBUF_SIZE_MAX ER_DZ_TX_PIOBUF_SIZE
+#define EFX_PIOBUF_SIZE_DEF ALIGN(256, L1_CACHE_BYTES)
+unsigned int efx_piobuf_size __read_mostly = EFX_PIOBUF_SIZE_DEF;
+
+#endif /* EFX_USE_PIO */
+
+static inline unsigned int
+efx_tx_queue_get_insert_index(const struct efx_tx_queue *tx_queue)
+{
+ return tx_queue->insert_count & tx_queue->ptr_mask;
+}
+
+static inline struct efx_tx_buffer *
+__efx_tx_queue_get_insert_buffer(const struct efx_tx_queue *tx_queue)
+{
+ return &tx_queue->buffer[efx_tx_queue_get_insert_index(tx_queue)];
+}
+
+static inline struct efx_tx_buffer *
+efx_tx_queue_get_insert_buffer(const struct efx_tx_queue *tx_queue)
+{
+ struct efx_tx_buffer *buffer =
+ __efx_tx_queue_get_insert_buffer(tx_queue);
+
+ EFX_BUG_ON_PARANOID(buffer->len);
+ EFX_BUG_ON_PARANOID(buffer->flags);
+ EFX_BUG_ON_PARANOID(buffer->unmap_len);
+
+ return buffer;
+}
+
+static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
+ struct efx_tx_buffer *buffer,
+ unsigned int *pkts_compl,
+ unsigned int *bytes_compl)
+{
+ if (buffer->unmap_len) {
+ struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
+ dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset;
+ if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
+ dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
+ DMA_TO_DEVICE);
+ buffer->unmap_len = 0;
+ }
+
+ if (buffer->flags & EFX_TX_BUF_SKB) {
+ (*pkts_compl)++;
+ (*bytes_compl) += buffer->skb->len;
+ dev_consume_skb_any((struct sk_buff *)buffer->skb);
+ netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
+ "TX queue %d transmission id %x complete\n",
+ tx_queue->queue, tx_queue->read_count);
+ } else if (buffer->flags & EFX_TX_BUF_HEAP) {
+ kfree(buffer->heap_buf);
+ }
+
+ buffer->len = 0;
+ buffer->flags = 0;
+}
+
+static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
+ struct sk_buff *skb);
+
+static inline unsigned
+efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
+{
+ /* Depending on the NIC revision, we can use descriptor
+ * lengths up to 8K or 8K-1. However, since PCI Express
+ * devices must split read requests at 4K boundaries, there is
+ * little benefit from using descriptors that cross those
+ * boundaries and we keep things simple by not doing so.
+ */
+ unsigned len = (~dma_addr & (EFX_PAGE_SIZE - 1)) + 1;
+
+ /* Work around hardware bug for unaligned buffers. */
+ if (EFX_WORKAROUND_5391(efx) && (dma_addr & 0xf))
+ len = min_t(unsigned, len, 512 - (dma_addr & 0xf));
+
+ return len;
+}
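+
+/* Worked example (illustrative only; assumes EFX_PAGE_SIZE == 4096, as the
+ * 4K read-request comment above implies): for dma_addr = 0x12345f80,
+ * (~dma_addr & 0xfff) + 1 = 0x7f + 1 = 0x80, so at most 128 bytes go into
+ * this descriptor before a new one starts at the 4K boundary.
+ */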
+
+unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
+{
+ /* Header and payload descriptor for each output segment, plus
+ * one for every input fragment boundary within a segment
+ */
+ unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;
+
+ /* Possibly one more per segment for the alignment workaround,
+ * or for option descriptors
+ */
+ if (EFX_WORKAROUND_5391(efx) || efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
+ max_descs += EFX_TSO_MAX_SEGS;
+
+ /* Possibly more for PCIe page boundaries within input fragments */
+ if (PAGE_SIZE > EFX_PAGE_SIZE)
+ max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
+ DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));
+
+ return max_descs;
+}
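+
+/* Worked example (illustrative only; assumes EFX_TSO_MAX_SEGS == 100 and
+ * MAX_SKB_FRAGS == 17): the baseline is 100 * 2 + 17 = 217 descriptors,
+ * and the alignment-workaround/option-descriptor case adds another 100,
+ * giving 317. The final term only applies when PAGE_SIZE exceeds the 4K
+ * EFX_PAGE_SIZE.
+ */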
+
+static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
+{
+ /* We need to consider both queues that the net core sees as one */
+ struct efx_tx_queue *txq2 = efx_tx_queue_partner(txq1);
+ struct efx_nic *efx = txq1->efx;
+ unsigned int fill_level;
+
+ fill_level = max(txq1->insert_count - txq1->old_read_count,
+ txq2->insert_count - txq2->old_read_count);
+ if (likely(fill_level < efx->txq_stop_thresh))
+ return;
+
+ /* We used the stale old_read_count above, which gives us a
+ * pessimistic estimate of the fill level (which may even
+ * validly be >= efx->txq_entries). Now try again using
+ * read_count (more likely to be a cache miss).
+ *
+ * If we read read_count and then conditionally stop the
+ * queue, it is possible for the completion path to race with
+ * us and complete all outstanding descriptors in the middle,
+ * after which there will be no more completions to wake it.
+ * Therefore we stop the queue first, then read read_count
+ * (with a memory barrier to ensure the ordering), then
+ * restart the queue if the fill level turns out to be low
+ * enough.
+ */
+ netif_tx_stop_queue(txq1->core_txq);
+ smp_mb();
+ txq1->old_read_count = ACCESS_ONCE(txq1->read_count);
+ txq2->old_read_count = ACCESS_ONCE(txq2->read_count);
+
+ fill_level = max(txq1->insert_count - txq1->old_read_count,
+ txq2->insert_count - txq2->old_read_count);
+ EFX_BUG_ON_PARANOID(fill_level >= efx->txq_entries);
+ if (likely(fill_level < efx->txq_stop_thresh)) {
+ smp_mb();
+ if (likely(!efx->loopback_selftest))
+ netif_tx_start_queue(txq1->core_txq);
+ }
+}
+
+#ifdef EFX_USE_PIO
+
+struct efx_short_copy_buffer {
+ int used;
+ u8 buf[L1_CACHE_BYTES];
+};
+
+/* Copy to PIO, respecting that writes to PIO buffers must be dword aligned.
+ * Advances piobuf pointer. Leaves additional data in the copy buffer.
+ */
+static void efx_memcpy_toio_aligned(struct efx_nic *efx, u8 __iomem **piobuf,
+ u8 *data, int len,
+ struct efx_short_copy_buffer *copy_buf)
+{
+ int block_len = len & ~(sizeof(copy_buf->buf) - 1);
+
+ __iowrite64_copy(*piobuf, data, block_len >> 3);
+ *piobuf += block_len;
+ len -= block_len;
+
+ if (len) {
+ data += block_len;
+ BUG_ON(copy_buf->used);
+ BUG_ON(len > sizeof(copy_buf->buf));
+ memcpy(copy_buf->buf, data, len);
+ copy_buf->used = len;
+ }
+}
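+
+/* Worked example (illustrative only; assumes L1_CACHE_BYTES == 64): a call
+ * with len = 150 writes block_len = 150 & ~63 = 128 bytes (16 qwords) to
+ * the PIO buffer and leaves the remaining 22 bytes in copy_buf for a later
+ * cache-line-sized flush.
+ */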
+
+/* Copy to PIO, respecting dword alignment, popping data from copy buffer first.
+ * Advances piobuf pointer. Leaves additional data in the copy buffer.
+ */
+static void efx_memcpy_toio_aligned_cb(struct efx_nic *efx, u8 __iomem **piobuf,
+ u8 *data, int len,
+ struct efx_short_copy_buffer *copy_buf)
+{
+ if (copy_buf->used) {
+ /* if the copy buffer is partially full, fill it up and write */
+ int copy_to_buf =
+ min_t(int, sizeof(copy_buf->buf) - copy_buf->used, len);
+
+ memcpy(copy_buf->buf + copy_buf->used, data, copy_to_buf);
+ copy_buf->used += copy_to_buf;
+
+ /* if we didn't fill it up then we're done for now */
+ if (copy_buf->used < sizeof(copy_buf->buf))
+ return;
+
+ __iowrite64_copy(*piobuf, copy_buf->buf,
+ sizeof(copy_buf->buf) >> 3);
+ *piobuf += sizeof(copy_buf->buf);
+ data += copy_to_buf;
+ len -= copy_to_buf;
+ copy_buf->used = 0;
+ }
+
+ efx_memcpy_toio_aligned(efx, piobuf, data, len, copy_buf);
+}
+
+static void efx_flush_copy_buffer(struct efx_nic *efx, u8 __iomem *piobuf,
+ struct efx_short_copy_buffer *copy_buf)
+{
+ /* if there's anything in it, write the whole buffer, including junk */
+ if (copy_buf->used)
+ __iowrite64_copy(piobuf, copy_buf->buf,
+ sizeof(copy_buf->buf) >> 3);
+}
+
+/* Traverse skb structure and copy fragments in to PIO buffer.
+ * Advances piobuf pointer.
+ */
+static void efx_skb_copy_bits_to_pio(struct efx_nic *efx, struct sk_buff *skb,
+ u8 __iomem **piobuf,
+ struct efx_short_copy_buffer *copy_buf)
+{
+ int i;
+
+ efx_memcpy_toio_aligned(efx, piobuf, skb->data, skb_headlen(skb),
+ copy_buf);
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
+ skb_frag_t *f = &skb_shinfo(skb)->frags[i];
+ u8 *vaddr;
+
+ vaddr = kmap_atomic(skb_frag_page(f));
+
+ efx_memcpy_toio_aligned_cb(efx, piobuf, vaddr + f->page_offset,
+ skb_frag_size(f), copy_buf);
+ kunmap_atomic(vaddr);
+ }
+
+ EFX_BUG_ON_PARANOID(skb_shinfo(skb)->frag_list);
+}
+
+static struct efx_tx_buffer *
+efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
+{
+ struct efx_tx_buffer *buffer =
+ efx_tx_queue_get_insert_buffer(tx_queue);
+ u8 __iomem *piobuf = tx_queue->piobuf;
+
+ /* Copy to PIO buffer. Ensure the writes are padded to the end
+ * of a cache line, as this is required for write-combining to be
+ * effective on at least x86.
+ */
+
+ if (skb_shinfo(skb)->nr_frags) {
+ /* The size of the copy buffer will ensure all writes
+ * are the size of a cache line.
+ */
+ struct efx_short_copy_buffer copy_buf;
+
+ copy_buf.used = 0;
+
+ efx_skb_copy_bits_to_pio(tx_queue->efx, skb,
+ &piobuf, &copy_buf);
+ efx_flush_copy_buffer(tx_queue->efx, piobuf, &copy_buf);
+ } else {
+ /* Pad the write to the size of a cache line.
+		 * We can do this because we know the skb_shared_info struct is
+ * after the source, and the destination buffer is big enough.
+ */
+ BUILD_BUG_ON(L1_CACHE_BYTES >
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
+ __iowrite64_copy(tx_queue->piobuf, skb->data,
+ ALIGN(skb->len, L1_CACHE_BYTES) >> 3);
+ }
+
+ EFX_POPULATE_QWORD_5(buffer->option,
+ ESF_DZ_TX_DESC_IS_OPT, 1,
+ ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_PIO,
+ ESF_DZ_TX_PIO_CONT, 0,
+ ESF_DZ_TX_PIO_BYTE_CNT, skb->len,
+ ESF_DZ_TX_PIO_BUF_ADDR,
+ tx_queue->piobuf_offset);
+ ++tx_queue->pio_packets;
+ ++tx_queue->insert_count;
+ return buffer;
+}
+#endif /* EFX_USE_PIO */
+
+/*
+ * Add a socket buffer to a TX queue
+ *
+ * This maps all fragments of a socket buffer for DMA and adds them to
+ * the TX queue. The queue's insert pointer will be incremented by
+ * the number of fragments in the socket buffer.
+ *
+ * If any DMA mapping fails, any mapped fragments will be unmapped,
+ * and the queue's insert pointer will be restored to its original value.
+ *
+ * This function is split out from efx_hard_start_xmit to allow the
+ * loopback test to direct packets via specific TX queues.
+ *
+ * Returns NETDEV_TX_OK.
+ * You must hold netif_tx_lock() to call this function.
+ */
+netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ struct device *dma_dev = &efx->pci_dev->dev;
+ struct efx_tx_buffer *buffer;
+ unsigned int old_insert_count = tx_queue->insert_count;
+ skb_frag_t *fragment;
+ unsigned int len, unmap_len = 0;
+ dma_addr_t dma_addr, unmap_addr = 0;
+ unsigned int dma_len;
+ unsigned short dma_flags;
+ int i = 0;
+
+ if (skb_shinfo(skb)->gso_size)
+ return efx_enqueue_skb_tso(tx_queue, skb);
+
+ /* Get size of the initial fragment */
+ len = skb_headlen(skb);
+
+ /* Pad if necessary */
+ if (EFX_WORKAROUND_15592(efx) && skb->len <= 32) {
+ EFX_BUG_ON_PARANOID(skb->data_len);
+ len = 32 + 1;
+ if (skb_pad(skb, len - skb->len))
+ return NETDEV_TX_OK;
+ }
+
+ /* Consider using PIO for short packets */
+#ifdef EFX_USE_PIO
+ if (skb->len <= efx_piobuf_size && !skb->xmit_more &&
+ efx_nic_may_tx_pio(tx_queue)) {
+ buffer = efx_enqueue_skb_pio(tx_queue, skb);
+ dma_flags = EFX_TX_BUF_OPTION;
+ goto finish_packet;
+ }
+#endif
+
+ /* Map for DMA. Use dma_map_single rather than dma_map_page
+ * since this is more efficient on machines with sparse
+ * memory.
+ */
+ dma_flags = EFX_TX_BUF_MAP_SINGLE;
+	dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);
+
+ /* Process all fragments */
+ while (1) {
+ if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
+ goto dma_err;
+
+ /* Store fields for marking in the per-fragment final
+ * descriptor */
+ unmap_len = len;
+ unmap_addr = dma_addr;
+
+ /* Add to TX queue, splitting across DMA boundaries */
+ do {
+ buffer = efx_tx_queue_get_insert_buffer(tx_queue);
+
+ dma_len = efx_max_tx_len(efx, dma_addr);
+ if (likely(dma_len >= len))
+ dma_len = len;
+
+ /* Fill out per descriptor fields */
+ buffer->len = dma_len;
+ buffer->dma_addr = dma_addr;
+ buffer->flags = EFX_TX_BUF_CONT;
+ len -= dma_len;
+ dma_addr += dma_len;
+ ++tx_queue->insert_count;
+ } while (len);
+
+ /* Transfer ownership of the unmapping to the final buffer */
+ buffer->flags = EFX_TX_BUF_CONT | dma_flags;
+ buffer->unmap_len = unmap_len;
+ buffer->dma_offset = buffer->dma_addr - unmap_addr;
+ unmap_len = 0;
+
+ /* Get address and size of next fragment */
+ if (i >= skb_shinfo(skb)->nr_frags)
+ break;
+ fragment = &skb_shinfo(skb)->frags[i];
+ len = skb_frag_size(fragment);
+ i++;
+ /* Map for DMA */
+ dma_flags = 0;
+ dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
+ DMA_TO_DEVICE);
+ }
+
+ /* Transfer ownership of the skb to the final buffer */
+#ifdef EFX_USE_PIO
+finish_packet:
+#endif
+ buffer->skb = skb;
+ buffer->flags = EFX_TX_BUF_SKB | dma_flags;
+
+ netdev_tx_sent_queue(tx_queue->core_txq, skb->len);
+
+ efx_tx_maybe_stop_queue(tx_queue);
+
+ /* Pass off to hardware */
+ if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq))
+ efx_nic_push_buffers(tx_queue);
+
+ tx_queue->tx_packets++;
+
+ return NETDEV_TX_OK;
+
+ dma_err:
+ netif_err(efx, tx_err, efx->net_dev,
+ " TX queue %d could not map skb with %d bytes %d "
+ "fragments for DMA\n", tx_queue->queue, skb->len,
+ skb_shinfo(skb)->nr_frags + 1);
+
+ /* Mark the packet as transmitted, and free the SKB ourselves */
+ dev_kfree_skb_any(skb);
+
+ /* Work backwards until we hit the original insert pointer value */
+ while (tx_queue->insert_count != old_insert_count) {
+ unsigned int pkts_compl = 0, bytes_compl = 0;
+ --tx_queue->insert_count;
+ buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
+ efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
+ }
+
+ /* Free the fragment we were mid-way through pushing */
+ if (unmap_len) {
+ if (dma_flags & EFX_TX_BUF_MAP_SINGLE)
+ dma_unmap_single(dma_dev, unmap_addr, unmap_len,
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_page(dma_dev, unmap_addr, unmap_len,
+ DMA_TO_DEVICE);
+ }
+
+ return NETDEV_TX_OK;
+}
+
+/* Remove packets from the TX queue
+ *
+ * This removes packets from the TX queue, up to and including the
+ * specified index.
+ */
+static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
+ unsigned int index,
+ unsigned int *pkts_compl,
+ unsigned int *bytes_compl)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ unsigned int stop_index, read_ptr;
+
+ stop_index = (index + 1) & tx_queue->ptr_mask;
+ read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
+
+ while (read_ptr != stop_index) {
+ struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
+
+ if (!(buffer->flags & EFX_TX_BUF_OPTION) &&
+ unlikely(buffer->len == 0)) {
+ netif_err(efx, tx_err, efx->net_dev,
+ "TX queue %d spurious TX completion id %x\n",
+ tx_queue->queue, read_ptr);
+ efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
+ return;
+ }
+
+ efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);
+
+ ++tx_queue->read_count;
+ read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
+ }
+}
+
+/* Initiate a packet transmission. We use one channel per CPU
+ * (sharing when we have more CPUs than channels). On Falcon, the TX
+ * completion events will be directed back to the CPU that transmitted
+ * the packet, which should be cache-efficient.
+ *
+ * Context: non-blocking.
+ * Note that returning anything other than NETDEV_TX_OK will cause the
+ * OS to free the skb.
+ */
+netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
+ struct net_device *net_dev)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_tx_queue *tx_queue;
+ unsigned index, type;
+
+ EFX_WARN_ON_PARANOID(!netif_device_present(net_dev));
+
+ /* PTP "event" packet */
+ if (unlikely(efx_xmit_with_hwtstamp(skb)) &&
+ unlikely(efx_ptp_is_ptp_tx(efx, skb))) {
+ return efx_ptp_tx(efx, skb);
+ }
+
+ index = skb_get_queue_mapping(skb);
+ type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
+ if (index >= efx->n_tx_channels) {
+ index -= efx->n_tx_channels;
+ type |= EFX_TXQ_TYPE_HIGHPRI;
+ }
+ tx_queue = efx_get_tx_queue(efx, index, type);
+
+ return efx_enqueue_skb(tx_queue, skb);
+}
+
+void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
+{
+ struct efx_nic *efx = tx_queue->efx;
+
+ /* Must be inverse of queue lookup in efx_hard_start_xmit() */
+ tx_queue->core_txq =
+ netdev_get_tx_queue(efx->net_dev,
+ tx_queue->queue / EFX_TXQ_TYPES +
+ ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
+ efx->n_tx_channels : 0));
+}
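+
+/* Worked example (illustrative only; assumes EFX_TXQ_TYPES == 4 and
+ * efx->n_tx_channels == 4): the high-priority hardware queues of channel 1
+ * (queue 6 or 7) map to core queue 6 / 4 + 4 = 5 (likewise 7 / 4 + 4 = 5),
+ * which is the inverse of the "index -= n_tx_channels; type |= HIGHPRI"
+ * split performed in efx_hard_start_xmit() above.
+ */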
+
+int efx_setup_tc(struct net_device *net_dev, u8 num_tc)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_channel *channel;
+ struct efx_tx_queue *tx_queue;
+ unsigned tc;
+ int rc;
+
+ if (efx_nic_rev(efx) < EFX_REV_FALCON_B0 || num_tc > EFX_MAX_TX_TC)
+ return -EINVAL;
+
+ if (num_tc == net_dev->num_tc)
+ return 0;
+
+ for (tc = 0; tc < num_tc; tc++) {
+ net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
+ net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
+ }
+
+ if (num_tc > net_dev->num_tc) {
+ /* Initialise high-priority queues as necessary */
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_possible_channel_tx_queue(tx_queue,
+ channel) {
+ if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI))
+ continue;
+ if (!tx_queue->buffer) {
+ rc = efx_probe_tx_queue(tx_queue);
+ if (rc)
+ return rc;
+ }
+ if (!tx_queue->initialised)
+ efx_init_tx_queue(tx_queue);
+ efx_init_tx_queue_core_txq(tx_queue);
+ }
+ }
+ } else {
+ /* Reduce number of classes before number of queues */
+ net_dev->num_tc = num_tc;
+ }
+
+ rc = netif_set_real_num_tx_queues(net_dev,
+ max_t(int, num_tc, 1) *
+ efx->n_tx_channels);
+ if (rc)
+ return rc;
+
+ /* Do not destroy high-priority queues when they become
+ * unused. We would have to flush them first, and it is
+ * fairly difficult to flush a subset of TX queues. Leave
+ * it to efx_fini_channels().
+ */
+
+ net_dev->num_tc = num_tc;
+ return 0;
+}
+
+void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
+{
+ unsigned fill_level;
+ struct efx_nic *efx = tx_queue->efx;
+ struct efx_tx_queue *txq2;
+ unsigned int pkts_compl = 0, bytes_compl = 0;
+
+ EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);
+
+ efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
+ netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl);
+
+ if (pkts_compl > 1)
+ ++tx_queue->merge_events;
+
+ /* See if we need to restart the netif queue. This memory
+ * barrier ensures that we write read_count (inside
+ * efx_dequeue_buffers()) before reading the queue status.
+ */
+ smp_mb();
+ if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
+ likely(efx->port_enabled) &&
+ likely(netif_device_present(efx->net_dev))) {
+ txq2 = efx_tx_queue_partner(tx_queue);
+ fill_level = max(tx_queue->insert_count - tx_queue->read_count,
+ txq2->insert_count - txq2->read_count);
+ if (fill_level <= efx->txq_wake_thresh)
+ netif_tx_wake_queue(tx_queue->core_txq);
+ }
+
+ /* Check whether the hardware queue is now empty */
+ if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
+ tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count);
+ if (tx_queue->read_count == tx_queue->old_write_count) {
+ smp_mb();
+ tx_queue->empty_read_count =
+ tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
+ }
+ }
+}
+
+/* Size of page-based TSO header buffers. Larger blocks must be
+ * allocated from the heap.
+ */
+#define TSOH_STD_SIZE 128
+#define TSOH_PER_PAGE (PAGE_SIZE / TSOH_STD_SIZE)
+
+/* At most half the descriptors in the queue at any time will refer to
+ * a TSO header buffer, since they must always be followed by a
+ * payload descriptor referring to an skb.
+ */
+static unsigned int efx_tsoh_page_count(struct efx_tx_queue *tx_queue)
+{
+ return DIV_ROUND_UP(tx_queue->ptr_mask + 1, 2 * TSOH_PER_PAGE);
+}
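+
+/* Worked example (illustrative only; assumes PAGE_SIZE == 4096, so
+ * TSOH_PER_PAGE == 32): a TX ring with ptr_mask + 1 == 1024 descriptors
+ * needs DIV_ROUND_UP(1024, 2 * 32) = 16 header pages.
+ */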
+
+int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ unsigned int entries;
+ int rc;
+
+ /* Create the smallest power-of-two aligned ring */
+ entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
+ EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
+ tx_queue->ptr_mask = entries - 1;
+
+ netif_dbg(efx, probe, efx->net_dev,
+ "creating TX queue %d size %#x mask %#x\n",
+ tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);
+
+ /* Allocate software ring */
+ tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
+ GFP_KERNEL);
+ if (!tx_queue->buffer)
+ return -ENOMEM;
+
+ if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD) {
+ tx_queue->tsoh_page =
+ kcalloc(efx_tsoh_page_count(tx_queue),
+ sizeof(tx_queue->tsoh_page[0]), GFP_KERNEL);
+ if (!tx_queue->tsoh_page) {
+ rc = -ENOMEM;
+ goto fail1;
+ }
+ }
+
+ /* Allocate hardware ring */
+ rc = efx_nic_probe_tx(tx_queue);
+ if (rc)
+ goto fail2;
+
+ return 0;
+
+fail2:
+ kfree(tx_queue->tsoh_page);
+ tx_queue->tsoh_page = NULL;
+fail1:
+ kfree(tx_queue->buffer);
+ tx_queue->buffer = NULL;
+ return rc;
+}
+
+void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
+{
+ netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
+ "initialising TX queue %d\n", tx_queue->queue);
+
+ tx_queue->insert_count = 0;
+ tx_queue->write_count = 0;
+ tx_queue->old_write_count = 0;
+ tx_queue->read_count = 0;
+ tx_queue->old_read_count = 0;
+ tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
+
+ /* Set up TX descriptor ring */
+ efx_nic_init_tx(tx_queue);
+
+ tx_queue->initialised = true;
+}
+
+void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
+{
+ struct efx_tx_buffer *buffer;
+
+ netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
+ "shutting down TX queue %d\n", tx_queue->queue);
+
+ if (!tx_queue->buffer)
+ return;
+
+ /* Free any buffers left in the ring */
+ while (tx_queue->read_count != tx_queue->write_count) {
+ unsigned int pkts_compl = 0, bytes_compl = 0;
+ buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
+ efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
+
+ ++tx_queue->read_count;
+ }
+ netdev_tx_reset_queue(tx_queue->core_txq);
+}
+
+void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
+{
+ int i;
+
+ if (!tx_queue->buffer)
+ return;
+
+ netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
+ "destroying TX queue %d\n", tx_queue->queue);
+ efx_nic_remove_tx(tx_queue);
+
+ if (tx_queue->tsoh_page) {
+ for (i = 0; i < efx_tsoh_page_count(tx_queue); i++)
+ efx_nic_free_buffer(tx_queue->efx,
+ &tx_queue->tsoh_page[i]);
+ kfree(tx_queue->tsoh_page);
+ tx_queue->tsoh_page = NULL;
+ }
+
+ kfree(tx_queue->buffer);
+ tx_queue->buffer = NULL;
+}
+
+
+/* Efx TCP segmentation acceleration.
+ *
+ * Why? Because by doing it here in the driver we can go significantly
+ * faster than the GSO.
+ *
+ * Requires TX checksum offload support.
+ */
+
+#define PTR_DIFF(p1, p2) ((u8 *)(p1) - (u8 *)(p2))
+
+/**
+ * struct tso_state - TSO state for an SKB
+ * @out_len: Remaining length in current segment
+ * @seqnum: Current sequence number
+ * @ipv4_id: Current IPv4 ID, host endian
+ * @packet_space: Remaining space in current packet
+ * @dma_addr: DMA address of current position
+ * @in_len: Remaining length in current SKB fragment
+ * @unmap_len: Length of SKB fragment
+ * @unmap_addr: DMA address of SKB fragment
+ * @dma_flags: TX buffer flags for DMA mapping - %EFX_TX_BUF_MAP_SINGLE or 0
+ * @protocol: Network protocol (after any VLAN header)
+ * @ip_off: Offset of IP header
+ * @tcp_off: Offset of TCP header
+ * @header_len: Number of bytes of header
+ * @ip_base_len: IPv4 tot_len or IPv6 payload_len, before TCP payload
+ * @header_dma_addr: Header DMA address, when using option descriptors
+ * @header_unmap_len: Header DMA mapped length, or 0 if not using option
+ * descriptors
+ *
+ * The state used during segmentation. It is put into this data structure
+ * just to make it easy to pass into inline functions.
+ */
+struct tso_state {
+ /* Output position */
+ unsigned out_len;
+ unsigned seqnum;
+ u16 ipv4_id;
+ unsigned packet_space;
+
+ /* Input position */
+ dma_addr_t dma_addr;
+ unsigned in_len;
+ unsigned unmap_len;
+ dma_addr_t unmap_addr;
+ unsigned short dma_flags;
+
+ __be16 protocol;
+ unsigned int ip_off;
+ unsigned int tcp_off;
+ unsigned header_len;
+ unsigned int ip_base_len;
+ dma_addr_t header_dma_addr;
+ unsigned int header_unmap_len;
+};
+
+
+/*
+ * Verify that our various assumptions about sk_buffs and the conditions
+ * under which TSO will be attempted hold true. Return the protocol number.
+ */
+static __be16 efx_tso_check_protocol(struct sk_buff *skb)
+{
+ __be16 protocol = skb->protocol;
+
+ EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto !=
+ protocol);
+ if (protocol == htons(ETH_P_8021Q)) {
+ struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
+ protocol = veh->h_vlan_encapsulated_proto;
+ }
+
+ if (protocol == htons(ETH_P_IP)) {
+ EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP);
+ } else {
+ EFX_BUG_ON_PARANOID(protocol != htons(ETH_P_IPV6));
+ EFX_BUG_ON_PARANOID(ipv6_hdr(skb)->nexthdr != NEXTHDR_TCP);
+ }
+ EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data)
+ + (tcp_hdr(skb)->doff << 2u)) >
+ skb_headlen(skb));
+
+ return protocol;
+}
+
+static u8 *efx_tsoh_get_buffer(struct efx_tx_queue *tx_queue,
+ struct efx_tx_buffer *buffer, unsigned int len)
+{
+ u8 *result;
+
+ EFX_BUG_ON_PARANOID(buffer->len);
+ EFX_BUG_ON_PARANOID(buffer->flags);
+ EFX_BUG_ON_PARANOID(buffer->unmap_len);
+
+ if (likely(len <= TSOH_STD_SIZE - NET_IP_ALIGN)) {
+ unsigned index =
+ (tx_queue->insert_count & tx_queue->ptr_mask) / 2;
+ struct efx_buffer *page_buf =
+ &tx_queue->tsoh_page[index / TSOH_PER_PAGE];
+ unsigned offset =
+ TSOH_STD_SIZE * (index % TSOH_PER_PAGE) + NET_IP_ALIGN;
+
+ if (unlikely(!page_buf->addr) &&
+ efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
+ GFP_ATOMIC))
+ return NULL;
+
+ result = (u8 *)page_buf->addr + offset;
+ buffer->dma_addr = page_buf->dma_addr + offset;
+ buffer->flags = EFX_TX_BUF_CONT;
+ } else {
+ tx_queue->tso_long_headers++;
+
+ buffer->heap_buf = kmalloc(NET_IP_ALIGN + len, GFP_ATOMIC);
+ if (unlikely(!buffer->heap_buf))
+ return NULL;
+ result = (u8 *)buffer->heap_buf + NET_IP_ALIGN;
+ buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_HEAP;
+ }
+
+ buffer->len = len;
+
+ return result;
+}
+
+/**
+ * efx_tx_queue_insert - push descriptors onto the TX queue
+ * @tx_queue: Efx TX queue
+ * @dma_addr: DMA address of fragment
+ * @len: Length of fragment
+ * @final_buffer: The final buffer inserted into the queue
+ *
+ * Push descriptors onto the TX queue.
+ */
+static void efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
+ dma_addr_t dma_addr, unsigned len,
+ struct efx_tx_buffer **final_buffer)
+{
+ struct efx_tx_buffer *buffer;
+ struct efx_nic *efx = tx_queue->efx;
+ unsigned dma_len;
+
+ EFX_BUG_ON_PARANOID(len <= 0);
+
+ while (1) {
+ buffer = efx_tx_queue_get_insert_buffer(tx_queue);
+ ++tx_queue->insert_count;
+
+ EFX_BUG_ON_PARANOID(tx_queue->insert_count -
+ tx_queue->read_count >=
+ efx->txq_entries);
+
+ buffer->dma_addr = dma_addr;
+
+ dma_len = efx_max_tx_len(efx, dma_addr);
+
+ /* If there is enough space to send then do so */
+ if (dma_len >= len)
+ break;
+
+ buffer->len = dma_len;
+ buffer->flags = EFX_TX_BUF_CONT;
+ dma_addr += dma_len;
+ len -= dma_len;
+ }
+
+ EFX_BUG_ON_PARANOID(!len);
+ buffer->len = len;
+ *final_buffer = buffer;
+}
+
+
+/*
+ * Put a TSO header into the TX queue.
+ *
+ * This is special-cased because we know that it is small enough to fit in
+ * a single fragment, and we know it doesn't cross a page boundary. It
+ * also allows us to not worry about end-of-packet etc.
+ */
+static int efx_tso_put_header(struct efx_tx_queue *tx_queue,
+ struct efx_tx_buffer *buffer, u8 *header)
+{
+ if (unlikely(buffer->flags & EFX_TX_BUF_HEAP)) {
+ buffer->dma_addr = dma_map_single(&tx_queue->efx->pci_dev->dev,
+ header, buffer->len,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(&tx_queue->efx->pci_dev->dev,
+ buffer->dma_addr))) {
+ kfree(buffer->heap_buf);
+ buffer->len = 0;
+ buffer->flags = 0;
+ return -ENOMEM;
+ }
+ buffer->unmap_len = buffer->len;
+ buffer->dma_offset = 0;
+ buffer->flags |= EFX_TX_BUF_MAP_SINGLE;
+ }
+
+ ++tx_queue->insert_count;
+ return 0;
+}
+
+
+/* Remove buffers put into a tx_queue. None of the buffers must have
+ * an skb attached.
+ */
+static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
+ unsigned int insert_count)
+{
+ struct efx_tx_buffer *buffer;
+
+ /* Work backwards until we hit the original insert pointer value */
+ while (tx_queue->insert_count != insert_count) {
+ --tx_queue->insert_count;
+ buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
+ efx_dequeue_buffer(tx_queue, buffer, NULL, NULL);
+ }
+}
+
+
+/* Parse the SKB header and initialise state. */
+static int tso_start(struct tso_state *st, struct efx_nic *efx,
+ const struct sk_buff *skb)
+{
+ bool use_opt_desc = efx_nic_rev(efx) >= EFX_REV_HUNT_A0;
+ struct device *dma_dev = &efx->pci_dev->dev;
+ unsigned int header_len, in_len;
+ dma_addr_t dma_addr;
+
+ st->ip_off = skb_network_header(skb) - skb->data;
+ st->tcp_off = skb_transport_header(skb) - skb->data;
+ header_len = st->tcp_off + (tcp_hdr(skb)->doff << 2u);
+ in_len = skb_headlen(skb) - header_len;
+ st->header_len = header_len;
+ st->in_len = in_len;
+ if (st->protocol == htons(ETH_P_IP)) {
+ st->ip_base_len = st->header_len - st->ip_off;
+ st->ipv4_id = ntohs(ip_hdr(skb)->id);
+ } else {
+ st->ip_base_len = st->header_len - st->tcp_off;
+ st->ipv4_id = 0;
+ }
+ st->seqnum = ntohl(tcp_hdr(skb)->seq);
+
+ EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg);
+ EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn);
+ EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst);
+
+ st->out_len = skb->len - header_len;
+
+ if (!use_opt_desc) {
+ st->header_unmap_len = 0;
+
+ if (likely(in_len == 0)) {
+ st->dma_flags = 0;
+ st->unmap_len = 0;
+ return 0;
+ }
+
+ dma_addr = dma_map_single(dma_dev, skb->data + header_len,
+ in_len, DMA_TO_DEVICE);
+ st->dma_flags = EFX_TX_BUF_MAP_SINGLE;
+ st->dma_addr = dma_addr;
+ st->unmap_addr = dma_addr;
+ st->unmap_len = in_len;
+ } else {
+ dma_addr = dma_map_single(dma_dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ st->header_dma_addr = dma_addr;
+ st->header_unmap_len = skb_headlen(skb);
+ st->dma_flags = 0;
+ st->dma_addr = dma_addr + header_len;
+ st->unmap_len = 0;
+ }
+
+ return unlikely(dma_mapping_error(dma_dev, dma_addr)) ? -ENOMEM : 0;
+}
+
+static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
+ skb_frag_t *frag)
+{
+ st->unmap_addr = skb_frag_dma_map(&efx->pci_dev->dev, frag, 0,
+ skb_frag_size(frag), DMA_TO_DEVICE);
+ if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
+ st->dma_flags = 0;
+ st->unmap_len = skb_frag_size(frag);
+ st->in_len = skb_frag_size(frag);
+ st->dma_addr = st->unmap_addr;
+ return 0;
+ }
+ return -ENOMEM;
+}
+
+
+/**
+ * tso_fill_packet_with_fragment - form descriptors for the current fragment
+ * @tx_queue: Efx TX queue
+ * @skb: Socket buffer
+ * @st: TSO state
+ *
+ * Form descriptors for the current fragment, until we reach the end
+ * of fragment or end-of-packet.
+ */
+static void tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
+ const struct sk_buff *skb,
+ struct tso_state *st)
+{
+ struct efx_tx_buffer *buffer;
+ int n;
+
+ if (st->in_len == 0)
+ return;
+ if (st->packet_space == 0)
+ return;
+
+ EFX_BUG_ON_PARANOID(st->in_len <= 0);
+ EFX_BUG_ON_PARANOID(st->packet_space <= 0);
+
+ n = min(st->in_len, st->packet_space);
+
+ st->packet_space -= n;
+ st->out_len -= n;
+ st->in_len -= n;
+
+ efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);
+
+ if (st->out_len == 0) {
+ /* Transfer ownership of the skb */
+ buffer->skb = skb;
+ buffer->flags = EFX_TX_BUF_SKB;
+ } else if (st->packet_space != 0) {
+ buffer->flags = EFX_TX_BUF_CONT;
+ }
+
+ if (st->in_len == 0) {
+ /* Transfer ownership of the DMA mapping */
+ buffer->unmap_len = st->unmap_len;
+ buffer->dma_offset = buffer->unmap_len - buffer->len;
+ buffer->flags |= st->dma_flags;
+ st->unmap_len = 0;
+ }
+
+ st->dma_addr += n;
+}
+
+
+/**
+ * tso_start_new_packet - generate a new header and prepare for the new packet
+ * @tx_queue: Efx TX queue
+ * @skb: Socket buffer
+ * @st: TSO state
+ *
+ * Generate a new header and prepare for the new packet. Return 0 on
+ * success, or -%ENOMEM if failed to alloc header.
+ */
+static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
+ const struct sk_buff *skb,
+ struct tso_state *st)
+{
+ struct efx_tx_buffer *buffer =
+ efx_tx_queue_get_insert_buffer(tx_queue);
+ bool is_last = st->out_len <= skb_shinfo(skb)->gso_size;
+ u8 tcp_flags_clear;
+
+ if (!is_last) {
+ st->packet_space = skb_shinfo(skb)->gso_size;
+ tcp_flags_clear = 0x09; /* mask out FIN and PSH */
+ } else {
+ st->packet_space = st->out_len;
+ tcp_flags_clear = 0x00;
+ }
+
+ if (!st->header_unmap_len) {
+ /* Allocate and insert a DMA-mapped header buffer. */
+ struct tcphdr *tsoh_th;
+ unsigned ip_length;
+ u8 *header;
+ int rc;
+
+ header = efx_tsoh_get_buffer(tx_queue, buffer, st->header_len);
+ if (!header)
+ return -ENOMEM;
+
+ tsoh_th = (struct tcphdr *)(header + st->tcp_off);
+
+ /* Copy and update the headers. */
+ memcpy(header, skb->data, st->header_len);
+
+ tsoh_th->seq = htonl(st->seqnum);
+ ((u8 *)tsoh_th)[13] &= ~tcp_flags_clear;
+
+ ip_length = st->ip_base_len + st->packet_space;
+
+ if (st->protocol == htons(ETH_P_IP)) {
+ struct iphdr *tsoh_iph =
+ (struct iphdr *)(header + st->ip_off);
+
+ tsoh_iph->tot_len = htons(ip_length);
+ tsoh_iph->id = htons(st->ipv4_id);
+ } else {
+ struct ipv6hdr *tsoh_iph =
+ (struct ipv6hdr *)(header + st->ip_off);
+
+ tsoh_iph->payload_len = htons(ip_length);
+ }
+
+ rc = efx_tso_put_header(tx_queue, buffer, header);
+ if (unlikely(rc))
+ return rc;
+ } else {
+ /* Send the original headers with a TSO option descriptor
+ * in front
+ */
+ u8 tcp_flags = ((u8 *)tcp_hdr(skb))[13] & ~tcp_flags_clear;
+
+ buffer->flags = EFX_TX_BUF_OPTION;
+ buffer->len = 0;
+ buffer->unmap_len = 0;
+ EFX_POPULATE_QWORD_5(buffer->option,
+ ESF_DZ_TX_DESC_IS_OPT, 1,
+ ESF_DZ_TX_OPTION_TYPE,
+ ESE_DZ_TX_OPTION_DESC_TSO,
+ ESF_DZ_TX_TSO_TCP_FLAGS, tcp_flags,
+ ESF_DZ_TX_TSO_IP_ID, st->ipv4_id,
+ ESF_DZ_TX_TSO_TCP_SEQNO, st->seqnum);
+ ++tx_queue->insert_count;
+
+ /* We mapped the headers in tso_start(). Unmap them
+ * when the last segment is completed.
+ */
+ buffer = efx_tx_queue_get_insert_buffer(tx_queue);
+ buffer->dma_addr = st->header_dma_addr;
+ buffer->len = st->header_len;
+ if (is_last) {
+ buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_MAP_SINGLE;
+ buffer->unmap_len = st->header_unmap_len;
+ buffer->dma_offset = 0;
+ /* Ensure we only unmap them once in case of a
+ * later DMA mapping error and rollback
+ */
+ st->header_unmap_len = 0;
+ } else {
+ buffer->flags = EFX_TX_BUF_CONT;
+ buffer->unmap_len = 0;
+ }
+ ++tx_queue->insert_count;
+ }
+
+ st->seqnum += skb_shinfo(skb)->gso_size;
+
+ /* Linux leaves suitable gaps in the IP ID space for us to fill. */
+ ++st->ipv4_id;
+
+ ++tx_queue->tso_packets;
+
+ ++tx_queue->tx_packets;
+
+ return 0;
+}
+
+
+/**
+ * efx_enqueue_skb_tso - segment and transmit a TSO socket buffer
+ * @tx_queue: Efx TX queue
+ * @skb: Socket buffer
+ *
+ * Context: You must hold netif_tx_lock() to call this function.
+ *
+ * Add socket buffer @skb to @tx_queue, doing TSO. In all cases @skb is
+ * consumed and %NETDEV_TX_OK is returned, even if the packet could not
+ * be enqueued (e.g. on a memory allocation or DMA mapping failure).
+ */
+static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
+ struct sk_buff *skb)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ unsigned int old_insert_count = tx_queue->insert_count;
+ int frag_i, rc;
+ struct tso_state state;
+
+ /* Find the packet protocol and sanity-check it */
+ state.protocol = efx_tso_check_protocol(skb);
+
+ rc = tso_start(&state, efx, skb);
+ if (rc)
+ goto mem_err;
+
+ if (likely(state.in_len == 0)) {
+ /* Grab the first payload fragment. */
+ EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1);
+ frag_i = 0;
+ rc = tso_get_fragment(&state, efx,
+ skb_shinfo(skb)->frags + frag_i);
+ if (rc)
+ goto mem_err;
+ } else {
+ /* Payload starts in the header area. */
+ frag_i = -1;
+ }
+
+ if (tso_start_new_packet(tx_queue, skb, &state) < 0)
+ goto mem_err;
+
+ while (1) {
+ tso_fill_packet_with_fragment(tx_queue, skb, &state);
+
+ /* Move onto the next fragment? */
+ if (state.in_len == 0) {
+ if (++frag_i >= skb_shinfo(skb)->nr_frags)
+ /* End of payload reached. */
+ break;
+ rc = tso_get_fragment(&state, efx,
+ skb_shinfo(skb)->frags + frag_i);
+ if (rc)
+ goto mem_err;
+ }
+
+ /* Start at new packet? */
+ if (state.packet_space == 0 &&
+ tso_start_new_packet(tx_queue, skb, &state) < 0)
+ goto mem_err;
+ }
+
+ netdev_tx_sent_queue(tx_queue->core_txq, skb->len);
+
+ efx_tx_maybe_stop_queue(tx_queue);
+
+ /* Pass off to hardware */
+ if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq))
+ efx_nic_push_buffers(tx_queue);
+
+ tx_queue->tso_bursts++;
+ return NETDEV_TX_OK;
+
+ mem_err:
+ netif_err(efx, tx_err, efx->net_dev,
+ "Out of memory for TSO headers, or DMA mapping error\n");
+ dev_kfree_skb_any(skb);
+
+ /* Free the DMA mapping we were in the process of writing out */
+ if (state.unmap_len) {
+ if (state.dma_flags & EFX_TX_BUF_MAP_SINGLE)
+ dma_unmap_single(&efx->pci_dev->dev, state.unmap_addr,
+ state.unmap_len, DMA_TO_DEVICE);
+ else
+ dma_unmap_page(&efx->pci_dev->dev, state.unmap_addr,
+ state.unmap_len, DMA_TO_DEVICE);
+ }
+
+ /* Free the header DMA mapping, if using option descriptors */
+ if (state.header_unmap_len)
+ dma_unmap_single(&efx->pci_dev->dev, state.header_dma_addr,
+ state.header_unmap_len, DMA_TO_DEVICE);
+
+ efx_enqueue_unwind(tx_queue, old_insert_count);
+ return NETDEV_TX_OK;
+}
diff --git a/kernel/drivers/net/ethernet/sfc/txc43128_phy.c b/kernel/drivers/net/ethernet/sfc/txc43128_phy.c
new file mode 100644
index 000000000..3d5ee3259
--- /dev/null
+++ b/kernel/drivers/net/ethernet/sfc/txc43128_phy.c
@@ -0,0 +1,560 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2006-2011 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+/*
+ * Driver for Transwitch/Mysticom CX4 retimer
+ * see www.transwitch.com, part is TXC-43128
+ */
+
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include "efx.h"
+#include "mdio_10g.h"
+#include "phy.h"
+#include "nic.h"
+
+/* We expect these MMDs to be in the package */
+#define TXC_REQUIRED_DEVS (MDIO_DEVS_PCS | \
+ MDIO_DEVS_PMAPMD | \
+ MDIO_DEVS_PHYXS)
+
+#define TXC_LOOPBACKS ((1 << LOOPBACK_PCS) | \
+ (1 << LOOPBACK_PMAPMD) | \
+ (1 << LOOPBACK_PHYXS_WS))
+
+/**************************************************************************
+ *
+ * Compile-time config
+ *
+ **************************************************************************
+ */
+#define TXCNAME "TXC43128"
+/* Total length of time we'll wait for the PHY to come out of reset (ms) */
+#define TXC_MAX_RESET_TIME 500
+/* Interval between checks (ms) */
+#define TXC_RESET_WAIT 10
+/* How long to run BIST (us) */
+#define TXC_BIST_DURATION 50
+
+/**************************************************************************
+ *
+ * Register definitions
+ *
+ **************************************************************************
+ */
+
+/* Command register */
+#define TXC_GLRGS_GLCMD 0xc004
+/* Useful bits in command register */
+/* Lane power-down */
+#define TXC_GLCMD_L01PD_LBN 5
+#define TXC_GLCMD_L23PD_LBN 6
+/* Limited SW reset: preserves configuration but
+ * initiates a logic reset. Self-clearing */
+#define TXC_GLCMD_LMTSWRST_LBN 14
+
+/* Signal Quality Control */
+#define TXC_GLRGS_GSGQLCTL 0xc01a
+/* Enable bit */
+#define TXC_GSGQLCT_SGQLEN_LBN 15
+/* Lane selection */
+#define TXC_GSGQLCT_LNSL_LBN 13
+#define TXC_GSGQLCT_LNSL_WIDTH 2
+
+/* Analog TX control */
+#define TXC_ALRGS_ATXCTL 0xc040
+/* Lane power-down */
+#define TXC_ATXCTL_TXPD3_LBN 15
+#define TXC_ATXCTL_TXPD2_LBN 14
+#define TXC_ATXCTL_TXPD1_LBN 13
+#define TXC_ATXCTL_TXPD0_LBN 12
+
+/* Amplitude on lanes 0, 1 */
+#define TXC_ALRGS_ATXAMP0 0xc041
+/* Amplitude on lanes 2, 3 */
+#define TXC_ALRGS_ATXAMP1 0xc042
+/* Bit position of value for lane 0 (or 2) */
+#define TXC_ATXAMP_LANE02_LBN 3
+/* Bit position of value for lane 1 (or 3) */
+#define TXC_ATXAMP_LANE13_LBN 11
+
+#define TXC_ATXAMP_1280_mV 0
+#define TXC_ATXAMP_1200_mV 8
+#define TXC_ATXAMP_1120_mV 12
+#define TXC_ATXAMP_1060_mV 14
+#define TXC_ATXAMP_0820_mV 25
+#define TXC_ATXAMP_0720_mV 26
+#define TXC_ATXAMP_0580_mV 27
+#define TXC_ATXAMP_0440_mV 28
+
+#define TXC_ATXAMP_0820_BOTH \
+ ((TXC_ATXAMP_0820_mV << TXC_ATXAMP_LANE02_LBN) \
+ | (TXC_ATXAMP_0820_mV << TXC_ATXAMP_LANE13_LBN))
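+
+/* Worked example (illustrative only): TXC_ATXAMP_0820_BOTH expands to
+ * (25 << 3) | (25 << 11) = 0x00c8 | 0xc800 = 0xc8c8, i.e. the 820 mV
+ * amplitude code applied to both lanes covered by one register.
+ */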
+
+#define TXC_ATXAMP_DEFAULT 0x6060 /* From databook */
+
+/* Preemphasis on lanes 0, 1 */
+#define TXC_ALRGS_ATXPRE0 0xc043
+/* Preemphasis on lanes 2, 3 */
+#define TXC_ALRGS_ATXPRE1 0xc044
+
+#define TXC_ATXPRE_NONE 0
+#define TXC_ATXPRE_DEFAULT 0x1010 /* From databook */
+
+#define TXC_ALRGS_ARXCTL 0xc045
+/* Lane power-down */
+#define TXC_ARXCTL_RXPD3_LBN 15
+#define TXC_ARXCTL_RXPD2_LBN 14
+#define TXC_ARXCTL_RXPD1_LBN 13
+#define TXC_ARXCTL_RXPD0_LBN 12
+
+/* Main control */
+#define TXC_MRGS_CTL 0xc340
+/* Bits in main control */
+#define TXC_MCTL_RESET_LBN 15 /* Self clear */
+#define TXC_MCTL_TXLED_LBN 14 /* 1 to show align status */
+#define TXC_MCTL_RXLED_LBN 13 /* 1 to show align status */
+
+/* GPIO output */
+#define TXC_GPIO_OUTPUT 0xc346
+#define TXC_GPIO_DIR 0xc348
+
+/* Vendor-specific BIST registers */
+#define TXC_BIST_CTL 0xc280
+#define TXC_BIST_TXFRMCNT 0xc281
+#define TXC_BIST_RX0FRMCNT 0xc282
+#define TXC_BIST_RX1FRMCNT 0xc283
+#define TXC_BIST_RX2FRMCNT 0xc284
+#define TXC_BIST_RX3FRMCNT 0xc285
+#define TXC_BIST_RX0ERRCNT 0xc286
+#define TXC_BIST_RX1ERRCNT 0xc287
+#define TXC_BIST_RX2ERRCNT 0xc288
+#define TXC_BIST_RX3ERRCNT 0xc289
+
+/* BIST type (controls bit pattern in test) */
+#define TXC_BIST_CTRL_TYPE_LBN 10
+#define TXC_BIST_CTRL_TYPE_TSD 0 /* TranSwitch Deterministic */
+#define TXC_BIST_CTRL_TYPE_CRP 1 /* CRPAT standard */
+#define TXC_BIST_CTRL_TYPE_CJP 2 /* CJPAT standard */
+#define TXC_BIST_CTRL_TYPE_TSR 3 /* TranSwitch pseudo-random */
+/* Set this to 1 for 10 bit and 0 for 8 bit */
+#define TXC_BIST_CTRL_B10EN_LBN 12
+/* Enable BIST (write 0 to disable) */
+#define TXC_BIST_CTRL_ENAB_LBN 13
+/* Stop BIST (self-clears when stop complete) */
+#define TXC_BIST_CTRL_STOP_LBN 14
+/* Start BIST (cleared by writing 1 to STOP) */
+#define TXC_BIST_CTRL_STRT_LBN 15
+
+/* Mt. Diablo test configuration */
+#define TXC_MTDIABLO_CTRL 0xc34f
+#define TXC_MTDIABLO_CTRL_PMA_LOOP_LBN 10
+
+struct txc43128_data {
+ unsigned long bug10934_timer;
+ enum efx_phy_mode phy_mode;
+ enum efx_loopback_mode loopback_mode;
+};
+
+/* The PHY sometimes needs a reset to bring the link back up. So long as
+ * it reports link down, we reset it every 5 seconds.
+ */
+#define BUG10934_RESET_INTERVAL (5 * HZ)
+
+/* Perform a reset that doesn't clear configuration changes */
+static void txc_reset_logic(struct efx_nic *efx);
+
+/* Set the output value of a gpio */
+void falcon_txc_set_gpio_val(struct efx_nic *efx, int pin, int on)
+{
+ efx_mdio_set_flag(efx, MDIO_MMD_PHYXS, TXC_GPIO_OUTPUT, 1 << pin, on);
+}
+
+/* Set up the GPIO direction register */
+void falcon_txc_set_gpio_dir(struct efx_nic *efx, int pin, int dir)
+{
+ efx_mdio_set_flag(efx, MDIO_MMD_PHYXS, TXC_GPIO_DIR, 1 << pin, dir);
+}
+
+/* Reset the PMA/PMD MMD. The documentation is explicit that this does a
+ * global reset (it's less clear what reset of other MMDs does). */
+static int txc_reset_phy(struct efx_nic *efx)
+{
+ int rc = efx_mdio_reset_mmd(efx, MDIO_MMD_PMAPMD,
+ TXC_MAX_RESET_TIME / TXC_RESET_WAIT,
+ TXC_RESET_WAIT);
+ if (rc < 0)
+ goto fail;
+
+ /* Check that all the MMDs we expect are present and responding. */
+ rc = efx_mdio_check_mmds(efx, TXC_REQUIRED_DEVS);
+ if (rc < 0)
+ goto fail;
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, TXCNAME ": reset timed out!\n");
+ return rc;
+}
+
+/* Run a single BIST on one MMD */
+static int txc_bist_one(struct efx_nic *efx, int mmd, int test)
+{
+ int ctrl, bctl;
+ int lane;
+ int rc = 0;
+
+ /* Set PMA to test into loopback using Mt Diablo reg as per app note */
+ ctrl = efx_mdio_read(efx, MDIO_MMD_PCS, TXC_MTDIABLO_CTRL);
+ ctrl |= (1 << TXC_MTDIABLO_CTRL_PMA_LOOP_LBN);
+ efx_mdio_write(efx, MDIO_MMD_PCS, TXC_MTDIABLO_CTRL, ctrl);
+
+ /* The BIST app. note lists these as 3 distinct steps. */
+ /* Set the BIST type */
+ bctl = (test << TXC_BIST_CTRL_TYPE_LBN);
+ efx_mdio_write(efx, mmd, TXC_BIST_CTL, bctl);
+
+ /* Set the BSTEN bit in the BIST Control register to enable */
+ bctl |= (1 << TXC_BIST_CTRL_ENAB_LBN);
+ efx_mdio_write(efx, mmd, TXC_BIST_CTL, bctl);
+
+ /* Set the BSTRT bit in the BIST Control register */
+ efx_mdio_write(efx, mmd, TXC_BIST_CTL,
+ bctl | (1 << TXC_BIST_CTRL_STRT_LBN));
+
+ /* Wait. */
+ udelay(TXC_BIST_DURATION);
+
+ /* Set the BSTOP bit in the BIST Control register */
+ bctl |= (1 << TXC_BIST_CTRL_STOP_LBN);
+ efx_mdio_write(efx, mmd, TXC_BIST_CTL, bctl);
+
+ /* The STOP bit should go off when things have stopped */
+ while (bctl & (1 << TXC_BIST_CTRL_STOP_LBN))
+ bctl = efx_mdio_read(efx, mmd, TXC_BIST_CTL);
+
+ /* Check that all the error counts are 0 and all the frame counts
+ * are non-zero */
+ for (lane = 0; lane < 4; lane++) {
+ int count = efx_mdio_read(efx, mmd, TXC_BIST_RX0ERRCNT + lane);
+ if (count != 0) {
+ netif_err(efx, hw, efx->net_dev, TXCNAME": BIST error. "
+ "Lane %d had %d errs\n", lane, count);
+ rc = -EIO;
+ }
+ count = efx_mdio_read(efx, mmd, TXC_BIST_RX0FRMCNT + lane);
+ if (count == 0) {
+ netif_err(efx, hw, efx->net_dev, TXCNAME": BIST error. "
+ "Lane %d got 0 frames\n", lane);
+ rc = -EIO;
+ }
+ }
+
+ if (rc == 0)
+ netif_info(efx, hw, efx->net_dev, TXCNAME": BIST pass\n");
+
+ /* Disable BIST */
+ efx_mdio_write(efx, mmd, TXC_BIST_CTL, 0);
+
+ /* Turn off loopback */
+ ctrl &= ~(1 << TXC_MTDIABLO_CTRL_PMA_LOOP_LBN);
+ efx_mdio_write(efx, MDIO_MMD_PCS, TXC_MTDIABLO_CTRL, ctrl);
+
+ return rc;
+}
+
+static int txc_bist(struct efx_nic *efx)
+{
+ return txc_bist_one(efx, MDIO_MMD_PCS, TXC_BIST_CTRL_TYPE_TSD);
+}
+
+/* Push the non-configurable defaults into the PHY. This must be
+ * done after every full reset */
+static void txc_apply_defaults(struct efx_nic *efx)
+{
+ int mctrl;
+
+ /* Turn amplitude down and preemphasis off on the host side
+ * (PHY<->MAC) as this is believed less likely to upset Falcon
+ * and no adverse effects have been noted. It probably also
+ * saves a picowatt or two */
+
+ /* Turn off preemphasis */
+ efx_mdio_write(efx, MDIO_MMD_PHYXS, TXC_ALRGS_ATXPRE0, TXC_ATXPRE_NONE);
+ efx_mdio_write(efx, MDIO_MMD_PHYXS, TXC_ALRGS_ATXPRE1, TXC_ATXPRE_NONE);
+
+ /* Turn down the amplitude */
+ efx_mdio_write(efx, MDIO_MMD_PHYXS,
+ TXC_ALRGS_ATXAMP0, TXC_ATXAMP_0820_BOTH);
+ efx_mdio_write(efx, MDIO_MMD_PHYXS,
+ TXC_ALRGS_ATXAMP1, TXC_ATXAMP_0820_BOTH);
+
+ /* Set the line side amplitude and preemphasis to the databook
+ * defaults as an erratum causes them to be 0 on at least some
+ * PHY revisions */
+ efx_mdio_write(efx, MDIO_MMD_PMAPMD,
+ TXC_ALRGS_ATXPRE0, TXC_ATXPRE_DEFAULT);
+ efx_mdio_write(efx, MDIO_MMD_PMAPMD,
+ TXC_ALRGS_ATXPRE1, TXC_ATXPRE_DEFAULT);
+ efx_mdio_write(efx, MDIO_MMD_PMAPMD,
+ TXC_ALRGS_ATXAMP0, TXC_ATXAMP_DEFAULT);
+ efx_mdio_write(efx, MDIO_MMD_PMAPMD,
+ TXC_ALRGS_ATXAMP1, TXC_ATXAMP_DEFAULT);
+
+ /* Set up the LEDs */
+ mctrl = efx_mdio_read(efx, MDIO_MMD_PHYXS, TXC_MRGS_CTL);
+
+ /* Set the Green and Red LEDs to their default modes */
+ mctrl &= ~((1 << TXC_MCTL_TXLED_LBN) | (1 << TXC_MCTL_RXLED_LBN));
+ efx_mdio_write(efx, MDIO_MMD_PHYXS, TXC_MRGS_CTL, mctrl);
+
+ /* Databook recommends doing this after configuration changes */
+ txc_reset_logic(efx);
+
+ falcon_board(efx)->type->init_phy(efx);
+}
+
+static int txc43128_phy_probe(struct efx_nic *efx)
+{
+ struct txc43128_data *phy_data;
+
+ /* Allocate phy private storage */
+ phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
+ if (!phy_data)
+ return -ENOMEM;
+ efx->phy_data = phy_data;
+ phy_data->phy_mode = efx->phy_mode;
+
+ efx->mdio.mmds = TXC_REQUIRED_DEVS;
+ efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
+
+ efx->loopback_modes = TXC_LOOPBACKS | FALCON_XMAC_LOOPBACKS;
+
+ return 0;
+}
+
+/* Initialisation entry point for this PHY driver */
+static int txc43128_phy_init(struct efx_nic *efx)
+{
+ int rc;
+
+ rc = txc_reset_phy(efx);
+ if (rc < 0)
+ return rc;
+
+ rc = txc_bist(efx);
+ if (rc < 0)
+ return rc;
+
+ txc_apply_defaults(efx);
+
+ return 0;
+}
+
+/* Set the lane power down state in the global registers */
+static void txc_glrgs_lane_power(struct efx_nic *efx, int mmd)
+{
+ int pd = (1 << TXC_GLCMD_L01PD_LBN) | (1 << TXC_GLCMD_L23PD_LBN);
+ int ctl = efx_mdio_read(efx, mmd, TXC_GLRGS_GLCMD);
+
+ if (!(efx->phy_mode & PHY_MODE_LOW_POWER))
+ ctl &= ~pd;
+ else
+ ctl |= pd;
+
+ efx_mdio_write(efx, mmd, TXC_GLRGS_GLCMD, ctl);
+}
+
+/* Set the lane power down state in the analog control registers */
+static void txc_analog_lane_power(struct efx_nic *efx, int mmd)
+{
+ int txpd = (1 << TXC_ATXCTL_TXPD3_LBN) | (1 << TXC_ATXCTL_TXPD2_LBN)
+ | (1 << TXC_ATXCTL_TXPD1_LBN) | (1 << TXC_ATXCTL_TXPD0_LBN);
+ int rxpd = (1 << TXC_ARXCTL_RXPD3_LBN) | (1 << TXC_ARXCTL_RXPD2_LBN)
+ | (1 << TXC_ARXCTL_RXPD1_LBN) | (1 << TXC_ARXCTL_RXPD0_LBN);
+ int txctl = efx_mdio_read(efx, mmd, TXC_ALRGS_ATXCTL);
+ int rxctl = efx_mdio_read(efx, mmd, TXC_ALRGS_ARXCTL);
+
+ if (!(efx->phy_mode & PHY_MODE_LOW_POWER)) {
+ txctl &= ~txpd;
+ rxctl &= ~rxpd;
+ } else {
+ txctl |= txpd;
+ rxctl |= rxpd;
+ }
+
+ efx_mdio_write(efx, mmd, TXC_ALRGS_ATXCTL, txctl);
+ efx_mdio_write(efx, mmd, TXC_ALRGS_ARXCTL, rxctl);
+}
+
+static void txc_set_power(struct efx_nic *efx)
+{
+ /* According to the data book, all the MMDs can do low power */
+ efx_mdio_set_mmds_lpower(efx,
+ !!(efx->phy_mode & PHY_MODE_LOW_POWER),
+ TXC_REQUIRED_DEVS);
+
+ /* Global register bank is in PCS, PHY XS. These control the host
+ * side and line side settings respectively. */
+ txc_glrgs_lane_power(efx, MDIO_MMD_PCS);
+ txc_glrgs_lane_power(efx, MDIO_MMD_PHYXS);
+
+ /* Analog register bank in PMA/PMD, PHY XS */
+ txc_analog_lane_power(efx, MDIO_MMD_PMAPMD);
+ txc_analog_lane_power(efx, MDIO_MMD_PHYXS);
+}
+
+static void txc_reset_logic_mmd(struct efx_nic *efx, int mmd)
+{
+ int val = efx_mdio_read(efx, mmd, TXC_GLRGS_GLCMD);
+ int tries = 50;
+
+ val |= (1 << TXC_GLCMD_LMTSWRST_LBN);
+ efx_mdio_write(efx, mmd, TXC_GLRGS_GLCMD, val);
+ while (tries--) {
+ val = efx_mdio_read(efx, mmd, TXC_GLRGS_GLCMD);
+ if (!(val & (1 << TXC_GLCMD_LMTSWRST_LBN)))
+ break;
+ udelay(1);
+ }
+ if (!tries)
+ netif_info(efx, hw, efx->net_dev,
+ TXCNAME " Logic reset timed out!\n");
+}
+
+/* Perform a logic reset. This preserves the configuration registers
+ * and is needed for some configuration changes to take effect */
+static void txc_reset_logic(struct efx_nic *efx)
+{
+ /* The data sheet claims we can do the logic reset on either the
+ * PCS or the PHYXS and the result is a reset of both host- and
+ * line-side logic. */
+ txc_reset_logic_mmd(efx, MDIO_MMD_PCS);
+}
+
+static bool txc43128_phy_read_link(struct efx_nic *efx)
+{
+ return efx_mdio_links_ok(efx, TXC_REQUIRED_DEVS);
+}
+
+static int txc43128_phy_reconfigure(struct efx_nic *efx)
+{
+ struct txc43128_data *phy_data = efx->phy_data;
+ enum efx_phy_mode mode_change = efx->phy_mode ^ phy_data->phy_mode;
+ bool loop_change = LOOPBACK_CHANGED(phy_data, efx, TXC_LOOPBACKS);
+
+ if (efx->phy_mode & mode_change & PHY_MODE_TX_DISABLED) {
+ txc_reset_phy(efx);
+ txc_apply_defaults(efx);
+ falcon_reset_xaui(efx);
+ mode_change &= ~PHY_MODE_TX_DISABLED;
+ }
+
+ efx_mdio_transmit_disable(efx);
+ efx_mdio_phy_reconfigure(efx);
+ if (mode_change & PHY_MODE_LOW_POWER)
+ txc_set_power(efx);
+
+ /* The data sheet claims this is required after every reconfiguration
+ * (note at end of 7.1), but we mustn't do it when nothing changes as
+ * it glitches the link, and reconfigure gets called on link change,
+ * so we get an IRQ storm on link up. */
+ if (loop_change || mode_change)
+ txc_reset_logic(efx);
+
+ phy_data->phy_mode = efx->phy_mode;
+ phy_data->loopback_mode = efx->loopback_mode;
+
+ return 0;
+}
+
+static void txc43128_phy_fini(struct efx_nic *efx)
+{
+ /* Disable link events */
+ efx_mdio_write(efx, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL, 0);
+}
+
+static void txc43128_phy_remove(struct efx_nic *efx)
+{
+ kfree(efx->phy_data);
+ efx->phy_data = NULL;
+}
+
+/* Periodic callback: this exists mainly to poll link status as we
+ * don't use LASI interrupts */
+static bool txc43128_phy_poll(struct efx_nic *efx)
+{
+ struct txc43128_data *data = efx->phy_data;
+ bool was_up = efx->link_state.up;
+
+ efx->link_state.up = txc43128_phy_read_link(efx);
+ efx->link_state.speed = 10000;
+ efx->link_state.fd = true;
+ efx->link_state.fc = efx->wanted_fc;
+
+ if (efx->link_state.up || (efx->loopback_mode != LOOPBACK_NONE)) {
+ data->bug10934_timer = jiffies;
+ } else {
+ if (time_after_eq(jiffies, (data->bug10934_timer +
+ BUG10934_RESET_INTERVAL))) {
+ data->bug10934_timer = jiffies;
+ txc_reset_logic(efx);
+ }
+ }
+
+ return efx->link_state.up != was_up;
+}
+
+static const char *const txc43128_test_names[] = {
+ "bist"
+};
+
+static const char *txc43128_test_name(struct efx_nic *efx, unsigned int index)
+{
+ if (index < ARRAY_SIZE(txc43128_test_names))
+ return txc43128_test_names[index];
+ return NULL;
+}
+
+static int txc43128_run_tests(struct efx_nic *efx, int *results, unsigned flags)
+{
+ int rc;
+
+ if (!(flags & ETH_TEST_FL_OFFLINE))
+ return 0;
+
+ rc = txc_reset_phy(efx);
+ if (rc < 0)
+ return rc;
+
+ rc = txc_bist(efx);
+ txc_apply_defaults(efx);
+ results[0] = rc ? -1 : 1;
+ return rc;
+}
+
+static void txc43128_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
+{
+ mdio45_ethtool_gset(&efx->mdio, ecmd);
+}
+
+const struct efx_phy_operations falcon_txc_phy_ops = {
+ .probe = txc43128_phy_probe,
+ .init = txc43128_phy_init,
+ .reconfigure = txc43128_phy_reconfigure,
+ .poll = txc43128_phy_poll,
+ .fini = txc43128_phy_fini,
+ .remove = txc43128_phy_remove,
+ .get_settings = txc43128_get_settings,
+ .set_settings = efx_mdio_set_settings,
+ .test_alive = efx_mdio_test_alive,
+ .run_tests = txc43128_run_tests,
+ .test_name = txc43128_test_name,
+};
diff --git a/kernel/drivers/net/ethernet/sfc/vfdi.h b/kernel/drivers/net/ethernet/sfc/vfdi.h
new file mode 100644
index 000000000..f62901d4c
--- /dev/null
+++ b/kernel/drivers/net/ethernet/sfc/vfdi.h
@@ -0,0 +1,255 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2010-2012 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+#ifndef _VFDI_H
+#define _VFDI_H
+
+/**
+ * DOC: Virtual Function Driver Interface
+ *
+ * This file contains software structures used to form a two way
+ * communication channel between the VF driver and the PF driver,
+ * named Virtual Function Driver Interface (VFDI).
+ *
+ * For the purposes of VFDI, a page is a memory region with size and
+ * alignment of 4K. All addresses are DMA addresses to be used within
+ * the domain of the relevant VF.
+ *
+ * The only hardware-defined channels for a VF driver to communicate
+ * with the PF driver are the event mailboxes (%FR_CZ_USR_EV
+ * registers). Writing to these registers generates an event with
+ * EV_CODE = EV_CODE_USR_EV, USER_QID set to the index of the mailbox
+ * and USER_EV_REG_VALUE set to the value written. The PF driver may
+ * direct or disable delivery of these events by setting
+ * %FR_CZ_USR_EV_CFG.
+ *
+ * The PF driver can send arbitrary events to arbitrary event queues.
+ * However, for consistency, VFDI events from the PF are defined to
+ * follow the same form and be sent to the first event queue assigned
+ * to the VF while that queue is enabled by the VF driver.
+ *
+ * The general form of the variable bits of VFDI events is:
+ *
+ *     0                16                         24     31
+ *   | DATA             | TYPE                     | SEQ   |
+ *
+ * SEQ is a sequence number which should be incremented by 1 (modulo
+ * 256) for each event. The sequence numbers used in each direction
+ * are independent.
+ *
+ * The VF submits requests of type &struct vfdi_req by sending the
+ * address of the request (ADDR) in a series of 4 events:
+ *
+ *     0                16                         24     31
+ *   | ADDR[0:15]       | VFDI_EV_TYPE_REQ_WORD0   | SEQ   |
+ *   | ADDR[16:31]      | VFDI_EV_TYPE_REQ_WORD1   | SEQ+1 |
+ *   | ADDR[32:47]      | VFDI_EV_TYPE_REQ_WORD2   | SEQ+2 |
+ *   | ADDR[48:63]      | VFDI_EV_TYPE_REQ_WORD3   | SEQ+3 |
+ *
+ * The address must be page-aligned. After receiving such a valid
+ * series of events, the PF driver will attempt to read the request
+ * and write a response to the same address. In case of an invalid
+ * sequence of events or a DMA error, there will be no response.
+ *
+ * The VF driver may request that the PF driver writes status
+ * information into its domain asynchronously. After writing the
+ * status, the PF driver will send an event of the form:
+ *
+ *     0                16                         24     31
+ *   | reserved         | VFDI_EV_TYPE_STATUS      | SEQ   |
+ *
+ * In case the VF must be reset for any reason, the PF driver will
+ * send an event of the form:
+ *
+ *     0                16                         24     31
+ *   | reserved         | VFDI_EV_TYPE_RESET       | SEQ   |
+ *
+ * It is then the responsibility of the VF driver to request
+ * reinitialisation of its queues.
+ */
+#define VFDI_EV_SEQ_LBN 24
+#define VFDI_EV_SEQ_WIDTH 8
+#define VFDI_EV_TYPE_LBN 16
+#define VFDI_EV_TYPE_WIDTH 8
+#define VFDI_EV_TYPE_REQ_WORD0 0
+#define VFDI_EV_TYPE_REQ_WORD1 1
+#define VFDI_EV_TYPE_REQ_WORD2 2
+#define VFDI_EV_TYPE_REQ_WORD3 3
+#define VFDI_EV_TYPE_STATUS 4
+#define VFDI_EV_TYPE_RESET 5
+#define VFDI_EV_DATA_LBN 0
+#define VFDI_EV_DATA_WIDTH 16
+
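As a worked example of the event layout above, here is a minimal sketch (not part of the patch) of how a VF driver could split a page-aligned request address across the four VFDI_EV_TYPE_REQ_WORD* events. The helper name vfdi_send_request() and the vf_post_user_event() callback are hypothetical stand-ins for whatever routine writes the FR_CZ_USR_EV mailbox.

static void vfdi_send_request(u64 req_addr, u8 *seq,
			      void (*vf_post_user_event)(u32 ev))
{
	int word;

	for (word = 0; word < 4; word++) {
		u32 ev = 0;

		/* DATA: 16 bits of the request address per event */
		ev |= (u32)((req_addr >> (16 * word)) & 0xffff)
				<< VFDI_EV_DATA_LBN;
		/* TYPE: REQ_WORD0..REQ_WORD3 in order */
		ev |= (u32)(VFDI_EV_TYPE_REQ_WORD0 + word)
				<< VFDI_EV_TYPE_LBN;
		/* SEQ: increments by 1 (mod 256) for each event */
		ev |= (u32)((*seq)++) << VFDI_EV_SEQ_LBN;

		vf_post_user_event(ev);
	}
}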
+struct vfdi_endpoint {
+ u8 mac_addr[ETH_ALEN];
+ __be16 tci;
+};
+
+/**
+ * enum vfdi_op - VFDI operation enumeration
+ * @VFDI_OP_RESPONSE: Indicates a response to the request.
+ * @VFDI_OP_INIT_EVQ: Initialize SRAM entries and initialize an EVQ.
+ * @VFDI_OP_INIT_RXQ: Initialize SRAM entries and initialize an RXQ.
+ * @VFDI_OP_INIT_TXQ: Initialize SRAM entries and initialize a TXQ.
+ * @VFDI_OP_FINI_ALL_QUEUES: Flush all queues, finalize all queues, then
+ * finalize the SRAM entries.
+ * @VFDI_OP_INSERT_FILTER: Insert a MAC filter targeting the given RXQ.
+ * @VFDI_OP_REMOVE_ALL_FILTERS: Remove all filters.
+ * @VFDI_OP_SET_STATUS_PAGE: Set the DMA page(s) used for status updates
+ * from PF and write the initial status.
+ * @VFDI_OP_CLEAR_STATUS_PAGE: Clear the DMA page(s) used for status
+ * updates from PF.
+ */
+enum vfdi_op {
+ VFDI_OP_RESPONSE = 0,
+ VFDI_OP_INIT_EVQ = 1,
+ VFDI_OP_INIT_RXQ = 2,
+ VFDI_OP_INIT_TXQ = 3,
+ VFDI_OP_FINI_ALL_QUEUES = 4,
+ VFDI_OP_INSERT_FILTER = 5,
+ VFDI_OP_REMOVE_ALL_FILTERS = 6,
+ VFDI_OP_SET_STATUS_PAGE = 7,
+ VFDI_OP_CLEAR_STATUS_PAGE = 8,
+ VFDI_OP_LIMIT,
+};
+
+/* Response codes for VFDI operations. Other values may be used in future. */
+#define VFDI_RC_SUCCESS 0
+#define VFDI_RC_ENOMEM (-12)
+#define VFDI_RC_EINVAL (-22)
+#define VFDI_RC_EOPNOTSUPP (-95)
+#define VFDI_RC_ETIMEDOUT (-110)
+
+/**
+ * struct vfdi_req - Request from VF driver to PF driver
+ * @op: Operation code or response indicator, taken from &enum vfdi_op.
+ * @rc: Response code. Set to 0 on success or a negative error code on failure.
+ * @u.init_evq.index: Index of event queue to create.
+ * @u.init_evq.buf_count: Number of 4k buffers backing event queue.
+ * @u.init_evq.addr: Array of length %u.init_evq.buf_count containing DMA
+ * address of each page backing the event queue.
+ * @u.init_rxq.index: Index of receive queue to create.
+ * @u.init_rxq.buf_count: Number of 4k buffers backing receive queue.
+ * @u.init_rxq.evq: Instance of event queue to target receive events at.
+ * @u.init_rxq.label: Label used in receive events.
+ * @u.init_rxq.flags: Unused.
+ * @u.init_rxq.addr: Array of length %u.init_rxq.buf_count containing DMA
+ * address of each page backing the receive queue.
+ * @u.init_txq.index: Index of transmit queue to create.
+ * @u.init_txq.buf_count: Number of 4k buffers backing transmit queue.
+ * @u.init_txq.evq: Instance of event queue to target transmit completion
+ * events at.
+ * @u.init_txq.label: Label used in transmit completion events.
+ * @u.init_txq.flags: Checksum offload flags.
+ * @u.init_txq.addr: Array of length %u.init_txq.buf_count containing DMA
+ * address of each page backing the transmit queue.
+ * @u.mac_filter.rxq: Insert MAC filter at VF local address/VLAN targeting
+ * all traffic at this receive queue.
+ * @u.mac_filter.flags: MAC filter flags.
+ * @u.set_status_page.dma_addr: Base address for the &struct vfdi_status.
+ * This address must be page-aligned and the PF may write up to a
+ * whole page (allowing for extension of the structure).
+ * @u.set_status_page.peer_page_count: Number of additional pages the VF
+ * has provided into which peer addresses may be DMAd.
+ * @u.set_status_page.peer_page_addr: Array of DMA addresses of pages.
+ * If the number of peers exceeds 256, then the VF must provide
+ * additional pages in this array. The PF will then DMA up to
+ * 512 vfdi_endpoint structures into each page. These addresses
+ * must be page-aligned.
+ */
+struct vfdi_req {
+ u32 op;
+ u32 reserved1;
+ s32 rc;
+ u32 reserved2;
+ union {
+ struct {
+ u32 index;
+ u32 buf_count;
+ u64 addr[];
+ } init_evq;
+ struct {
+ u32 index;
+ u32 buf_count;
+ u32 evq;
+ u32 label;
+ u32 flags;
+#define VFDI_RXQ_FLAG_SCATTER_EN 1
+ u32 reserved;
+ u64 addr[];
+ } init_rxq;
+ struct {
+ u32 index;
+ u32 buf_count;
+ u32 evq;
+ u32 label;
+ u32 flags;
+#define VFDI_TXQ_FLAG_IP_CSUM_DIS 1
+#define VFDI_TXQ_FLAG_TCPUDP_CSUM_DIS 2
+ u32 reserved;
+ u64 addr[];
+ } init_txq;
+ struct {
+ u32 rxq;
+ u32 flags;
+#define VFDI_MAC_FILTER_FLAG_RSS 1
+#define VFDI_MAC_FILTER_FLAG_SCATTER 2
+ } mac_filter;
+ struct {
+ u64 dma_addr;
+ u64 peer_page_count;
+ u64 peer_page_addr[];
+ } set_status_page;
+ } u;
+};
+
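For illustration, a minimal sketch (not part of the patch) of filling in a VFDI_OP_INIT_EVQ request in the page-aligned buffer whose DMA address is later posted via the four REQ_WORD events. The function name vfdi_build_init_evq() and its parameters are hypothetical.

static void vfdi_build_init_evq(struct vfdi_req *req, u32 evq_index,
				const u64 *evq_page_addrs, u32 page_count)
{
	u32 i;

	memset(req, 0, sizeof(*req));
	req->op = VFDI_OP_INIT_EVQ;
	req->u.init_evq.index = evq_index;
	req->u.init_evq.buf_count = page_count;
	for (i = 0; i < page_count; i++)
		req->u.init_evq.addr[i] = evq_page_addrs[i];
}

Once the PF has processed the request it writes a response over the same buffer, setting @op to VFDI_OP_RESPONSE and @rc to VFDI_RC_SUCCESS or one of the negative VFDI_RC_* codes, which the VF would check before reusing the page.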
+/**
+ * struct vfdi_status - Status provided by PF driver to VF driver
+ * @generation_start: A generation count DMA'd to VF *before* the
+ * rest of the structure.
+ * @generation_end: A generation count DMA'd to VF *after* the
+ * rest of the structure.
+ * @version: Version of this structure; currently set to 1. Later
+ * versions must either be layout-compatible or only be sent to VFs
+ * that specifically request them.
+ * @length: Total length of this structure including embedded tables
+ * @vi_scale: log2 of the number of VIs available on this VF. This quantity
+ * is used by the hardware for register decoding.
+ * @max_tx_channels: The maximum number of transmit queues the VF can use.
+ * @rss_rxq_count: The number of receive queues present in the shared RSS
+ * indirection table.
+ * @peer_count: Total number of peers in the complete peer list. If larger
+ * than ARRAY_SIZE(%peers), then the VF must provide sufficient
+ * additional pages, each of which is filled with vfdi_endpoint structures.
+ * @local: The MAC address and outer VLAN tag of *this* VF
+ * @peers: Table of peer addresses. The @tci fields in these structures
+ * are currently unused and must be ignored. Additional peers are
+ * written into any additional pages provided by the VF.
+ * @timer_quantum_ns: Timer quantum (nominal period between timer ticks)
+ * for interrupt moderation timers, in nanoseconds. This member is only
+ * present if @length is sufficiently large.
+ */
+struct vfdi_status {
+ u32 generation_start;
+ u32 generation_end;
+ u32 version;
+ u32 length;
+ u8 vi_scale;
+ u8 max_tx_channels;
+ u8 rss_rxq_count;
+ u8 reserved1;
+ u16 peer_count;
+ u16 reserved2;
+ struct vfdi_endpoint local;
+ struct vfdi_endpoint peers[256];
+
+ /* Members below here extend version 1 of this structure */
+ u32 timer_quantum_ns;
+};
+
+#endif
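The generation_start/generation_end pair documented above lets the VF detect a torn update of the status page. A minimal sketch (not part of the patch; the helper name is hypothetical) of how a reader might use it:

static void vfdi_copy_status(const struct vfdi_status *dma_status,
			     struct vfdi_status *copy)
{
	u32 start, end;

	do {
		start = READ_ONCE(dma_status->generation_start);
		rmb();	/* read the body only after the start count */
		memcpy(copy, dma_status, sizeof(*copy));
		rmb();	/* read the end count only after the body */
		end = READ_ONCE(dma_status->generation_end);
	} while (start != end);	/* PF updated the page mid-copy: retry */
}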
diff --git a/kernel/drivers/net/ethernet/sfc/workarounds.h b/kernel/drivers/net/ethernet/sfc/workarounds.h
new file mode 100644
index 000000000..2310b75d4
--- /dev/null
+++ b/kernel/drivers/net/ethernet/sfc/workarounds.h
@@ -0,0 +1,53 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2006-2013 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_WORKAROUNDS_H
+#define EFX_WORKAROUNDS_H
+
+/*
+ * Hardware workarounds.
+ * Bug numbers are from Solarflare's Bugzilla.
+ */
+
+#define EFX_WORKAROUND_FALCON_A(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_A1)
+#define EFX_WORKAROUND_FALCON_AB(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_B0)
+#define EFX_WORKAROUND_SIENA(efx) (efx_nic_rev(efx) == EFX_REV_SIENA_A0)
+#define EFX_WORKAROUND_10G(efx) 1
+
+/* Bit-bashed I2C reads cause performance drop */
+#define EFX_WORKAROUND_7884 EFX_WORKAROUND_10G
+/* Truncated IPv4 packets can confuse the TX packet parser */
+#define EFX_WORKAROUND_15592 EFX_WORKAROUND_FALCON_AB
+/* Legacy interrupt storm when interrupt fifo fills */
+#define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA
+
+/* Spurious parity errors in TSORT buffers */
+#define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A
+/* Unaligned read request >512 bytes after aligning may break TSORT */
+#define EFX_WORKAROUND_5391 EFX_WORKAROUND_FALCON_A
+/* iSCSI parsing errors */
+#define EFX_WORKAROUND_5583 EFX_WORKAROUND_FALCON_A
+/* RX events go missing */
+#define EFX_WORKAROUND_5676 EFX_WORKAROUND_FALCON_A
+/* RX_RESET on A1 */
+#define EFX_WORKAROUND_6555 EFX_WORKAROUND_FALCON_A
+/* Increase filter depth to avoid RX_RESET */
+#define EFX_WORKAROUND_7244 EFX_WORKAROUND_FALCON_A
+/* Flushes may never complete */
+#define EFX_WORKAROUND_7803 EFX_WORKAROUND_FALCON_AB
+/* Leak overlength packets rather than free */
+#define EFX_WORKAROUND_8071 EFX_WORKAROUND_FALCON_A
+
+/* Lockup when writing event block registers at gen2/gen3 */
+#define EFX_EF10_WORKAROUND_35388(efx) \
+ (((struct efx_ef10_nic_data *)efx->nic_data)->workaround_35388)
+#define EFX_WORKAROUND_35388(efx) \
+ (efx_nic_rev(efx) == EFX_REV_HUNT_A0 && EFX_EF10_WORKAROUND_35388(efx))
+
+#endif /* EFX_WORKAROUNDS_H */
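A minimal usage sketch (not part of the patch): the macros above are plain predicates on the NIC revision (or, for 35388, on a firmware-reported flag), so callers test them inline wherever the affected behaviour lives. The function below is a hypothetical example.

static inline bool example_flush_may_hang(struct efx_nic *efx)
{
	/* Bug 7803: queue flushes may never complete on Falcon A/B,
	 * so a caller would arm a timeout and fall back to a reset.
	 */
	return EFX_WORKAROUND_7803(efx);
}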