Diffstat (limited to 'kernel/drivers/net/ethernet/sfc')
26 files changed, 6498 insertions, 1728 deletions
diff --git a/kernel/drivers/net/ethernet/sfc/Kconfig b/kernel/drivers/net/ethernet/sfc/Kconfig
index 088921294..4dd92b7b8 100644
--- a/kernel/drivers/net/ethernet/sfc/Kconfig
+++ b/kernel/drivers/net/ethernet/sfc/Kconfig
@@ -36,3 +36,12 @@ config SFC_SRIOV
 	  This enables support for the SFC9000 I/O Virtualization
 	  features, allowing accelerated network performance in
 	  virtualized environments.
+config SFC_MCDI_LOGGING
+	bool "Solarflare SFC9000/SFC9100-family MCDI logging support"
+	depends on SFC
+	default y
+	---help---
+	  This enables support for tracing of MCDI (Management-Controller-to-
+	  Driver-Interface) commands and responses, allowing debugging of
+	  driver/firmware interaction.  The tracing is actually enabled by
+	  a sysfs file 'mcdi_logging' under the PCI device.
diff --git a/kernel/drivers/net/ethernet/sfc/Makefile b/kernel/drivers/net/ethernet/sfc/Makefile
index 3a83c0dca..ce8470fe7 100644
--- a/kernel/drivers/net/ethernet/sfc/Makefile
+++ b/kernel/drivers/net/ethernet/sfc/Makefile
@@ -3,6 +3,6 @@ sfc-y += efx.o nic.o farch.o falcon.o siena.o ef10.o tx.o \
 	   tenxpress.o txc43128_phy.o falcon_boards.o \
 	   mcdi.o mcdi_port.o mcdi_mon.o ptp.o
 sfc-$(CONFIG_SFC_MTD)	+= mtd.o
-sfc-$(CONFIG_SFC_SRIOV)	+= siena_sriov.o
+sfc-$(CONFIG_SFC_SRIOV)	+= sriov.o siena_sriov.o ef10_sriov.o
 
 obj-$(CONFIG_SFC)	+= sfc.o
diff --git a/kernel/drivers/net/ethernet/sfc/ef10.c b/kernel/drivers/net/ethernet/sfc/ef10.c
index fbb6cfa0f..e6a084a6b 100644
--- a/kernel/drivers/net/ethernet/sfc/ef10.c
+++ b/kernel/drivers/net/ethernet/sfc/ef10.c
@@ -15,6 +15,7 @@
 #include "nic.h"
 #include "workarounds.h"
 #include "selftest.h"
+#include "ef10_sriov.h"
 #include <linux/in.h>
 #include <linux/jhash.h>
 #include <linux/wait.h>
@@ -30,6 +31,9 @@ enum {
 /* The reserved RSS context value */
 #define EFX_EF10_RSS_CONTEXT_INVALID	0xffffffff
+/* The maximum size of a shared RSS context */
+/* TODO: this should really be from the mcdi protocol export */
+#define EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE 64UL
 
 /* The filter table(s) are managed by firmware and we have write-only
  * access.
When removing filters we must identify them to the @@ -45,6 +49,12 @@ enum { */ #define HUNT_FILTER_TBL_ROWS 8192 +#define EFX_EF10_FILTER_ID_INVALID 0xffff +struct efx_ef10_dev_addr { + u8 addr[ETH_ALEN]; + u16 id; +}; + struct efx_ef10_filter_table { /* The RX match field masks supported by this fw & hw, in order of priority */ enum efx_filter_match_flags rx_match_flags[ @@ -65,19 +75,19 @@ struct efx_ef10_filter_table { /* Shadow of net_device address lists, guarded by mac_lock */ #define EFX_EF10_FILTER_DEV_UC_MAX 32 #define EFX_EF10_FILTER_DEV_MC_MAX 256 - struct { - u8 addr[ETH_ALEN]; - u16 id; - } dev_uc_list[EFX_EF10_FILTER_DEV_UC_MAX], - dev_mc_list[EFX_EF10_FILTER_DEV_MC_MAX]; - int dev_uc_count; /* negative for PROMISC */ - int dev_mc_count; /* negative for PROMISC/ALLMULTI */ + struct efx_ef10_dev_addr dev_uc_list[EFX_EF10_FILTER_DEV_UC_MAX]; + struct efx_ef10_dev_addr dev_mc_list[EFX_EF10_FILTER_DEV_MC_MAX]; + int dev_uc_count; + int dev_mc_count; +/* Indices (like efx_ef10_dev_addr.id) for promisc/allmulti filters */ + u16 ucdef_id; + u16 bcast_id; + u16 mcdef_id; }; /* An arbitrary search limit for the software hash table */ #define EFX_EF10_FILTER_SEARCH_LIMIT 200 -static void efx_ef10_rx_push_rss_config(struct efx_nic *efx); static void efx_ef10_rx_free_indir_table(struct efx_nic *efx); static void efx_ef10_filter_table_remove(struct efx_nic *efx); @@ -92,9 +102,55 @@ static int efx_ef10_get_warm_boot_count(struct efx_nic *efx) static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx) { - return resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]); + int bar; + + bar = efx->type->mem_bar; + return resource_size(&efx->pci_dev->resource[bar]); +} + +static bool efx_ef10_is_vf(struct efx_nic *efx) +{ + return efx->type->is_vf; +} + +static int efx_ef10_get_pf_index(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN); + struct efx_ef10_nic_data *nic_data = efx->nic_data; + size_t outlen; + int rc; + + rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf, + sizeof(outbuf), &outlen); + if (rc) + return rc; + if (outlen < sizeof(outbuf)) + return -EIO; + + nic_data->pf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_PF); + return 0; } +#ifdef CONFIG_SFC_SRIOV +static int efx_ef10_get_vf_index(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN); + struct efx_ef10_nic_data *nic_data = efx->nic_data; + size_t outlen; + int rc; + + rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf, + sizeof(outbuf), &outlen); + if (rc) + return rc; + if (outlen < sizeof(outbuf)) + return -EIO; + + nic_data->vf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_VF); + return 0; +} +#endif + static int efx_ef10_init_datapath_caps(struct efx_nic *efx) { MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_OUT_LEN); @@ -117,6 +173,13 @@ static int efx_ef10_init_datapath_caps(struct efx_nic *efx) nic_data->datapath_caps = MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1); + /* record the DPCPU firmware IDs to determine VEB vswitching support. + */ + nic_data->rx_dpcpu_fw_id = + MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID); + nic_data->tx_dpcpu_fw_id = + MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID); + if (!(nic_data->datapath_caps & (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN))) { netif_err(efx, drv, efx->net_dev, @@ -147,7 +210,7 @@ static int efx_ef10_get_sysclk_freq(struct efx_nic *efx) return rc > 0 ? 
rc : -ERANGE; } -static int efx_ef10_get_mac_address(struct efx_nic *efx, u8 *mac_address) +static int efx_ef10_get_mac_address_pf(struct efx_nic *efx, u8 *mac_address) { MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN); size_t outlen; @@ -167,19 +230,76 @@ static int efx_ef10_get_mac_address(struct efx_nic *efx, u8 *mac_address) return 0; } +static int efx_ef10_get_mac_address_vf(struct efx_nic *efx, u8 *mac_address) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX); + size_t outlen; + int num_addrs, rc; + + MCDI_SET_DWORD(inbuf, VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID, + EVB_PORT_ID_ASSIGNED); + rc = efx_mcdi_rpc(efx, MC_CMD_VPORT_GET_MAC_ADDRESSES, inbuf, + sizeof(inbuf), outbuf, sizeof(outbuf), &outlen); + + if (rc) + return rc; + if (outlen < MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN) + return -EIO; + + num_addrs = MCDI_DWORD(outbuf, + VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT); + + WARN_ON(num_addrs != 1); + + ether_addr_copy(mac_address, + MCDI_PTR(outbuf, VPORT_GET_MAC_ADDRESSES_OUT_MACADDR)); + + return 0; +} + +static ssize_t efx_ef10_show_link_control_flag(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); + + return sprintf(buf, "%d\n", + ((efx->mcdi->fn_flags) & + (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL)) + ? 1 : 0); +} + +static ssize_t efx_ef10_show_primary_flag(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); + + return sprintf(buf, "%d\n", + ((efx->mcdi->fn_flags) & + (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY)) + ? 1 : 0); +} + +static DEVICE_ATTR(link_control_flag, 0444, efx_ef10_show_link_control_flag, + NULL); +static DEVICE_ATTR(primary_flag, 0444, efx_ef10_show_primary_flag, NULL); + static int efx_ef10_probe(struct efx_nic *efx) { struct efx_ef10_nic_data *nic_data; + struct net_device *net_dev = efx->net_dev; int i, rc; /* We can have one VI for each 8K region. However, until we * use TX option descriptors we need two TX queues per channel. */ - efx->max_channels = - min_t(unsigned int, - EFX_MAX_CHANNELS, - resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]) / - (EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES)); + efx->max_channels = min_t(unsigned int, + EFX_MAX_CHANNELS, + efx_ef10_mem_map_size(efx) / + (EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES)); + efx->max_tx_channels = efx->max_channels; if (WARN_ON(efx->max_channels == 0)) return -EIO; @@ -188,6 +308,9 @@ static int efx_ef10_probe(struct efx_nic *efx) return -ENOMEM; efx->nic_data = nic_data; + /* we assume later that we can copy from this buffer in dwords */ + BUILD_BUG_ON(MCDI_CTL_SDU_LEN_MAX_V2 % 4); + rc = efx_nic_alloc_buffer(efx, &nic_data->mcdi_buf, 8 + MCDI_CTL_SDU_LEN_MAX_V2, GFP_KERNEL); if (rc) @@ -209,6 +332,8 @@ static int efx_ef10_probe(struct efx_nic *efx) nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID; + nic_data->vport_id = EVB_PORT_ID_ASSIGNED; + /* In case we're recovering from a crash (kexec), we want to * cancel any outstanding request by the previous user of this * function. 
We send a special message using the least @@ -230,45 +355,85 @@ static int efx_ef10_probe(struct efx_nic *efx) if (rc) goto fail3; + rc = device_create_file(&efx->pci_dev->dev, + &dev_attr_link_control_flag); + if (rc) + goto fail3; + + rc = device_create_file(&efx->pci_dev->dev, &dev_attr_primary_flag); + if (rc) + goto fail4; + + rc = efx_ef10_get_pf_index(efx); + if (rc) + goto fail5; + rc = efx_ef10_init_datapath_caps(efx); if (rc < 0) - goto fail3; + goto fail5; efx->rx_packet_len_offset = ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE; rc = efx_mcdi_port_get_number(efx); if (rc < 0) - goto fail3; + goto fail5; efx->port_num = rc; + net_dev->dev_port = rc; - rc = efx_ef10_get_mac_address(efx, efx->net_dev->perm_addr); + rc = efx->type->get_mac_address(efx, efx->net_dev->perm_addr); if (rc) - goto fail3; + goto fail5; rc = efx_ef10_get_sysclk_freq(efx); if (rc < 0) - goto fail3; + goto fail5; efx->timer_quantum_ns = 1536000 / rc; /* 1536 cycles */ - /* Check whether firmware supports bug 35388 workaround */ - rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG35388, true); - if (rc == 0) + /* Check whether firmware supports bug 35388 workaround. + * First try to enable it, then if we get EPERM, just + * ask if it's already enabled + */ + rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG35388, true, NULL); + if (rc == 0) { nic_data->workaround_35388 = true; - else if (rc != -ENOSYS && rc != -ENOENT) - goto fail3; + } else if (rc == -EPERM) { + unsigned int enabled; + + rc = efx_mcdi_get_workarounds(efx, NULL, &enabled); + if (rc) + goto fail3; + nic_data->workaround_35388 = enabled & + MC_CMD_GET_WORKAROUNDS_OUT_BUG35388; + } else if (rc != -ENOSYS && rc != -ENOENT) { + goto fail5; + } netif_dbg(efx, probe, efx->net_dev, "workaround for bug 35388 is %sabled\n", nic_data->workaround_35388 ? 
"en" : "dis"); rc = efx_mcdi_mon_probe(efx); - if (rc) - goto fail3; + if (rc && rc != -EPERM) + goto fail5; efx_ptp_probe(efx, NULL); +#ifdef CONFIG_SFC_SRIOV + if ((efx->pci_dev->physfn) && (!efx->pci_dev->is_physfn)) { + struct pci_dev *pci_dev_pf = efx->pci_dev->physfn; + struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf); + + efx_pf->type->get_mac_address(efx_pf, nic_data->port_id); + } else +#endif + ether_addr_copy(nic_data->port_id, efx->net_dev->perm_addr); + return 0; +fail5: + device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag); +fail4: + device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag); fail3: efx_mcdi_fini(efx); fail2: @@ -281,7 +446,7 @@ fail1: static int efx_ef10_free_vis(struct efx_nic *efx) { - MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, 0); + MCDI_DECLARE_BUF_ERR(outbuf); size_t outlen; int rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FREE_VIS, NULL, 0, outbuf, sizeof(outbuf), &outlen); @@ -352,9 +517,9 @@ static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n) static int efx_ef10_link_piobufs(struct efx_nic *efx) { struct efx_ef10_nic_data *nic_data = efx->nic_data; - MCDI_DECLARE_BUF(inbuf, - max(MC_CMD_LINK_PIOBUF_IN_LEN, - MC_CMD_UNLINK_PIOBUF_IN_LEN)); + _MCDI_DECLARE_BUF(inbuf, + max(MC_CMD_LINK_PIOBUF_IN_LEN, + MC_CMD_UNLINK_PIOBUF_IN_LEN)); struct efx_channel *channel; struct efx_tx_queue *tx_queue; unsigned int offset, index; @@ -363,6 +528,8 @@ static int efx_ef10_link_piobufs(struct efx_nic *efx) BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_OUT_LEN != 0); BUILD_BUG_ON(MC_CMD_UNLINK_PIOBUF_OUT_LEN != 0); + memset(inbuf, 0, sizeof(inbuf)); + /* Link a buffer to each VI in the write-combining mapping */ for (index = 0; index < nic_data->n_piobufs; ++index) { MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_PIOBUF_HANDLE, @@ -475,6 +642,25 @@ static void efx_ef10_remove(struct efx_nic *efx) struct efx_ef10_nic_data *nic_data = efx->nic_data; int rc; +#ifdef CONFIG_SFC_SRIOV + struct efx_ef10_nic_data *nic_data_pf; + struct pci_dev *pci_dev_pf; + struct efx_nic *efx_pf; + struct ef10_vf *vf; + + if (efx->pci_dev->is_virtfn) { + pci_dev_pf = efx->pci_dev->physfn; + if (pci_dev_pf) { + efx_pf = pci_get_drvdata(pci_dev_pf); + nic_data_pf = efx_pf->nic_data; + vf = nic_data_pf->vf + nic_data->vf_index; + vf->efx = NULL; + } else + netif_info(efx, drv, efx->net_dev, + "Could not get the PF id from VF\n"); + } +#endif + efx_ptp_remove(efx); efx_mcdi_mon_remove(efx); @@ -490,11 +676,120 @@ static void efx_ef10_remove(struct efx_nic *efx) if (!nic_data->must_restore_piobufs) efx_ef10_free_piobufs(efx); + device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag); + device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag); + efx_mcdi_fini(efx); efx_nic_free_buffer(efx, &nic_data->mcdi_buf); kfree(nic_data); } +static int efx_ef10_probe_pf(struct efx_nic *efx) +{ + return efx_ef10_probe(efx); +} + +int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_ALLOC_IN_LEN); + + MCDI_SET_DWORD(inbuf, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id); + return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_ALLOC, inbuf, sizeof(inbuf), + NULL, 0, NULL); +} + +int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_FREE_IN_LEN); + + MCDI_SET_DWORD(inbuf, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id); + return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_FREE, inbuf, sizeof(inbuf), + NULL, 0, NULL); +} + +int efx_ef10_vport_add_mac(struct efx_nic *efx, + unsigned int port_id, u8 
*mac) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN); + + MCDI_SET_DWORD(inbuf, VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID, port_id); + ether_addr_copy(MCDI_PTR(inbuf, VPORT_ADD_MAC_ADDRESS_IN_MACADDR), mac); + + return efx_mcdi_rpc(efx, MC_CMD_VPORT_ADD_MAC_ADDRESS, inbuf, + sizeof(inbuf), NULL, 0, NULL); +} + +int efx_ef10_vport_del_mac(struct efx_nic *efx, + unsigned int port_id, u8 *mac) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN); + + MCDI_SET_DWORD(inbuf, VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID, port_id); + ether_addr_copy(MCDI_PTR(inbuf, VPORT_DEL_MAC_ADDRESS_IN_MACADDR), mac); + + return efx_mcdi_rpc(efx, MC_CMD_VPORT_DEL_MAC_ADDRESS, inbuf, + sizeof(inbuf), NULL, 0, NULL); +} + +#ifdef CONFIG_SFC_SRIOV +static int efx_ef10_probe_vf(struct efx_nic *efx) +{ + int rc; + struct pci_dev *pci_dev_pf; + + /* If the parent PF has no VF data structure, it doesn't know about this + * VF so fail probe. The VF needs to be re-created. This can happen + * if the PF driver is unloaded while the VF is assigned to a guest. + */ + pci_dev_pf = efx->pci_dev->physfn; + if (pci_dev_pf) { + struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf); + struct efx_ef10_nic_data *nic_data_pf = efx_pf->nic_data; + + if (!nic_data_pf->vf) { + netif_info(efx, drv, efx->net_dev, + "The VF cannot link to its parent PF; " + "please destroy and re-create the VF\n"); + return -EBUSY; + } + } + + rc = efx_ef10_probe(efx); + if (rc) + return rc; + + rc = efx_ef10_get_vf_index(efx); + if (rc) + goto fail; + + if (efx->pci_dev->is_virtfn) { + if (efx->pci_dev->physfn) { + struct efx_nic *efx_pf = + pci_get_drvdata(efx->pci_dev->physfn); + struct efx_ef10_nic_data *nic_data_p = efx_pf->nic_data; + struct efx_ef10_nic_data *nic_data = efx->nic_data; + + nic_data_p->vf[nic_data->vf_index].efx = efx; + nic_data_p->vf[nic_data->vf_index].pci_dev = + efx->pci_dev; + } else + netif_info(efx, drv, efx->net_dev, + "Could not get the PF id from VF\n"); + } + + return 0; + +fail: + efx_ef10_remove(efx); + return rc; +} +#else +static int efx_ef10_probe_vf(struct efx_nic *efx __attribute__ ((unused))) +{ + return 0; +} +#endif + static int efx_ef10_alloc_vis(struct efx_nic *efx, unsigned int min_vis, unsigned int max_vis) { @@ -529,11 +824,13 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx) { struct efx_ef10_nic_data *nic_data = efx->nic_data; unsigned int uc_mem_map_size, wc_mem_map_size; - unsigned int min_vis, pio_write_vi_base, max_vis; + unsigned int min_vis = max(EFX_TXQ_TYPES, + efx_separate_tx_channels ? 2 : 1); + unsigned int channel_vis, pio_write_vi_base, max_vis; void __iomem *membase; int rc; - min_vis = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES); + channel_vis = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES); #ifdef EFX_USE_PIO /* Try to allocate PIO buffers if wanted and if the full @@ -567,11 +864,11 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx) * page size is >4K). So we may allocate some extra VIs just * for writing PIO buffers through. * - * The UC mapping contains (min_vis - 1) complete VIs and the + * The UC mapping contains (channel_vis - 1) complete VIs and the * first half of the next VI. Then the WC mapping begins with * the second half of this last VI. 
*/ - uc_mem_map_size = PAGE_ALIGN((min_vis - 1) * EFX_VI_PAGE_SIZE + + uc_mem_map_size = PAGE_ALIGN((channel_vis - 1) * EFX_VI_PAGE_SIZE + ER_DZ_TX_PIOBUF); if (nic_data->n_piobufs) { /* pio_write_vi_base rounds down to give the number of complete @@ -586,7 +883,7 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx) } else { pio_write_vi_base = 0; wc_mem_map_size = 0; - max_vis = min_vis; + max_vis = channel_vis; } /* In case the last attached driver failed to free VIs, do it now */ @@ -598,6 +895,23 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx) if (rc != 0) return rc; + if (nic_data->n_allocated_vis < channel_vis) { + netif_info(efx, drv, efx->net_dev, + "Could not allocate enough VIs to satisfy RSS" + " requirements. Performance may not be optimal.\n"); + /* We didn't get the VIs to populate our channels. + * We could keep what we got but then we'd have more + * interrupts than we need. + * Instead calculate new max_channels and restart + */ + efx->max_channels = nic_data->n_allocated_vis; + efx->max_tx_channels = + nic_data->n_allocated_vis / EFX_TXQ_TYPES; + + efx_ef10_free_vis(efx); + return -EAGAIN; + } + /* If we didn't get enough VIs to map all the PIO buffers, free the * PIO buffers */ @@ -687,19 +1001,41 @@ static int efx_ef10_init_nic(struct efx_nic *efx) nic_data->must_restore_piobufs = false; } - efx_ef10_rx_push_rss_config(efx); + /* don't fail init if RSS setup doesn't work */ + efx->type->rx_push_rss_config(efx, false, efx->rx_indir_table); + return 0; } static void efx_ef10_reset_mc_allocations(struct efx_nic *efx) { struct efx_ef10_nic_data *nic_data = efx->nic_data; +#ifdef CONFIG_SFC_SRIOV + unsigned int i; +#endif /* All our allocations have been reset */ nic_data->must_realloc_vis = true; nic_data->must_restore_filters = true; nic_data->must_restore_piobufs = true; nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID; + + /* Driver-created vswitches and vports must be re-created */ + nic_data->must_probe_vswitching = true; + nic_data->vport_id = EVB_PORT_ID_ASSIGNED; +#ifdef CONFIG_SFC_SRIOV + if (nic_data->vf) + for (i = 0; i < efx->vf_count; i++) + nic_data->vf[i].vport_id = 0; +#endif +} + +static enum reset_type efx_ef10_map_reset_reason(enum reset_type reason) +{ + if (reason == RESET_TYPE_MC_FAILURE) + return RESET_TYPE_DATAPATH; + + return efx_mcdi_map_reset_reason(reason); } static int efx_ef10_map_reset_flags(u32 *flags) @@ -736,6 +1072,12 @@ static int efx_ef10_reset(struct efx_nic *efx, enum reset_type reset_type) { int rc = efx_mcdi_reset(efx, reset_type); + /* Unprivileged functions return -EPERM, but need to return success + * here so that the datapath is brought back up. + */ + if (reset_type == RESET_TYPE_WORLD && rc == -EPERM) + rc = 0; + /* If it was a port reset, trigger reallocation of MC resources. * Note that on an MC reset nothing needs to be done now because we'll * detect the MC reset later and handle it then. 
@@ -760,93 +1102,112 @@ static int efx_ef10_reset(struct efx_nic *efx, enum reset_type reset_type) [GENERIC_STAT_ ## ext_name] = { #ext_name, 0, 0 } static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = { - EF10_DMA_STAT(tx_bytes, TX_BYTES), - EF10_DMA_STAT(tx_packets, TX_PKTS), - EF10_DMA_STAT(tx_pause, TX_PAUSE_PKTS), - EF10_DMA_STAT(tx_control, TX_CONTROL_PKTS), - EF10_DMA_STAT(tx_unicast, TX_UNICAST_PKTS), - EF10_DMA_STAT(tx_multicast, TX_MULTICAST_PKTS), - EF10_DMA_STAT(tx_broadcast, TX_BROADCAST_PKTS), - EF10_DMA_STAT(tx_lt64, TX_LT64_PKTS), - EF10_DMA_STAT(tx_64, TX_64_PKTS), - EF10_DMA_STAT(tx_65_to_127, TX_65_TO_127_PKTS), - EF10_DMA_STAT(tx_128_to_255, TX_128_TO_255_PKTS), - EF10_DMA_STAT(tx_256_to_511, TX_256_TO_511_PKTS), - EF10_DMA_STAT(tx_512_to_1023, TX_512_TO_1023_PKTS), - EF10_DMA_STAT(tx_1024_to_15xx, TX_1024_TO_15XX_PKTS), - EF10_DMA_STAT(tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS), - EF10_DMA_STAT(rx_bytes, RX_BYTES), - EF10_DMA_INVIS_STAT(rx_bytes_minus_good_bytes, RX_BAD_BYTES), - EF10_OTHER_STAT(rx_good_bytes), - EF10_OTHER_STAT(rx_bad_bytes), - EF10_DMA_STAT(rx_packets, RX_PKTS), - EF10_DMA_STAT(rx_good, RX_GOOD_PKTS), - EF10_DMA_STAT(rx_bad, RX_BAD_FCS_PKTS), - EF10_DMA_STAT(rx_pause, RX_PAUSE_PKTS), - EF10_DMA_STAT(rx_control, RX_CONTROL_PKTS), - EF10_DMA_STAT(rx_unicast, RX_UNICAST_PKTS), - EF10_DMA_STAT(rx_multicast, RX_MULTICAST_PKTS), - EF10_DMA_STAT(rx_broadcast, RX_BROADCAST_PKTS), - EF10_DMA_STAT(rx_lt64, RX_UNDERSIZE_PKTS), - EF10_DMA_STAT(rx_64, RX_64_PKTS), - EF10_DMA_STAT(rx_65_to_127, RX_65_TO_127_PKTS), - EF10_DMA_STAT(rx_128_to_255, RX_128_TO_255_PKTS), - EF10_DMA_STAT(rx_256_to_511, RX_256_TO_511_PKTS), - EF10_DMA_STAT(rx_512_to_1023, RX_512_TO_1023_PKTS), - EF10_DMA_STAT(rx_1024_to_15xx, RX_1024_TO_15XX_PKTS), - EF10_DMA_STAT(rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS), - EF10_DMA_STAT(rx_gtjumbo, RX_GTJUMBO_PKTS), - EF10_DMA_STAT(rx_bad_gtjumbo, RX_JABBER_PKTS), - EF10_DMA_STAT(rx_overflow, RX_OVERFLOW_PKTS), - EF10_DMA_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS), - EF10_DMA_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS), - EF10_DMA_STAT(rx_nodesc_drops, RX_NODESC_DROPS), + EF10_DMA_STAT(port_tx_bytes, TX_BYTES), + EF10_DMA_STAT(port_tx_packets, TX_PKTS), + EF10_DMA_STAT(port_tx_pause, TX_PAUSE_PKTS), + EF10_DMA_STAT(port_tx_control, TX_CONTROL_PKTS), + EF10_DMA_STAT(port_tx_unicast, TX_UNICAST_PKTS), + EF10_DMA_STAT(port_tx_multicast, TX_MULTICAST_PKTS), + EF10_DMA_STAT(port_tx_broadcast, TX_BROADCAST_PKTS), + EF10_DMA_STAT(port_tx_lt64, TX_LT64_PKTS), + EF10_DMA_STAT(port_tx_64, TX_64_PKTS), + EF10_DMA_STAT(port_tx_65_to_127, TX_65_TO_127_PKTS), + EF10_DMA_STAT(port_tx_128_to_255, TX_128_TO_255_PKTS), + EF10_DMA_STAT(port_tx_256_to_511, TX_256_TO_511_PKTS), + EF10_DMA_STAT(port_tx_512_to_1023, TX_512_TO_1023_PKTS), + EF10_DMA_STAT(port_tx_1024_to_15xx, TX_1024_TO_15XX_PKTS), + EF10_DMA_STAT(port_tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS), + EF10_DMA_STAT(port_rx_bytes, RX_BYTES), + EF10_DMA_INVIS_STAT(port_rx_bytes_minus_good_bytes, RX_BAD_BYTES), + EF10_OTHER_STAT(port_rx_good_bytes), + EF10_OTHER_STAT(port_rx_bad_bytes), + EF10_DMA_STAT(port_rx_packets, RX_PKTS), + EF10_DMA_STAT(port_rx_good, RX_GOOD_PKTS), + EF10_DMA_STAT(port_rx_bad, RX_BAD_FCS_PKTS), + EF10_DMA_STAT(port_rx_pause, RX_PAUSE_PKTS), + EF10_DMA_STAT(port_rx_control, RX_CONTROL_PKTS), + EF10_DMA_STAT(port_rx_unicast, RX_UNICAST_PKTS), + EF10_DMA_STAT(port_rx_multicast, RX_MULTICAST_PKTS), + EF10_DMA_STAT(port_rx_broadcast, RX_BROADCAST_PKTS), + 
EF10_DMA_STAT(port_rx_lt64, RX_UNDERSIZE_PKTS), + EF10_DMA_STAT(port_rx_64, RX_64_PKTS), + EF10_DMA_STAT(port_rx_65_to_127, RX_65_TO_127_PKTS), + EF10_DMA_STAT(port_rx_128_to_255, RX_128_TO_255_PKTS), + EF10_DMA_STAT(port_rx_256_to_511, RX_256_TO_511_PKTS), + EF10_DMA_STAT(port_rx_512_to_1023, RX_512_TO_1023_PKTS), + EF10_DMA_STAT(port_rx_1024_to_15xx, RX_1024_TO_15XX_PKTS), + EF10_DMA_STAT(port_rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS), + EF10_DMA_STAT(port_rx_gtjumbo, RX_GTJUMBO_PKTS), + EF10_DMA_STAT(port_rx_bad_gtjumbo, RX_JABBER_PKTS), + EF10_DMA_STAT(port_rx_overflow, RX_OVERFLOW_PKTS), + EF10_DMA_STAT(port_rx_align_error, RX_ALIGN_ERROR_PKTS), + EF10_DMA_STAT(port_rx_length_error, RX_LENGTH_ERROR_PKTS), + EF10_DMA_STAT(port_rx_nodesc_drops, RX_NODESC_DROPS), GENERIC_SW_STAT(rx_nodesc_trunc), GENERIC_SW_STAT(rx_noskb_drops), - EF10_DMA_STAT(rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW), - EF10_DMA_STAT(rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW), - EF10_DMA_STAT(rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL), - EF10_DMA_STAT(rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL), - EF10_DMA_STAT(rx_pm_trunc_qbb, PM_TRUNC_QBB), - EF10_DMA_STAT(rx_pm_discard_qbb, PM_DISCARD_QBB), - EF10_DMA_STAT(rx_pm_discard_mapping, PM_DISCARD_MAPPING), - EF10_DMA_STAT(rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS), - EF10_DMA_STAT(rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS), - EF10_DMA_STAT(rx_dp_streaming_packets, RXDP_STREAMING_PKTS), - EF10_DMA_STAT(rx_dp_hlb_fetch, RXDP_EMERGENCY_FETCH_CONDITIONS), - EF10_DMA_STAT(rx_dp_hlb_wait, RXDP_EMERGENCY_WAIT_CONDITIONS), + EF10_DMA_STAT(port_rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW), + EF10_DMA_STAT(port_rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW), + EF10_DMA_STAT(port_rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL), + EF10_DMA_STAT(port_rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL), + EF10_DMA_STAT(port_rx_pm_trunc_qbb, PM_TRUNC_QBB), + EF10_DMA_STAT(port_rx_pm_discard_qbb, PM_DISCARD_QBB), + EF10_DMA_STAT(port_rx_pm_discard_mapping, PM_DISCARD_MAPPING), + EF10_DMA_STAT(port_rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS), + EF10_DMA_STAT(port_rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS), + EF10_DMA_STAT(port_rx_dp_streaming_packets, RXDP_STREAMING_PKTS), + EF10_DMA_STAT(port_rx_dp_hlb_fetch, RXDP_HLB_FETCH_CONDITIONS), + EF10_DMA_STAT(port_rx_dp_hlb_wait, RXDP_HLB_WAIT_CONDITIONS), + EF10_DMA_STAT(rx_unicast, VADAPTER_RX_UNICAST_PACKETS), + EF10_DMA_STAT(rx_unicast_bytes, VADAPTER_RX_UNICAST_BYTES), + EF10_DMA_STAT(rx_multicast, VADAPTER_RX_MULTICAST_PACKETS), + EF10_DMA_STAT(rx_multicast_bytes, VADAPTER_RX_MULTICAST_BYTES), + EF10_DMA_STAT(rx_broadcast, VADAPTER_RX_BROADCAST_PACKETS), + EF10_DMA_STAT(rx_broadcast_bytes, VADAPTER_RX_BROADCAST_BYTES), + EF10_DMA_STAT(rx_bad, VADAPTER_RX_BAD_PACKETS), + EF10_DMA_STAT(rx_bad_bytes, VADAPTER_RX_BAD_BYTES), + EF10_DMA_STAT(rx_overflow, VADAPTER_RX_OVERFLOW), + EF10_DMA_STAT(tx_unicast, VADAPTER_TX_UNICAST_PACKETS), + EF10_DMA_STAT(tx_unicast_bytes, VADAPTER_TX_UNICAST_BYTES), + EF10_DMA_STAT(tx_multicast, VADAPTER_TX_MULTICAST_PACKETS), + EF10_DMA_STAT(tx_multicast_bytes, VADAPTER_TX_MULTICAST_BYTES), + EF10_DMA_STAT(tx_broadcast, VADAPTER_TX_BROADCAST_PACKETS), + EF10_DMA_STAT(tx_broadcast_bytes, VADAPTER_TX_BROADCAST_BYTES), + EF10_DMA_STAT(tx_bad, VADAPTER_TX_BAD_PACKETS), + EF10_DMA_STAT(tx_bad_bytes, VADAPTER_TX_BAD_BYTES), + EF10_DMA_STAT(tx_overflow, VADAPTER_TX_OVERFLOW), }; -#define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_tx_bytes) | \ - (1ULL << EF10_STAT_tx_packets) | \ - 
(1ULL << EF10_STAT_tx_pause) | \ - (1ULL << EF10_STAT_tx_unicast) | \ - (1ULL << EF10_STAT_tx_multicast) | \ - (1ULL << EF10_STAT_tx_broadcast) | \ - (1ULL << EF10_STAT_rx_bytes) | \ - (1ULL << EF10_STAT_rx_bytes_minus_good_bytes) | \ - (1ULL << EF10_STAT_rx_good_bytes) | \ - (1ULL << EF10_STAT_rx_bad_bytes) | \ - (1ULL << EF10_STAT_rx_packets) | \ - (1ULL << EF10_STAT_rx_good) | \ - (1ULL << EF10_STAT_rx_bad) | \ - (1ULL << EF10_STAT_rx_pause) | \ - (1ULL << EF10_STAT_rx_control) | \ - (1ULL << EF10_STAT_rx_unicast) | \ - (1ULL << EF10_STAT_rx_multicast) | \ - (1ULL << EF10_STAT_rx_broadcast) | \ - (1ULL << EF10_STAT_rx_lt64) | \ - (1ULL << EF10_STAT_rx_64) | \ - (1ULL << EF10_STAT_rx_65_to_127) | \ - (1ULL << EF10_STAT_rx_128_to_255) | \ - (1ULL << EF10_STAT_rx_256_to_511) | \ - (1ULL << EF10_STAT_rx_512_to_1023) | \ - (1ULL << EF10_STAT_rx_1024_to_15xx) | \ - (1ULL << EF10_STAT_rx_15xx_to_jumbo) | \ - (1ULL << EF10_STAT_rx_gtjumbo) | \ - (1ULL << EF10_STAT_rx_bad_gtjumbo) | \ - (1ULL << EF10_STAT_rx_overflow) | \ - (1ULL << EF10_STAT_rx_nodesc_drops) | \ +#define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_port_tx_bytes) | \ + (1ULL << EF10_STAT_port_tx_packets) | \ + (1ULL << EF10_STAT_port_tx_pause) | \ + (1ULL << EF10_STAT_port_tx_unicast) | \ + (1ULL << EF10_STAT_port_tx_multicast) | \ + (1ULL << EF10_STAT_port_tx_broadcast) | \ + (1ULL << EF10_STAT_port_rx_bytes) | \ + (1ULL << \ + EF10_STAT_port_rx_bytes_minus_good_bytes) | \ + (1ULL << EF10_STAT_port_rx_good_bytes) | \ + (1ULL << EF10_STAT_port_rx_bad_bytes) | \ + (1ULL << EF10_STAT_port_rx_packets) | \ + (1ULL << EF10_STAT_port_rx_good) | \ + (1ULL << EF10_STAT_port_rx_bad) | \ + (1ULL << EF10_STAT_port_rx_pause) | \ + (1ULL << EF10_STAT_port_rx_control) | \ + (1ULL << EF10_STAT_port_rx_unicast) | \ + (1ULL << EF10_STAT_port_rx_multicast) | \ + (1ULL << EF10_STAT_port_rx_broadcast) | \ + (1ULL << EF10_STAT_port_rx_lt64) | \ + (1ULL << EF10_STAT_port_rx_64) | \ + (1ULL << EF10_STAT_port_rx_65_to_127) | \ + (1ULL << EF10_STAT_port_rx_128_to_255) | \ + (1ULL << EF10_STAT_port_rx_256_to_511) | \ + (1ULL << EF10_STAT_port_rx_512_to_1023) |\ + (1ULL << EF10_STAT_port_rx_1024_to_15xx) |\ + (1ULL << EF10_STAT_port_rx_15xx_to_jumbo) |\ + (1ULL << EF10_STAT_port_rx_gtjumbo) | \ + (1ULL << EF10_STAT_port_rx_bad_gtjumbo) |\ + (1ULL << EF10_STAT_port_rx_overflow) | \ + (1ULL << EF10_STAT_port_rx_nodesc_drops) |\ (1ULL << GENERIC_STAT_rx_nodesc_trunc) | \ (1ULL << GENERIC_STAT_rx_noskb_drops)) @@ -854,39 +1215,39 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = { * switchable port we do not expose these because they might not * include all the packets they should. */ -#define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_tx_control) | \ - (1ULL << EF10_STAT_tx_lt64) | \ - (1ULL << EF10_STAT_tx_64) | \ - (1ULL << EF10_STAT_tx_65_to_127) | \ - (1ULL << EF10_STAT_tx_128_to_255) | \ - (1ULL << EF10_STAT_tx_256_to_511) | \ - (1ULL << EF10_STAT_tx_512_to_1023) | \ - (1ULL << EF10_STAT_tx_1024_to_15xx) | \ - (1ULL << EF10_STAT_tx_15xx_to_jumbo)) +#define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_port_tx_control) | \ + (1ULL << EF10_STAT_port_tx_lt64) | \ + (1ULL << EF10_STAT_port_tx_64) | \ + (1ULL << EF10_STAT_port_tx_65_to_127) |\ + (1ULL << EF10_STAT_port_tx_128_to_255) |\ + (1ULL << EF10_STAT_port_tx_256_to_511) |\ + (1ULL << EF10_STAT_port_tx_512_to_1023) |\ + (1ULL << EF10_STAT_port_tx_1024_to_15xx) |\ + (1ULL << EF10_STAT_port_tx_15xx_to_jumbo)) /* These statistics are only provided by the 40G MAC. 
For a 10G/40G * switchable port we do expose these because the errors will otherwise * be silent. */ -#define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_rx_align_error) | \ - (1ULL << EF10_STAT_rx_length_error)) +#define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_port_rx_align_error) |\ + (1ULL << EF10_STAT_port_rx_length_error)) /* These statistics are only provided if the firmware supports the * capability PM_AND_RXDP_COUNTERS. */ #define HUNT_PM_AND_RXDP_STAT_MASK ( \ - (1ULL << EF10_STAT_rx_pm_trunc_bb_overflow) | \ - (1ULL << EF10_STAT_rx_pm_discard_bb_overflow) | \ - (1ULL << EF10_STAT_rx_pm_trunc_vfifo_full) | \ - (1ULL << EF10_STAT_rx_pm_discard_vfifo_full) | \ - (1ULL << EF10_STAT_rx_pm_trunc_qbb) | \ - (1ULL << EF10_STAT_rx_pm_discard_qbb) | \ - (1ULL << EF10_STAT_rx_pm_discard_mapping) | \ - (1ULL << EF10_STAT_rx_dp_q_disabled_packets) | \ - (1ULL << EF10_STAT_rx_dp_di_dropped_packets) | \ - (1ULL << EF10_STAT_rx_dp_streaming_packets) | \ - (1ULL << EF10_STAT_rx_dp_hlb_fetch) | \ - (1ULL << EF10_STAT_rx_dp_hlb_wait)) + (1ULL << EF10_STAT_port_rx_pm_trunc_bb_overflow) | \ + (1ULL << EF10_STAT_port_rx_pm_discard_bb_overflow) | \ + (1ULL << EF10_STAT_port_rx_pm_trunc_vfifo_full) | \ + (1ULL << EF10_STAT_port_rx_pm_discard_vfifo_full) | \ + (1ULL << EF10_STAT_port_rx_pm_trunc_qbb) | \ + (1ULL << EF10_STAT_port_rx_pm_discard_qbb) | \ + (1ULL << EF10_STAT_port_rx_pm_discard_mapping) | \ + (1ULL << EF10_STAT_port_rx_dp_q_disabled_packets) | \ + (1ULL << EF10_STAT_port_rx_dp_di_dropped_packets) | \ + (1ULL << EF10_STAT_port_rx_dp_streaming_packets) | \ + (1ULL << EF10_STAT_port_rx_dp_hlb_fetch) | \ + (1ULL << EF10_STAT_port_rx_dp_hlb_wait)) static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx) { @@ -894,6 +1255,10 @@ static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx) u32 port_caps = efx_mcdi_phy_get_caps(efx); struct efx_ef10_nic_data *nic_data = efx->nic_data; + if (!(efx->mcdi->fn_flags & + 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL)) + return 0; + if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) raw_mask |= HUNT_40G_EXTRA_STAT_MASK; else @@ -908,13 +1273,28 @@ static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx) static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask) { - u64 raw_mask = efx_ef10_raw_stat_mask(efx); + struct efx_ef10_nic_data *nic_data = efx->nic_data; + u64 raw_mask[2]; + + raw_mask[0] = efx_ef10_raw_stat_mask(efx); + + /* Only show vadaptor stats when EVB capability is present */ + if (nic_data->datapath_caps & + (1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN)) { + raw_mask[0] |= ~((1ULL << EF10_STAT_rx_unicast) - 1); + raw_mask[1] = (1ULL << (EF10_STAT_COUNT - 63)) - 1; + } else { + raw_mask[1] = 0; + } #if BITS_PER_LONG == 64 - mask[0] = raw_mask; + mask[0] = raw_mask[0]; + mask[1] = raw_mask[1]; #else - mask[0] = raw_mask & 0xffffffff; - mask[1] = raw_mask >> 32; + mask[0] = raw_mask[0] & 0xffffffff; + mask[1] = raw_mask[0] >> 32; + mask[2] = raw_mask[1] & 0xffffffff; + mask[3] = raw_mask[1] >> 32; #endif } @@ -927,7 +1307,76 @@ static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names) mask, names); } -static int efx_ef10_try_update_nic_stats(struct efx_nic *efx) +static size_t efx_ef10_update_stats_common(struct efx_nic *efx, u64 *full_stats, + struct rtnl_link_stats64 *core_stats) +{ + DECLARE_BITMAP(mask, EF10_STAT_COUNT); + struct efx_ef10_nic_data *nic_data = efx->nic_data; + u64 *stats = nic_data->stats; + size_t stats_count = 0, index; + + efx_ef10_get_stat_mask(efx, mask); + + if (full_stats) { + 
for_each_set_bit(index, mask, EF10_STAT_COUNT) { + if (efx_ef10_stat_desc[index].name) { + *full_stats++ = stats[index]; + ++stats_count; + } + } + } + + if (!core_stats) + return stats_count; + + if (nic_data->datapath_caps & + 1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN) { + /* Use vadaptor stats. */ + core_stats->rx_packets = stats[EF10_STAT_rx_unicast] + + stats[EF10_STAT_rx_multicast] + + stats[EF10_STAT_rx_broadcast]; + core_stats->tx_packets = stats[EF10_STAT_tx_unicast] + + stats[EF10_STAT_tx_multicast] + + stats[EF10_STAT_tx_broadcast]; + core_stats->rx_bytes = stats[EF10_STAT_rx_unicast_bytes] + + stats[EF10_STAT_rx_multicast_bytes] + + stats[EF10_STAT_rx_broadcast_bytes]; + core_stats->tx_bytes = stats[EF10_STAT_tx_unicast_bytes] + + stats[EF10_STAT_tx_multicast_bytes] + + stats[EF10_STAT_tx_broadcast_bytes]; + core_stats->rx_dropped = stats[GENERIC_STAT_rx_nodesc_trunc] + + stats[GENERIC_STAT_rx_noskb_drops]; + core_stats->multicast = stats[EF10_STAT_rx_multicast]; + core_stats->rx_crc_errors = stats[EF10_STAT_rx_bad]; + core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow]; + core_stats->rx_errors = core_stats->rx_crc_errors; + core_stats->tx_errors = stats[EF10_STAT_tx_bad]; + } else { + /* Use port stats. */ + core_stats->rx_packets = stats[EF10_STAT_port_rx_packets]; + core_stats->tx_packets = stats[EF10_STAT_port_tx_packets]; + core_stats->rx_bytes = stats[EF10_STAT_port_rx_bytes]; + core_stats->tx_bytes = stats[EF10_STAT_port_tx_bytes]; + core_stats->rx_dropped = stats[EF10_STAT_port_rx_nodesc_drops] + + stats[GENERIC_STAT_rx_nodesc_trunc] + + stats[GENERIC_STAT_rx_noskb_drops]; + core_stats->multicast = stats[EF10_STAT_port_rx_multicast]; + core_stats->rx_length_errors = + stats[EF10_STAT_port_rx_gtjumbo] + + stats[EF10_STAT_port_rx_length_error]; + core_stats->rx_crc_errors = stats[EF10_STAT_port_rx_bad]; + core_stats->rx_frame_errors = + stats[EF10_STAT_port_rx_align_error]; + core_stats->rx_fifo_errors = stats[EF10_STAT_port_rx_overflow]; + core_stats->rx_errors = (core_stats->rx_length_errors + + core_stats->rx_crc_errors + + core_stats->rx_frame_errors); + } + + return stats_count; +} + +static int efx_ef10_try_update_nic_stats_pf(struct efx_nic *efx) { struct efx_ef10_nic_data *nic_data = efx->nic_data; DECLARE_BITMAP(mask, EF10_STAT_COUNT); @@ -952,67 +1401,114 @@ static int efx_ef10_try_update_nic_stats(struct efx_nic *efx) return -EAGAIN; /* Update derived statistics */ - efx_nic_fix_nodesc_drop_stat(efx, &stats[EF10_STAT_rx_nodesc_drops]); - stats[EF10_STAT_rx_good_bytes] = - stats[EF10_STAT_rx_bytes] - - stats[EF10_STAT_rx_bytes_minus_good_bytes]; - efx_update_diff_stat(&stats[EF10_STAT_rx_bad_bytes], - stats[EF10_STAT_rx_bytes_minus_good_bytes]); + efx_nic_fix_nodesc_drop_stat(efx, + &stats[EF10_STAT_port_rx_nodesc_drops]); + stats[EF10_STAT_port_rx_good_bytes] = + stats[EF10_STAT_port_rx_bytes] - + stats[EF10_STAT_port_rx_bytes_minus_good_bytes]; + efx_update_diff_stat(&stats[EF10_STAT_port_rx_bad_bytes], + stats[EF10_STAT_port_rx_bytes_minus_good_bytes]); efx_update_sw_stats(efx, stats); return 0; } -static size_t efx_ef10_update_stats(struct efx_nic *efx, u64 *full_stats, - struct rtnl_link_stats64 *core_stats) +static size_t efx_ef10_update_stats_pf(struct efx_nic *efx, u64 *full_stats, + struct rtnl_link_stats64 *core_stats) { - DECLARE_BITMAP(mask, EF10_STAT_COUNT); - struct efx_ef10_nic_data *nic_data = efx->nic_data; - u64 *stats = nic_data->stats; - size_t stats_count = 0, index; int retry; - efx_ef10_get_stat_mask(efx, mask); - /* If we're unlucky 
enough to read statistics during the DMA, wait * up to 10ms for it to finish (typically takes <500us) */ for (retry = 0; retry < 100; ++retry) { - if (efx_ef10_try_update_nic_stats(efx) == 0) + if (efx_ef10_try_update_nic_stats_pf(efx) == 0) break; udelay(100); } - if (full_stats) { - for_each_set_bit(index, mask, EF10_STAT_COUNT) { - if (efx_ef10_stat_desc[index].name) { - *full_stats++ = stats[index]; - ++stats_count; - } - } + return efx_ef10_update_stats_common(efx, full_stats, core_stats); +} + +static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN); + struct efx_ef10_nic_data *nic_data = efx->nic_data; + DECLARE_BITMAP(mask, EF10_STAT_COUNT); + __le64 generation_start, generation_end; + u64 *stats = nic_data->stats; + u32 dma_len = MC_CMD_MAC_NSTATS * sizeof(u64); + struct efx_buffer stats_buf; + __le64 *dma_stats; + int rc; + + spin_unlock_bh(&efx->stats_lock); + + if (in_interrupt()) { + /* If in atomic context, cannot update stats. Just update the + * software stats and return so the caller can continue. + */ + spin_lock_bh(&efx->stats_lock); + efx_update_sw_stats(efx, stats); + return 0; } - if (core_stats) { - core_stats->rx_packets = stats[EF10_STAT_rx_packets]; - core_stats->tx_packets = stats[EF10_STAT_tx_packets]; - core_stats->rx_bytes = stats[EF10_STAT_rx_bytes]; - core_stats->tx_bytes = stats[EF10_STAT_tx_bytes]; - core_stats->rx_dropped = stats[EF10_STAT_rx_nodesc_drops] + - stats[GENERIC_STAT_rx_nodesc_trunc] + - stats[GENERIC_STAT_rx_noskb_drops]; - core_stats->multicast = stats[EF10_STAT_rx_multicast]; - core_stats->rx_length_errors = - stats[EF10_STAT_rx_gtjumbo] + - stats[EF10_STAT_rx_length_error]; - core_stats->rx_crc_errors = stats[EF10_STAT_rx_bad]; - core_stats->rx_frame_errors = stats[EF10_STAT_rx_align_error]; - core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow]; - core_stats->rx_errors = (core_stats->rx_length_errors + - core_stats->rx_crc_errors + - core_stats->rx_frame_errors); + efx_ef10_get_stat_mask(efx, mask); + + rc = efx_nic_alloc_buffer(efx, &stats_buf, dma_len, GFP_ATOMIC); + if (rc) { + spin_lock_bh(&efx->stats_lock); + return rc; } - return stats_count; + dma_stats = stats_buf.addr; + dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID; + + MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, stats_buf.dma_addr); + MCDI_POPULATE_DWORD_1(inbuf, MAC_STATS_IN_CMD, + MAC_STATS_IN_DMA, 1); + MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len); + MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, EVB_PORT_ID_ASSIGNED); + + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf), + NULL, 0, NULL); + spin_lock_bh(&efx->stats_lock); + if (rc) { + /* Expect ENOENT if DMA queues have not been set up */ + if (rc != -ENOENT || atomic_read(&efx->active_queues)) + efx_mcdi_display_error(efx, MC_CMD_MAC_STATS, + sizeof(inbuf), NULL, 0, rc); + goto out; + } + + generation_end = dma_stats[MC_CMD_MAC_GENERATION_END]; + if (generation_end == EFX_MC_STATS_GENERATION_INVALID) { + WARN_ON_ONCE(1); + goto out; + } + rmb(); + efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask, + stats, stats_buf.addr, false); + rmb(); + generation_start = dma_stats[MC_CMD_MAC_GENERATION_START]; + if (generation_end != generation_start) { + rc = -EAGAIN; + goto out; + } + + efx_update_sw_stats(efx, stats); +out: + efx_nic_free_buffer(efx, &stats_buf); + return rc; +} + +static size_t efx_ef10_update_stats_vf(struct efx_nic *efx, u64 *full_stats, + struct rtnl_link_stats64 *core_stats) +{ + 
if (efx_ef10_try_update_nic_stats_vf(efx)) + return 0; + + return efx_ef10_update_stats_common(efx, full_stats, core_stats); } static void efx_ef10_push_irq_moderation(struct efx_channel *channel) @@ -1044,6 +1540,14 @@ static void efx_ef10_push_irq_moderation(struct efx_channel *channel) } } +static void efx_ef10_get_wol_vf(struct efx_nic *efx, + struct ethtool_wolinfo *wol) {} + +static int efx_ef10_set_wol_vf(struct efx_nic *efx, u32 type) +{ + return -EOPNOTSUPP; +} + static void efx_ef10_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol) { wol->supported = 0; @@ -1100,6 +1604,22 @@ efx_ef10_mcdi_read_response(struct efx_nic *efx, efx_dword_t *outbuf, memcpy(outbuf, pdu + offset, outlen); } +static void efx_ef10_mcdi_reboot_detected(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + + /* All our allocations have been reset */ + efx_ef10_reset_mc_allocations(efx); + + /* The datapath firmware might have been changed */ + nic_data->must_check_datapath_caps = true; + + /* MAC statistics have been cleared on the NIC; clear the local + * statistic that we update with efx_update_diff_stat(). + */ + nic_data->stats[EF10_STAT_port_rx_bad_bytes] = 0; +} + static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx) { struct efx_ef10_nic_data *nic_data = efx->nic_data; @@ -1119,17 +1639,7 @@ static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx) return 0; nic_data->warm_boot_count = rc; - - /* All our allocations have been reset */ - efx_ef10_reset_mc_allocations(efx); - - /* The datapath firmware might have been changed */ - nic_data->must_check_datapath_caps = true; - - /* MAC statistics have been cleared on the NIC; clear the local - * statistic that we update with efx_update_diff_stat(). - */ - nic_data->stats[EF10_STAT_rx_bad_bytes] = 0; + efx_ef10_mcdi_reboot_detected(efx); return -EIO; } @@ -1232,16 +1742,17 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue) { MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 / EFX_BUF_SIZE)); - MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_TXQ_OUT_LEN); bool csum_offload = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD; size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE; struct efx_channel *channel = tx_queue->channel; struct efx_nic *efx = tx_queue->efx; - size_t inlen, outlen; + struct efx_ef10_nic_data *nic_data = efx->nic_data; + size_t inlen; dma_addr_t dma_addr; efx_qword_t *txd; int rc; int i; + BUILD_BUG_ON(MC_CMD_INIT_TXQ_OUT_LEN != 0); MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1); MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel); @@ -1251,7 +1762,7 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue) INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !csum_offload, INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload); MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0); - MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED); + MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, nic_data->vport_id); dma_addr = tx_queue->txd.buf.dma_addr; @@ -1266,7 +1777,7 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue) inlen = MC_CMD_INIT_TXQ_IN_LEN(entries); rc = efx_mcdi_rpc(efx, MC_CMD_INIT_TXQ, inbuf, inlen, - outbuf, sizeof(outbuf), &outlen); + NULL, 0, NULL); if (rc) goto fail; @@ -1299,7 +1810,7 @@ fail: static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue) { MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_TXQ_IN_LEN); - MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_TXQ_OUT_LEN); + MCDI_DECLARE_BUF_ERR(outbuf); struct efx_nic *efx = tx_queue->efx; size_t outlen; int rc; @@ -1344,7 +1855,9 
@@ static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue) unsigned int write_ptr; efx_qword_t *txd; - BUG_ON(tx_queue->write_count == tx_queue->insert_count); + tx_queue->xmit_more_available = false; + if (unlikely(tx_queue->write_count == tx_queue->insert_count)) + return; do { write_ptr = tx_queue->write_count & tx_queue->ptr_mask; @@ -1378,19 +1891,33 @@ static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue) } } -static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context) +static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context, + bool exclusive, unsigned *context_size) { MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN); MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN); + struct efx_ef10_nic_data *nic_data = efx->nic_data; size_t outlen; int rc; + u32 alloc_type = exclusive ? + MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE : + MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED; + unsigned rss_spread = exclusive ? + efx->rss_spread : + min(rounddown_pow_of_two(efx->rss_spread), + EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE); + + if (!exclusive && rss_spread == 1) { + *context = EFX_EF10_RSS_CONTEXT_INVALID; + if (context_size) + *context_size = 1; + return 0; + } MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID, - EVB_PORT_ID_ASSIGNED); - MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE, - MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE); - MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES, - EFX_MAX_CHANNELS); + nic_data->vport_id); + MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE, alloc_type); + MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES, rss_spread); rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_ALLOC, inbuf, sizeof(inbuf), outbuf, sizeof(outbuf), &outlen); @@ -1402,6 +1929,9 @@ static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context) *context = MCDI_DWORD(outbuf, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID); + if (context_size) + *context_size = rss_spread; + return 0; } @@ -1418,7 +1948,8 @@ static void efx_ef10_free_rss_context(struct efx_nic *efx, u32 context) WARN_ON(rc != 0); } -static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context) +static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context, + const u32 *rx_indir_table) { MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN); MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN); @@ -1432,7 +1963,7 @@ static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context) for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); ++i) MCDI_PTR(tablebuf, RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE)[i] = - (u8) efx->rx_indir_table[i]; + (u8) rx_indir_table[i]; rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_TABLE, tablebuf, sizeof(tablebuf), NULL, 0, NULL); @@ -1460,27 +1991,119 @@ static void efx_ef10_rx_free_indir_table(struct efx_nic *efx) nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID; } -static void efx_ef10_rx_push_rss_config(struct efx_nic *efx) +static int efx_ef10_rx_push_shared_rss_config(struct efx_nic *efx, + unsigned *context_size) { + u32 new_rx_rss_context; struct efx_ef10_nic_data *nic_data = efx->nic_data; - int rc; + int rc = efx_ef10_alloc_rss_context(efx, &new_rx_rss_context, + false, context_size); - netif_dbg(efx, drv, efx->net_dev, "pushing RSS config\n"); + if (rc != 0) + return rc; - if (nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID) { - rc = efx_ef10_alloc_rss_context(efx, &nic_data->rx_rss_context); - if (rc != 0) - goto fail; + nic_data->rx_rss_context = new_rx_rss_context; + 
nic_data->rx_rss_context_exclusive = false; + efx_set_default_rx_indir_table(efx); + return 0; +} + +static int efx_ef10_rx_push_exclusive_rss_config(struct efx_nic *efx, + const u32 *rx_indir_table) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + int rc; + u32 new_rx_rss_context; + + if (nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID || + !nic_data->rx_rss_context_exclusive) { + rc = efx_ef10_alloc_rss_context(efx, &new_rx_rss_context, + true, NULL); + if (rc == -EOPNOTSUPP) + return rc; + else if (rc != 0) + goto fail1; + } else { + new_rx_rss_context = nic_data->rx_rss_context; } - rc = efx_ef10_populate_rss_table(efx, nic_data->rx_rss_context); + rc = efx_ef10_populate_rss_table(efx, new_rx_rss_context, + rx_indir_table); if (rc != 0) - goto fail; + goto fail2; - return; + if (nic_data->rx_rss_context != new_rx_rss_context) + efx_ef10_rx_free_indir_table(efx); + nic_data->rx_rss_context = new_rx_rss_context; + nic_data->rx_rss_context_exclusive = true; + if (rx_indir_table != efx->rx_indir_table) + memcpy(efx->rx_indir_table, rx_indir_table, + sizeof(efx->rx_indir_table)); + return 0; -fail: +fail2: + if (new_rx_rss_context != nic_data->rx_rss_context) + efx_ef10_free_rss_context(efx, new_rx_rss_context); +fail1: netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); + return rc; +} + +static int efx_ef10_pf_rx_push_rss_config(struct efx_nic *efx, bool user, + const u32 *rx_indir_table) +{ + int rc; + + if (efx->rss_spread == 1) + return 0; + + rc = efx_ef10_rx_push_exclusive_rss_config(efx, rx_indir_table); + + if (rc == -ENOBUFS && !user) { + unsigned context_size; + bool mismatch = false; + size_t i; + + for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table) && !mismatch; + i++) + mismatch = rx_indir_table[i] != + ethtool_rxfh_indir_default(i, efx->rss_spread); + + rc = efx_ef10_rx_push_shared_rss_config(efx, &context_size); + if (rc == 0) { + if (context_size != efx->rss_spread) + netif_warn(efx, probe, efx->net_dev, + "Could not allocate an exclusive RSS" + " context; allocated a shared one of" + " different size." 
+ " Wanted %u, got %u.\n", + efx->rss_spread, context_size); + else if (mismatch) + netif_warn(efx, probe, efx->net_dev, + "Could not allocate an exclusive RSS" + " context; allocated a shared one but" + " could not apply custom" + " indirection.\n"); + else + netif_info(efx, probe, efx->net_dev, + "Could not allocate an exclusive RSS" + " context; allocated a shared one.\n"); + } + } + return rc; +} + +static int efx_ef10_vf_rx_push_rss_config(struct efx_nic *efx, bool user, + const u32 *rx_indir_table + __attribute__ ((unused))) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + + if (user) + return -EOPNOTSUPP; + if (nic_data->rx_rss_context != EFX_EF10_RSS_CONTEXT_INVALID) + return 0; + return efx_ef10_rx_push_shared_rss_config(efx, NULL); } static int efx_ef10_rx_probe(struct efx_rx_queue *rx_queue) @@ -1496,14 +2119,15 @@ static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue) MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_RXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 / EFX_BUF_SIZE)); - MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_RXQ_OUT_LEN); struct efx_channel *channel = efx_rx_queue_channel(rx_queue); size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE; struct efx_nic *efx = rx_queue->efx; - size_t inlen, outlen; + struct efx_ef10_nic_data *nic_data = efx->nic_data; + size_t inlen; dma_addr_t dma_addr; int rc; int i; + BUILD_BUG_ON(MC_CMD_INIT_RXQ_OUT_LEN != 0); rx_queue->scatter_n = 0; rx_queue->scatter_len = 0; @@ -1517,7 +2141,7 @@ static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue) INIT_RXQ_IN_FLAG_PREFIX, 1, INIT_RXQ_IN_FLAG_TIMESTAMP, 1); MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0); - MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED); + MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, nic_data->vport_id); dma_addr = rx_queue->rxd.buf.dma_addr; @@ -1532,7 +2156,7 @@ static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue) inlen = MC_CMD_INIT_RXQ_IN_LEN(entries); rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, inlen, - outbuf, sizeof(outbuf), &outlen); + NULL, 0, NULL); if (rc) netdev_WARN(efx->net_dev, "failed to initialise RXQ %d\n", efx_rx_queue_index(rx_queue)); @@ -1541,7 +2165,7 @@ static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue) static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue) { MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_RXQ_IN_LEN); - MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_RXQ_OUT_LEN); + MCDI_DECLARE_BUF_ERR(outbuf); struct efx_nic *efx = rx_queue->efx; size_t outlen; int rc; @@ -1646,6 +2270,29 @@ static int efx_ef10_ev_probe(struct efx_channel *channel) GFP_KERNEL); } +static void efx_ef10_ev_fini(struct efx_channel *channel) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN); + MCDI_DECLARE_BUF_ERR(outbuf); + struct efx_nic *efx = channel->efx; + size_t outlen; + int rc; + + MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel); + + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + + if (rc && rc != -EALREADY) + goto fail; + + return; + +fail: + efx_mcdi_display_error(efx, MC_CMD_FINI_EVQ, MC_CMD_FINI_EVQ_IN_LEN, + outbuf, outlen, rc); +} + static int efx_ef10_ev_init(struct efx_channel *channel) { MCDI_DECLARE_BUF(inbuf, @@ -1657,6 +2304,7 @@ static int efx_ef10_ev_init(struct efx_channel *channel) struct efx_ef10_nic_data *nic_data; bool supports_rx_merge; size_t inlen, outlen; + unsigned int enabled, implemented; dma_addr_t dma_addr; int rc; int i; @@ -1697,30 +2345,52 @@ static int efx_ef10_ev_init(struct efx_channel *channel) rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, 
inlen, outbuf, sizeof(outbuf), &outlen); /* IRQ return is ignored */ - return rc; -} - -static void efx_ef10_ev_fini(struct efx_channel *channel) -{ - MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN); - MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_EVQ_OUT_LEN); - struct efx_nic *efx = channel->efx; - size_t outlen; - int rc; - - MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel); - - rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf), - outbuf, sizeof(outbuf), &outlen); + if (channel->channel || rc) + return rc; - if (rc && rc != -EALREADY) + /* Successfully created event queue on channel 0 */ + rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled); + if (rc == -ENOSYS) { + /* GET_WORKAROUNDS was implemented before the bug26807 + * workaround, thus the latter must be unavailable in this fw + */ + nic_data->workaround_26807 = false; + rc = 0; + } else if (rc) { goto fail; + } else { + nic_data->workaround_26807 = + !!(enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807); + + if (implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807 && + !nic_data->workaround_26807) { + unsigned int flags; + + rc = efx_mcdi_set_workaround(efx, + MC_CMD_WORKAROUND_BUG26807, + true, &flags); + + if (!rc) { + if (flags & + 1 << MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN) { + netif_info(efx, drv, efx->net_dev, + "other functions on NIC have been reset\n"); + /* MC's boot count has incremented */ + ++nic_data->warm_boot_count; + } + nic_data->workaround_26807 = true; + } else if (rc == -EPERM) { + rc = 0; + } + } + } - return; + if (!rc) + return 0; fail: - efx_mcdi_display_error(efx, MC_CMD_FINI_EVQ, MC_CMD_FINI_EVQ_IN_LEN, - outbuf, outlen, rc); + efx_ef10_ev_fini(channel); + return rc; } static void efx_ef10_ev_remove(struct efx_channel *channel) @@ -2286,11 +2956,12 @@ static void efx_ef10_filter_push_prep(struct efx_nic *efx, match_fields); } - MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, EVB_PORT_ID_ASSIGNED); + MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, nic_data->vport_id); MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST, spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ? MC_CMD_FILTER_OP_IN_RX_DEST_DROP : MC_CMD_FILTER_OP_IN_RX_DEST_HOST); + MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DOMAIN, 0); MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DEST, MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT); MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE, @@ -2628,7 +3299,8 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx, new_spec.priority = EFX_FILTER_PRI_AUTO; new_spec.flags = (EFX_FILTER_FLAG_RX | - EFX_FILTER_FLAG_RX_RSS); + (efx_rss_enabled(efx) ? 
+ EFX_FILTER_FLAG_RX_RSS : 0)); new_spec.dmaq_id = 0; new_spec.rss_context = EFX_FILTER_RSS_CONTEXT_DEFAULT; rc = efx_ef10_filter_push(efx, &new_spec, @@ -2673,6 +3345,19 @@ static int efx_ef10_filter_remove_safe(struct efx_nic *efx, filter_id, false); } +static u32 efx_ef10_filter_get_unsafe_id(struct efx_nic *efx, u32 filter_id) +{ + return filter_id % HUNT_FILTER_TBL_ROWS; +} + +static int efx_ef10_filter_remove_unsafe(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 filter_id) +{ + return efx_ef10_filter_remove_internal(efx, 1U << priority, + filter_id, true); +} + static int efx_ef10_filter_get_safe(struct efx_nic *efx, enum efx_filter_priority priority, u32 filter_id, struct efx_filter_spec *spec) @@ -3046,6 +3731,10 @@ static int efx_ef10_filter_table_probe(struct efx_nic *efx) goto fail; } + table->ucdef_id = EFX_EF10_FILTER_ID_INVALID; + table->bcast_id = EFX_EF10_FILTER_ID_INVALID; + table->mcdef_id = EFX_EF10_FILTER_ID_INVALID; + efx->filter_state = table; init_waitqueue_head(&table->waitq); return 0; @@ -3055,6 +3744,9 @@ fail: return rc; } +/* Caller must hold efx->filter_sem for read if race against + * efx_ef10_filter_table_remove() is possible + */ static void efx_ef10_filter_table_restore(struct efx_nic *efx) { struct efx_ef10_filter_table *table = efx->filter_state; @@ -3064,9 +3756,14 @@ static void efx_ef10_filter_table_restore(struct efx_nic *efx) bool failed = false; int rc; + WARN_ON(!rwsem_is_locked(&efx->filter_sem)); + if (!nic_data->must_restore_filters) return; + if (!table) + return; + spin_lock_bh(&efx->filter_lock); for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) { @@ -3102,6 +3799,7 @@ static void efx_ef10_filter_table_restore(struct efx_nic *efx) nic_data->must_restore_filters = false; } +/* Caller must hold efx->filter_sem for write */ static void efx_ef10_filter_table_remove(struct efx_nic *efx) { struct efx_ef10_filter_table *table = efx->filter_state; @@ -3110,6 +3808,10 @@ static void efx_ef10_filter_table_remove(struct efx_nic *efx) unsigned int filter_idx; int rc; + efx->filter_state = NULL; + if (!table) + return; + for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) { spec = efx_ef10_filter_entry_spec(table, filter_idx); if (!spec) @@ -3135,139 +3837,232 @@ static void efx_ef10_filter_table_remove(struct efx_nic *efx) kfree(table); } -static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx) +#define EFX_EF10_FILTER_DO_MARK_OLD(id) \ + if (id != EFX_EF10_FILTER_ID_INVALID) { \ + filter_idx = efx_ef10_filter_get_unsafe_id(efx, id); \ + WARN_ON(!table->entry[filter_idx].spec); \ + table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD; \ + } +static void efx_ef10_filter_mark_old(struct efx_nic *efx) { struct efx_ef10_filter_table *table = efx->filter_state; - struct net_device *net_dev = efx->net_dev; - struct efx_filter_spec spec; - bool remove_failed = false; - struct netdev_hw_addr *uc; - struct netdev_hw_addr *mc; - unsigned int filter_idx; - int i, n, rc; + unsigned int filter_idx, i; - if (!efx_dev_registered(efx)) + if (!table) return; /* Mark old filters that may need to be removed */ spin_lock_bh(&efx->filter_lock); - n = table->dev_uc_count < 0 ? 1 : table->dev_uc_count; - for (i = 0; i < n; i++) { - filter_idx = table->dev_uc_list[i].id % HUNT_FILTER_TBL_ROWS; - table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD; - } - n = table->dev_mc_count < 0 ? 
1 : table->dev_mc_count; - for (i = 0; i < n; i++) { - filter_idx = table->dev_mc_list[i].id % HUNT_FILTER_TBL_ROWS; - table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD; - } + for (i = 0; i < table->dev_uc_count; i++) + EFX_EF10_FILTER_DO_MARK_OLD(table->dev_uc_list[i].id); + for (i = 0; i < table->dev_mc_count; i++) + EFX_EF10_FILTER_DO_MARK_OLD(table->dev_mc_list[i].id); + EFX_EF10_FILTER_DO_MARK_OLD(table->ucdef_id); + EFX_EF10_FILTER_DO_MARK_OLD(table->bcast_id); + EFX_EF10_FILTER_DO_MARK_OLD(table->mcdef_id); spin_unlock_bh(&efx->filter_lock); +} +#undef EFX_EF10_FILTER_DO_MARK_OLD - /* Copy/convert the address lists; add the primary station - * address and broadcast address - */ - netif_addr_lock_bh(net_dev); - if (net_dev->flags & IFF_PROMISC || - netdev_uc_count(net_dev) >= EFX_EF10_FILTER_DEV_UC_MAX) { - table->dev_uc_count = -1; - } else { - table->dev_uc_count = 1 + netdev_uc_count(net_dev); - ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr); - i = 1; - netdev_for_each_uc_addr(uc, net_dev) { - ether_addr_copy(table->dev_uc_list[i].addr, uc->addr); - i++; +static void efx_ef10_filter_uc_addr_list(struct efx_nic *efx, bool *promisc) +{ + struct efx_ef10_filter_table *table = efx->filter_state; + struct net_device *net_dev = efx->net_dev; + struct netdev_hw_addr *uc; + int addr_count; + unsigned int i; + + table->ucdef_id = EFX_EF10_FILTER_ID_INVALID; + addr_count = netdev_uc_count(net_dev); + if (net_dev->flags & IFF_PROMISC) + *promisc = true; + table->dev_uc_count = 1 + addr_count; + ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr); + i = 1; + netdev_for_each_uc_addr(uc, net_dev) { + if (i >= EFX_EF10_FILTER_DEV_UC_MAX) { + *promisc = true; + break; } + ether_addr_copy(table->dev_uc_list[i].addr, uc->addr); + table->dev_uc_list[i].id = EFX_EF10_FILTER_ID_INVALID; + i++; } - if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI) || - netdev_mc_count(net_dev) >= EFX_EF10_FILTER_DEV_MC_MAX) { - table->dev_mc_count = -1; - } else { - table->dev_mc_count = 1 + netdev_mc_count(net_dev); - eth_broadcast_addr(table->dev_mc_list[0].addr); - i = 1; - netdev_for_each_mc_addr(mc, net_dev) { - ether_addr_copy(table->dev_mc_list[i].addr, mc->addr); - i++; +} + +static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx, bool *promisc) +{ + struct efx_ef10_filter_table *table = efx->filter_state; + struct net_device *net_dev = efx->net_dev; + struct netdev_hw_addr *mc; + unsigned int i, addr_count; + + table->mcdef_id = EFX_EF10_FILTER_ID_INVALID; + table->bcast_id = EFX_EF10_FILTER_ID_INVALID; + if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) + *promisc = true; + + addr_count = netdev_mc_count(net_dev); + i = 0; + netdev_for_each_mc_addr(mc, net_dev) { + if (i >= EFX_EF10_FILTER_DEV_MC_MAX) { + *promisc = true; + break; } + ether_addr_copy(table->dev_mc_list[i].addr, mc->addr); + table->dev_mc_list[i].id = EFX_EF10_FILTER_ID_INVALID; + i++; } - netif_addr_unlock_bh(net_dev); - /* Insert/renew unicast filters */ - if (table->dev_uc_count >= 0) { - for (i = 0; i < table->dev_uc_count; i++) { - efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, - EFX_FILTER_FLAG_RX_RSS, - 0); - efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, - table->dev_uc_list[i].addr); - rc = efx_ef10_filter_insert(efx, &spec, true); - if (rc < 0) { - /* Fall back to unicast-promisc */ - while (i--) - efx_ef10_filter_remove_safe( + table->dev_mc_count = i; +} + +static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx, + bool multicast, bool rollback) +{ + struct 
efx_ef10_filter_table *table = efx->filter_state; + struct efx_ef10_dev_addr *addr_list; + enum efx_filter_flags filter_flags; + struct efx_filter_spec spec; + u8 baddr[ETH_ALEN]; + unsigned int i, j; + int addr_count; + int rc; + + if (multicast) { + addr_list = table->dev_mc_list; + addr_count = table->dev_mc_count; + } else { + addr_list = table->dev_uc_list; + addr_count = table->dev_uc_count; + } + + filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0; + + /* Insert/renew filters */ + for (i = 0; i < addr_count; i++) { + efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); + efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, + addr_list[i].addr); + rc = efx_ef10_filter_insert(efx, &spec, true); + if (rc < 0) { + if (rollback) { + netif_info(efx, drv, efx->net_dev, + "efx_ef10_filter_insert failed rc=%d\n", + rc); + /* Fall back to promiscuous */ + for (j = 0; j < i; j++) { + if (addr_list[j].id == EFX_EF10_FILTER_ID_INVALID) + continue; + efx_ef10_filter_remove_unsafe( efx, EFX_FILTER_PRI_AUTO, - table->dev_uc_list[i].id); - table->dev_uc_count = -1; - break; + addr_list[j].id); + addr_list[j].id = EFX_EF10_FILTER_ID_INVALID; + } + return rc; + } else { + /* mark as not inserted, and carry on */ + rc = EFX_EF10_FILTER_ID_INVALID; } - table->dev_uc_list[i].id = rc; } + addr_list[i].id = efx_ef10_filter_get_unsafe_id(efx, rc); } - if (table->dev_uc_count < 0) { - efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, - EFX_FILTER_FLAG_RX_RSS, - 0); - efx_filter_set_uc_def(&spec); + + if (multicast && rollback) { + /* Also need an Ethernet broadcast filter */ + efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); + eth_broadcast_addr(baddr); + efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, baddr); rc = efx_ef10_filter_insert(efx, &spec, true); if (rc < 0) { - WARN_ON(1); - table->dev_uc_count = 0; + netif_warn(efx, drv, efx->net_dev, + "Broadcast filter insert failed rc=%d\n", rc); + /* Fall back to promiscuous */ + for (j = 0; j < i; j++) { + if (addr_list[j].id == EFX_EF10_FILTER_ID_INVALID) + continue; + efx_ef10_filter_remove_unsafe( + efx, EFX_FILTER_PRI_AUTO, + addr_list[j].id); + addr_list[j].id = EFX_EF10_FILTER_ID_INVALID; + } + return rc; } else { - table->dev_uc_list[0].id = rc; + table->bcast_id = efx_ef10_filter_get_unsafe_id(efx, rc); } } - /* Insert/renew multicast filters */ - if (table->dev_mc_count >= 0) { - for (i = 0; i < table->dev_mc_count; i++) { + return 0; +} + +static int efx_ef10_filter_insert_def(struct efx_nic *efx, bool multicast, + bool rollback) +{ + struct efx_ef10_filter_table *table = efx->filter_state; + struct efx_ef10_nic_data *nic_data = efx->nic_data; + enum efx_filter_flags filter_flags; + struct efx_filter_spec spec; + u8 baddr[ETH_ALEN]; + int rc; + + filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0; + + efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); + + if (multicast) + efx_filter_set_mc_def(&spec); + else + efx_filter_set_uc_def(&spec); + + rc = efx_ef10_filter_insert(efx, &spec, true); + if (rc < 0) { + netif_warn(efx, drv, efx->net_dev, + "%scast mismatch filter insert failed rc=%d\n", + multicast ? 
"Multi" : "Uni", rc); + } else if (multicast) { + table->mcdef_id = efx_ef10_filter_get_unsafe_id(efx, rc); + if (!nic_data->workaround_26807) { + /* Also need an Ethernet broadcast filter */ efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, - EFX_FILTER_FLAG_RX_RSS, - 0); + filter_flags, 0); + eth_broadcast_addr(baddr); efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, - table->dev_mc_list[i].addr); + baddr); rc = efx_ef10_filter_insert(efx, &spec, true); if (rc < 0) { - /* Fall back to multicast-promisc */ - while (i--) - efx_ef10_filter_remove_safe( - efx, EFX_FILTER_PRI_AUTO, - table->dev_mc_list[i].id); - table->dev_mc_count = -1; - break; + netif_warn(efx, drv, efx->net_dev, + "Broadcast filter insert failed rc=%d\n", + rc); + if (rollback) { + /* Roll back the mc_def filter */ + efx_ef10_filter_remove_unsafe( + efx, EFX_FILTER_PRI_AUTO, + table->mcdef_id); + table->mcdef_id = EFX_EF10_FILTER_ID_INVALID; + return rc; + } + } else { + table->bcast_id = efx_ef10_filter_get_unsafe_id(efx, rc); } - table->dev_mc_list[i].id = rc; - } - } - if (table->dev_mc_count < 0) { - efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, - EFX_FILTER_FLAG_RX_RSS, - 0); - efx_filter_set_mc_def(&spec); - rc = efx_ef10_filter_insert(efx, &spec, true); - if (rc < 0) { - WARN_ON(1); - table->dev_mc_count = 0; - } else { - table->dev_mc_list[0].id = rc; } + rc = 0; + } else { + table->ucdef_id = rc; + rc = 0; } + return rc; +} + +/* Remove filters that weren't renewed. Since nothing else changes the AUTO_OLD + * flag or removes these filters, we don't need to hold the filter_lock while + * scanning for these filters. + */ +static void efx_ef10_filter_remove_old(struct efx_nic *efx) +{ + struct efx_ef10_filter_table *table = efx->filter_state; + bool remove_failed = false; + int i; - /* Remove filters that weren't renewed. Since nothing else - * changes the AUTO_OLD flag or removes these filters, we - * don't need to hold the filter_lock while scanning for - * these filters. 
- */ for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) { if (ACCESS_ONCE(table->entry[i].spec) & EFX_EF10_FILTER_FLAG_AUTO_OLD) { @@ -3280,6 +4075,230 @@ static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx) WARN_ON(remove_failed); } +static int efx_ef10_vport_set_mac_address(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + u8 mac_old[ETH_ALEN]; + int rc, rc2; + + /* Only reconfigure a PF-created vport */ + if (is_zero_ether_addr(nic_data->vport_mac)) + return 0; + + efx_device_detach_sync(efx); + efx_net_stop(efx->net_dev); + down_write(&efx->filter_sem); + efx_ef10_filter_table_remove(efx); + up_write(&efx->filter_sem); + + rc = efx_ef10_vadaptor_free(efx, nic_data->vport_id); + if (rc) + goto restore_filters; + + ether_addr_copy(mac_old, nic_data->vport_mac); + rc = efx_ef10_vport_del_mac(efx, nic_data->vport_id, + nic_data->vport_mac); + if (rc) + goto restore_vadaptor; + + rc = efx_ef10_vport_add_mac(efx, nic_data->vport_id, + efx->net_dev->dev_addr); + if (!rc) { + ether_addr_copy(nic_data->vport_mac, efx->net_dev->dev_addr); + } else { + rc2 = efx_ef10_vport_add_mac(efx, nic_data->vport_id, mac_old); + if (rc2) { + /* Failed to add original MAC, so clear vport_mac */ + eth_zero_addr(nic_data->vport_mac); + goto reset_nic; + } + } + +restore_vadaptor: + rc2 = efx_ef10_vadaptor_alloc(efx, nic_data->vport_id); + if (rc2) + goto reset_nic; +restore_filters: + down_write(&efx->filter_sem); + rc2 = efx_ef10_filter_table_probe(efx); + up_write(&efx->filter_sem); + if (rc2) + goto reset_nic; + + rc2 = efx_net_open(efx->net_dev); + if (rc2) + goto reset_nic; + + netif_device_attach(efx->net_dev); + + return rc; + +reset_nic: + netif_err(efx, drv, efx->net_dev, + "Failed to restore when changing MAC address - scheduling reset\n"); + efx_schedule_reset(efx, RESET_TYPE_DATAPATH); + + return rc ? rc : rc2; +} + +/* Caller must hold efx->filter_sem for read if race against + * efx_ef10_filter_table_remove() is possible + */ +static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx) +{ + struct efx_ef10_filter_table *table = efx->filter_state; + struct efx_ef10_nic_data *nic_data = efx->nic_data; + struct net_device *net_dev = efx->net_dev; + bool uc_promisc = false, mc_promisc = false; + + if (!efx_dev_registered(efx)) + return; + + if (!table) + return; + + efx_ef10_filter_mark_old(efx); + + /* Copy/convert the address lists; add the primary station + * address and broadcast address + */ + netif_addr_lock_bh(net_dev); + efx_ef10_filter_uc_addr_list(efx, &uc_promisc); + efx_ef10_filter_mc_addr_list(efx, &mc_promisc); + netif_addr_unlock_bh(net_dev); + + /* Insert/renew unicast filters */ + if (uc_promisc) { + efx_ef10_filter_insert_def(efx, false, false); + efx_ef10_filter_insert_addr_list(efx, false, false); + } else { + /* If any of the filters failed to insert, fall back to + * promiscuous mode - add in the uc_def filter. But keep + * our individual unicast filters. 
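
The rewritten sync path orders its work around whether promiscuous mode is already known to be needed: if the per-address filters cannot all be inserted, it falls back to a catch-all default filter while keeping whatever individual filters did go in. A rough standalone model of that unicast decision, with invented helper names standing in for the driver's insert routines, is:

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-ins for the real insert calls: 0 on success, negative on failure. */
    static int insert_addr_list(bool fits)   { return fits ? 0 : -1; }
    static int insert_default_filter(void)   { return 0; }

    /* Either start from the catch-all filter (promisc already requested),
     * or try the address list first and only add the catch-all when the
     * list could not be fully inserted.
     */
    static void sync_uc_filters(bool uc_promisc, bool list_fits)
    {
        if (uc_promisc) {
            insert_default_filter();
            insert_addr_list(list_fits);
        } else if (insert_addr_list(list_fits) != 0) {
            insert_default_filter();
        }
    }

    int main(void)
    {
        sync_uc_filters(false, true);   /* normal case: list only */
        sync_uc_filters(false, false);  /* overflow: list plus catch-all */
        sync_uc_filters(true, true);    /* promisc: catch-all first */
        printf("done\n");
        return 0;
    }
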
+ */ + if (efx_ef10_filter_insert_addr_list(efx, false, false)) + efx_ef10_filter_insert_def(efx, false, false); + } + + /* Insert/renew multicast filters */ + /* If changing promiscuous state with cascaded multicast filters, remove + * old filters first, so that packets are dropped rather than duplicated + */ + if (nic_data->workaround_26807 && efx->mc_promisc != mc_promisc) + efx_ef10_filter_remove_old(efx); + if (mc_promisc) { + if (nic_data->workaround_26807) { + /* If we failed to insert promiscuous filters, rollback + * and fall back to individual multicast filters + */ + if (efx_ef10_filter_insert_def(efx, true, true)) { + /* Changing promisc state, so remove old filters */ + efx_ef10_filter_remove_old(efx); + efx_ef10_filter_insert_addr_list(efx, true, false); + } + } else { + /* If we failed to insert promiscuous filters, don't + * rollback. Regardless, also insert the mc_list + */ + efx_ef10_filter_insert_def(efx, true, false); + efx_ef10_filter_insert_addr_list(efx, true, false); + } + } else { + /* If any filters failed to insert, rollback and fall back to + * promiscuous mode - mc_def filter and maybe broadcast. If + * that fails, roll back again and insert as many of our + * individual multicast filters as we can. + */ + if (efx_ef10_filter_insert_addr_list(efx, true, true)) { + /* Changing promisc state, so remove old filters */ + if (nic_data->workaround_26807) + efx_ef10_filter_remove_old(efx); + if (efx_ef10_filter_insert_def(efx, true, true)) + efx_ef10_filter_insert_addr_list(efx, true, false); + } + } + + efx_ef10_filter_remove_old(efx); + efx->mc_promisc = mc_promisc; +} + +static int efx_ef10_set_mac_address(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_SET_MAC_IN_LEN); + struct efx_ef10_nic_data *nic_data = efx->nic_data; + bool was_enabled = efx->port_enabled; + int rc; + + efx_device_detach_sync(efx); + efx_net_stop(efx->net_dev); + down_write(&efx->filter_sem); + efx_ef10_filter_table_remove(efx); + + ether_addr_copy(MCDI_PTR(inbuf, VADAPTOR_SET_MAC_IN_MACADDR), + efx->net_dev->dev_addr); + MCDI_SET_DWORD(inbuf, VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID, + nic_data->vport_id); + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_VADAPTOR_SET_MAC, inbuf, + sizeof(inbuf), NULL, 0, NULL); + + efx_ef10_filter_table_probe(efx); + up_write(&efx->filter_sem); + if (was_enabled) + efx_net_open(efx->net_dev); + netif_device_attach(efx->net_dev); + +#ifdef CONFIG_SFC_SRIOV + if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) { + struct pci_dev *pci_dev_pf = efx->pci_dev->physfn; + + if (rc == -EPERM) { + struct efx_nic *efx_pf; + + /* Switch to PF and change MAC address on vport */ + efx_pf = pci_get_drvdata(pci_dev_pf); + + rc = efx_ef10_sriov_set_vf_mac(efx_pf, + nic_data->vf_index, + efx->net_dev->dev_addr); + } else if (!rc) { + struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf); + struct efx_ef10_nic_data *nic_data = efx_pf->nic_data; + unsigned int i; + + /* MAC address successfully changed by VF (with MAC + * spoofing) so update the parent PF if possible. 
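
When the VF succeeds in changing its own MAC, the loop that follows walks the parent PF's VF table looking for the slot whose back-pointer matches this function and copies the new address into it, so the PF's view stays consistent. A simplified standalone version of that lookup, with hypothetical types in place of struct ef10_vf, could be written as:

    #include <stdio.h>
    #include <string.h>

    #define MAC_LEN 6

    struct vf_entry {
        void *owner;                 /* stands in for the VF's efx pointer */
        unsigned char mac[MAC_LEN];
    };

    /* Find the VF slot owned by 'self' and record its new MAC there. */
    static int sync_mac_to_parent(struct vf_entry *vfs, int count,
                                  void *self, const unsigned char *new_mac)
    {
        for (int i = 0; i < count; i++) {
            if (vfs[i].owner == self) {
                memcpy(vfs[i].mac, new_mac, MAC_LEN);
                return 0;
            }
        }
        return -1;                   /* not found: nothing to update */
    }

    int main(void)
    {
        int owner_a, owner_b;
        struct vf_entry vfs[2] = { { &owner_a, {0} }, { &owner_b, {0} } };
        unsigned char mac[MAC_LEN] = { 0x02, 0, 0, 0, 0, 0x42 };

        if (sync_mac_to_parent(vfs, 2, &owner_b, mac) == 0)
            printf("updated slot for owner_b\n");
        return 0;
    }
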
+ */ + for (i = 0; i < efx_pf->vf_count; ++i) { + struct ef10_vf *vf = nic_data->vf + i; + + if (vf->efx == efx) { + ether_addr_copy(vf->mac, + efx->net_dev->dev_addr); + return 0; + } + } + } + } else +#endif + if (rc == -EPERM) { + netif_err(efx, drv, efx->net_dev, + "Cannot change MAC address; use sfboot to enable" + " mac-spoofing on this interface\n"); + } else if (rc == -ENOSYS && !efx_ef10_is_vf(efx)) { + /* If the active MCFW does not support MC_CMD_VADAPTOR_SET_MAC + * fall-back to the method of changing the MAC address on the + * vport. This only applies to PFs because such versions of + * MCFW do not support VFs. + */ + rc = efx_ef10_vport_set_mac_address(efx); + } else { + efx_mcdi_display_error(efx, MC_CMD_VADAPTOR_SET_MAC, + sizeof(inbuf), NULL, 0, rc); + } + + return rc; +} + static int efx_ef10_mac_reconfigure(struct efx_nic *efx) { efx_ef10_filter_sync_rx_mode(efx); @@ -3287,6 +4306,13 @@ static int efx_ef10_mac_reconfigure(struct efx_nic *efx) return efx_mcdi_set_mac(efx); } +static int efx_ef10_mac_reconfigure_vf(struct efx_nic *efx) +{ + efx_ef10_filter_sync_rx_mode(efx); + + return 0; +} + static int efx_ef10_start_bist(struct efx_nic *efx, u32 bist_type) { MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN); @@ -3364,6 +4390,8 @@ efx_ef10_test_chip(struct efx_nic *efx, struct efx_self_tests *tests) rc = efx_mcdi_reset(efx, RESET_TYPE_WORLD); out: + if (rc == -EPERM) + rc = 0; rc2 = efx_reset_up(efx, RESET_TYPE_WORLD, rc == 0); return rc ? rc : rc2; } @@ -3494,6 +4522,9 @@ static void efx_ef10_ptp_write_host_time(struct efx_nic *efx, u32 host_time) _efx_writed(efx, cpu_to_le32(host_time), ER_DZ_MC_DB_LWRD); } +static void efx_ef10_ptp_write_host_time_vf(struct efx_nic *efx, + u32 host_time) {} + static int efx_ef10_rx_enable_timestamping(struct efx_channel *channel, bool temp) { @@ -3571,6 +4602,12 @@ static int efx_ef10_ptp_set_ts_sync_events(struct efx_nic *efx, bool en, return 0; } +static int efx_ef10_ptp_set_ts_config_vf(struct efx_nic *efx, + struct hwtstamp_config *init) +{ + return -EOPNOTSUPP; +} + static int efx_ef10_ptp_set_ts_config(struct efx_nic *efx, struct hwtstamp_config *init) { @@ -3607,14 +4644,119 @@ static int efx_ef10_ptp_set_ts_config(struct efx_nic *efx, } } +const struct efx_nic_type efx_hunt_a0_vf_nic_type = { + .is_vf = true, + .mem_bar = EFX_MEM_VF_BAR, + .mem_map_size = efx_ef10_mem_map_size, + .probe = efx_ef10_probe_vf, + .remove = efx_ef10_remove, + .dimension_resources = efx_ef10_dimension_resources, + .init = efx_ef10_init_nic, + .fini = efx_port_dummy_op_void, + .map_reset_reason = efx_ef10_map_reset_reason, + .map_reset_flags = efx_ef10_map_reset_flags, + .reset = efx_ef10_reset, + .probe_port = efx_mcdi_port_probe, + .remove_port = efx_mcdi_port_remove, + .fini_dmaq = efx_ef10_fini_dmaq, + .prepare_flr = efx_ef10_prepare_flr, + .finish_flr = efx_port_dummy_op_void, + .describe_stats = efx_ef10_describe_stats, + .update_stats = efx_ef10_update_stats_vf, + .start_stats = efx_port_dummy_op_void, + .pull_stats = efx_port_dummy_op_void, + .stop_stats = efx_port_dummy_op_void, + .set_id_led = efx_mcdi_set_id_led, + .push_irq_moderation = efx_ef10_push_irq_moderation, + .reconfigure_mac = efx_ef10_mac_reconfigure_vf, + .check_mac_fault = efx_mcdi_mac_check_fault, + .reconfigure_port = efx_mcdi_port_reconfigure, + .get_wol = efx_ef10_get_wol_vf, + .set_wol = efx_ef10_set_wol_vf, + .resume_wol = efx_port_dummy_op_void, + .mcdi_request = efx_ef10_mcdi_request, + .mcdi_poll_response = efx_ef10_mcdi_poll_response, + .mcdi_read_response = 
efx_ef10_mcdi_read_response, + .mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot, + .mcdi_reboot_detected = efx_ef10_mcdi_reboot_detected, + .irq_enable_master = efx_port_dummy_op_void, + .irq_test_generate = efx_ef10_irq_test_generate, + .irq_disable_non_ev = efx_port_dummy_op_void, + .irq_handle_msi = efx_ef10_msi_interrupt, + .irq_handle_legacy = efx_ef10_legacy_interrupt, + .tx_probe = efx_ef10_tx_probe, + .tx_init = efx_ef10_tx_init, + .tx_remove = efx_ef10_tx_remove, + .tx_write = efx_ef10_tx_write, + .rx_push_rss_config = efx_ef10_vf_rx_push_rss_config, + .rx_probe = efx_ef10_rx_probe, + .rx_init = efx_ef10_rx_init, + .rx_remove = efx_ef10_rx_remove, + .rx_write = efx_ef10_rx_write, + .rx_defer_refill = efx_ef10_rx_defer_refill, + .ev_probe = efx_ef10_ev_probe, + .ev_init = efx_ef10_ev_init, + .ev_fini = efx_ef10_ev_fini, + .ev_remove = efx_ef10_ev_remove, + .ev_process = efx_ef10_ev_process, + .ev_read_ack = efx_ef10_ev_read_ack, + .ev_test_generate = efx_ef10_ev_test_generate, + .filter_table_probe = efx_ef10_filter_table_probe, + .filter_table_restore = efx_ef10_filter_table_restore, + .filter_table_remove = efx_ef10_filter_table_remove, + .filter_update_rx_scatter = efx_ef10_filter_update_rx_scatter, + .filter_insert = efx_ef10_filter_insert, + .filter_remove_safe = efx_ef10_filter_remove_safe, + .filter_get_safe = efx_ef10_filter_get_safe, + .filter_clear_rx = efx_ef10_filter_clear_rx, + .filter_count_rx_used = efx_ef10_filter_count_rx_used, + .filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit, + .filter_get_rx_ids = efx_ef10_filter_get_rx_ids, +#ifdef CONFIG_RFS_ACCEL + .filter_rfs_insert = efx_ef10_filter_rfs_insert, + .filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one, +#endif +#ifdef CONFIG_SFC_MTD + .mtd_probe = efx_port_dummy_op_int, +#endif + .ptp_write_host_time = efx_ef10_ptp_write_host_time_vf, + .ptp_set_ts_config = efx_ef10_ptp_set_ts_config_vf, +#ifdef CONFIG_SFC_SRIOV + .vswitching_probe = efx_ef10_vswitching_probe_vf, + .vswitching_restore = efx_ef10_vswitching_restore_vf, + .vswitching_remove = efx_ef10_vswitching_remove_vf, + .sriov_get_phys_port_id = efx_ef10_sriov_get_phys_port_id, +#endif + .get_mac_address = efx_ef10_get_mac_address_vf, + .set_mac_address = efx_ef10_set_mac_address, + + .revision = EFX_REV_HUNT_A0, + .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH), + .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE, + .rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST, + .rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST, + .can_rx_scatter = true, + .always_rx_scatter = true, + .max_interrupt_mode = EFX_INT_MODE_MSIX, + .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH, + .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_RXHASH | NETIF_F_NTUPLE), + .mcdi_max_ver = 2, + .max_rx_ip_filters = HUNT_FILTER_TBL_ROWS, + .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE | + 1 << HWTSTAMP_FILTER_ALL, +}; + const struct efx_nic_type efx_hunt_a0_nic_type = { + .is_vf = false, + .mem_bar = EFX_MEM_BAR, .mem_map_size = efx_ef10_mem_map_size, - .probe = efx_ef10_probe, + .probe = efx_ef10_probe_pf, .remove = efx_ef10_remove, .dimension_resources = efx_ef10_dimension_resources, .init = efx_ef10_init_nic, .fini = efx_port_dummy_op_void, - .map_reset_reason = efx_mcdi_map_reset_reason, + .map_reset_reason = efx_ef10_map_reset_reason, .map_reset_flags = efx_ef10_map_reset_flags, .reset = efx_ef10_reset, .probe_port = efx_mcdi_port_probe, @@ -3623,7 +4765,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = { .prepare_flr = efx_ef10_prepare_flr, .finish_flr = 
efx_port_dummy_op_void, .describe_stats = efx_ef10_describe_stats, - .update_stats = efx_ef10_update_stats, + .update_stats = efx_ef10_update_stats_pf, .start_stats = efx_mcdi_mac_start_stats, .pull_stats = efx_mcdi_mac_pull_stats, .stop_stats = efx_mcdi_mac_stop_stats, @@ -3641,6 +4783,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = { .mcdi_poll_response = efx_ef10_mcdi_poll_response, .mcdi_read_response = efx_ef10_mcdi_read_response, .mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot, + .mcdi_reboot_detected = efx_ef10_mcdi_reboot_detected, .irq_enable_master = efx_port_dummy_op_void, .irq_test_generate = efx_ef10_irq_test_generate, .irq_disable_non_ev = efx_port_dummy_op_void, @@ -3650,7 +4793,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = { .tx_init = efx_ef10_tx_init, .tx_remove = efx_ef10_tx_remove, .tx_write = efx_ef10_tx_write, - .rx_push_rss_config = efx_ef10_rx_push_rss_config, + .rx_push_rss_config = efx_ef10_pf_rx_push_rss_config, .rx_probe = efx_ef10_rx_probe, .rx_init = efx_ef10_rx_init, .rx_remove = efx_ef10_rx_remove, @@ -3689,11 +4832,24 @@ const struct efx_nic_type efx_hunt_a0_nic_type = { .ptp_write_host_time = efx_ef10_ptp_write_host_time, .ptp_set_ts_sync_events = efx_ef10_ptp_set_ts_sync_events, .ptp_set_ts_config = efx_ef10_ptp_set_ts_config, +#ifdef CONFIG_SFC_SRIOV + .sriov_configure = efx_ef10_sriov_configure, .sriov_init = efx_ef10_sriov_init, .sriov_fini = efx_ef10_sriov_fini, - .sriov_mac_address_changed = efx_ef10_sriov_mac_address_changed, .sriov_wanted = efx_ef10_sriov_wanted, .sriov_reset = efx_ef10_sriov_reset, + .sriov_flr = efx_ef10_sriov_flr, + .sriov_set_vf_mac = efx_ef10_sriov_set_vf_mac, + .sriov_set_vf_vlan = efx_ef10_sriov_set_vf_vlan, + .sriov_set_vf_spoofchk = efx_ef10_sriov_set_vf_spoofchk, + .sriov_get_vf_config = efx_ef10_sriov_get_vf_config, + .sriov_set_vf_link_state = efx_ef10_sriov_set_vf_link_state, + .vswitching_probe = efx_ef10_vswitching_probe_pf, + .vswitching_restore = efx_ef10_vswitching_restore_pf, + .vswitching_remove = efx_ef10_vswitching_remove_pf, +#endif + .get_mac_address = efx_ef10_get_mac_address_pf, + .set_mac_address = efx_ef10_set_mac_address, .revision = EFX_REV_HUNT_A0, .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH), diff --git a/kernel/drivers/net/ethernet/sfc/ef10_sriov.c b/kernel/drivers/net/ethernet/sfc/ef10_sriov.c new file mode 100644 index 000000000..3c17f274e --- /dev/null +++ b/kernel/drivers/net/ethernet/sfc/ef10_sriov.c @@ -0,0 +1,746 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2015 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. 
+ */ +#include <linux/pci.h> +#include <linux/module.h> +#include "net_driver.h" +#include "ef10_sriov.h" +#include "efx.h" +#include "nic.h" +#include "mcdi_pcol.h" + +static int efx_ef10_evb_port_assign(struct efx_nic *efx, unsigned int port_id, + unsigned int vf_fn) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_EVB_PORT_ASSIGN_IN_LEN); + struct efx_ef10_nic_data *nic_data = efx->nic_data; + + MCDI_SET_DWORD(inbuf, EVB_PORT_ASSIGN_IN_PORT_ID, port_id); + MCDI_POPULATE_DWORD_2(inbuf, EVB_PORT_ASSIGN_IN_FUNCTION, + EVB_PORT_ASSIGN_IN_PF, nic_data->pf_index, + EVB_PORT_ASSIGN_IN_VF, vf_fn); + + return efx_mcdi_rpc(efx, MC_CMD_EVB_PORT_ASSIGN, inbuf, sizeof(inbuf), + NULL, 0, NULL); +} + +static int efx_ef10_vswitch_alloc(struct efx_nic *efx, unsigned int port_id, + unsigned int vswitch_type) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_VSWITCH_ALLOC_IN_LEN); + int rc; + + MCDI_SET_DWORD(inbuf, VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID, port_id); + MCDI_SET_DWORD(inbuf, VSWITCH_ALLOC_IN_TYPE, vswitch_type); + MCDI_SET_DWORD(inbuf, VSWITCH_ALLOC_IN_NUM_VLAN_TAGS, 2); + MCDI_POPULATE_DWORD_1(inbuf, VSWITCH_ALLOC_IN_FLAGS, + VSWITCH_ALLOC_IN_FLAG_AUTO_PORT, 0); + + /* Quietly try to allocate 2 VLAN tags */ + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_VSWITCH_ALLOC, inbuf, sizeof(inbuf), + NULL, 0, NULL); + + /* If 2 VLAN tags is too many, revert to trying with 1 VLAN tags */ + if (rc == -EPROTO) { + MCDI_SET_DWORD(inbuf, VSWITCH_ALLOC_IN_NUM_VLAN_TAGS, 1); + rc = efx_mcdi_rpc(efx, MC_CMD_VSWITCH_ALLOC, inbuf, + sizeof(inbuf), NULL, 0, NULL); + } else if (rc) { + efx_mcdi_display_error(efx, MC_CMD_VSWITCH_ALLOC, + MC_CMD_VSWITCH_ALLOC_IN_LEN, + NULL, 0, rc); + } + return rc; +} + +static int efx_ef10_vswitch_free(struct efx_nic *efx, unsigned int port_id) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_VSWITCH_FREE_IN_LEN); + + MCDI_SET_DWORD(inbuf, VSWITCH_FREE_IN_UPSTREAM_PORT_ID, port_id); + + return efx_mcdi_rpc(efx, MC_CMD_VSWITCH_FREE, inbuf, sizeof(inbuf), + NULL, 0, NULL); +} + +static int efx_ef10_vport_alloc(struct efx_nic *efx, + unsigned int port_id_in, + unsigned int vport_type, + u16 vlan, + unsigned int *port_id_out) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ALLOC_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_VPORT_ALLOC_OUT_LEN); + size_t outlen; + int rc; + + EFX_WARN_ON_PARANOID(!port_id_out); + + MCDI_SET_DWORD(inbuf, VPORT_ALLOC_IN_UPSTREAM_PORT_ID, port_id_in); + MCDI_SET_DWORD(inbuf, VPORT_ALLOC_IN_TYPE, vport_type); + MCDI_SET_DWORD(inbuf, VPORT_ALLOC_IN_NUM_VLAN_TAGS, + (vlan != EFX_EF10_NO_VLAN)); + MCDI_POPULATE_DWORD_1(inbuf, VPORT_ALLOC_IN_FLAGS, + VPORT_ALLOC_IN_FLAG_AUTO_PORT, 0); + if (vlan != EFX_EF10_NO_VLAN) + MCDI_POPULATE_DWORD_1(inbuf, VPORT_ALLOC_IN_VLAN_TAGS, + VPORT_ALLOC_IN_VLAN_TAG_0, vlan); + + rc = efx_mcdi_rpc(efx, MC_CMD_VPORT_ALLOC, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + if (outlen < MC_CMD_VPORT_ALLOC_OUT_LEN) + return -EIO; + + *port_id_out = MCDI_DWORD(outbuf, VPORT_ALLOC_OUT_VPORT_ID); + return 0; +} + +static int efx_ef10_vport_free(struct efx_nic *efx, unsigned int port_id) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_FREE_IN_LEN); + + MCDI_SET_DWORD(inbuf, VPORT_FREE_IN_VPORT_ID, port_id); + + return efx_mcdi_rpc(efx, MC_CMD_VPORT_FREE, inbuf, sizeof(inbuf), + NULL, 0, NULL); +} + +static void efx_ef10_sriov_free_vf_vports(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + int i; + + if (!nic_data->vf) + return; + + for (i = 0; i < efx->vf_count; i++) { + struct ef10_vf *vf = nic_data->vf + i; + + /* If VF is assigned, do 
not free the vport */ + if (vf->pci_dev && + vf->pci_dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) + continue; + + if (vf->vport_assigned) { + efx_ef10_evb_port_assign(efx, EVB_PORT_ID_NULL, i); + vf->vport_assigned = 0; + } + + if (!is_zero_ether_addr(vf->mac)) { + efx_ef10_vport_del_mac(efx, vf->vport_id, vf->mac); + eth_zero_addr(vf->mac); + } + + if (vf->vport_id) { + efx_ef10_vport_free(efx, vf->vport_id); + vf->vport_id = 0; + } + + vf->efx = NULL; + } +} + +static void efx_ef10_sriov_free_vf_vswitching(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + + efx_ef10_sriov_free_vf_vports(efx); + kfree(nic_data->vf); + nic_data->vf = NULL; +} + +static int efx_ef10_sriov_assign_vf_vport(struct efx_nic *efx, + unsigned int vf_i) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + struct ef10_vf *vf = nic_data->vf + vf_i; + int rc; + + if (WARN_ON_ONCE(!nic_data->vf)) + return -EOPNOTSUPP; + + rc = efx_ef10_vport_alloc(efx, EVB_PORT_ID_ASSIGNED, + MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL, + vf->vlan, &vf->vport_id); + if (rc) + return rc; + + rc = efx_ef10_vport_add_mac(efx, vf->vport_id, vf->mac); + if (rc) { + eth_zero_addr(vf->mac); + return rc; + } + + rc = efx_ef10_evb_port_assign(efx, vf->vport_id, vf_i); + if (rc) + return rc; + + vf->vport_assigned = 1; + return 0; +} + +static int efx_ef10_sriov_alloc_vf_vswitching(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + unsigned int i; + int rc; + + nic_data->vf = kcalloc(efx->vf_count, sizeof(struct ef10_vf), + GFP_KERNEL); + if (!nic_data->vf) + return -ENOMEM; + + for (i = 0; i < efx->vf_count; i++) { + random_ether_addr(nic_data->vf[i].mac); + nic_data->vf[i].efx = NULL; + nic_data->vf[i].vlan = EFX_EF10_NO_VLAN; + + rc = efx_ef10_sriov_assign_vf_vport(efx, i); + if (rc) + goto fail; + } + + return 0; +fail: + efx_ef10_sriov_free_vf_vports(efx); + kfree(nic_data->vf); + nic_data->vf = NULL; + return rc; +} + +static int efx_ef10_sriov_restore_vf_vswitching(struct efx_nic *efx) +{ + unsigned int i; + int rc; + + for (i = 0; i < efx->vf_count; i++) { + rc = efx_ef10_sriov_assign_vf_vport(efx, i); + if (rc) + goto fail; + } + + return 0; +fail: + efx_ef10_sriov_free_vf_vswitching(efx); + return rc; +} + +/* On top of the default firmware vswitch setup, create a VEB vswitch and + * expansion vport for use by this function. 
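
The probe function that follows is a typical staged-setup routine: each allocation step that fails jumps to a label that undoes only what has already been completed (fail4 removes the MAC, fail3 frees the vport, fail2 frees the vswitch). A generic sketch of this goto-unwind idiom, detached from the driver and using made-up step functions, looks like:

    #include <stdio.h>

    static int  step_a(void) { return 0; }
    static int  step_b(void) { return 0; }
    static int  step_c(void) { return -1; }   /* pretend the last step fails */
    static void undo_a(void) { puts("undo a"); }
    static void undo_b(void) { puts("undo b"); }

    /* Each failure unwinds only the steps already taken, in reverse order. */
    static int probe(void)
    {
        int rc;

        rc = step_a();
        if (rc)
            goto fail1;
        rc = step_b();
        if (rc)
            goto fail2;
        rc = step_c();
        if (rc)
            goto fail3;
        return 0;

    fail3:
        undo_b();
    fail2:
        undo_a();
    fail1:
        return rc;
    }

    int main(void)
    {
        printf("probe() = %d\n", probe());
        return 0;
    }
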
+ */ +int efx_ef10_vswitching_probe_pf(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + struct net_device *net_dev = efx->net_dev; + int rc; + + if (pci_sriov_get_totalvfs(efx->pci_dev) <= 0) { + /* vswitch not needed as we have no VFs */ + efx_ef10_vadaptor_alloc(efx, nic_data->vport_id); + return 0; + } + + rc = efx_ef10_vswitch_alloc(efx, EVB_PORT_ID_ASSIGNED, + MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEB); + if (rc) + goto fail1; + + rc = efx_ef10_vport_alloc(efx, EVB_PORT_ID_ASSIGNED, + MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL, + EFX_EF10_NO_VLAN, &nic_data->vport_id); + if (rc) + goto fail2; + + rc = efx_ef10_vport_add_mac(efx, nic_data->vport_id, net_dev->dev_addr); + if (rc) + goto fail3; + ether_addr_copy(nic_data->vport_mac, net_dev->dev_addr); + + rc = efx_ef10_vadaptor_alloc(efx, nic_data->vport_id); + if (rc) + goto fail4; + + return 0; +fail4: + efx_ef10_vport_del_mac(efx, nic_data->vport_id, nic_data->vport_mac); + eth_zero_addr(nic_data->vport_mac); +fail3: + efx_ef10_vport_free(efx, nic_data->vport_id); + nic_data->vport_id = EVB_PORT_ID_ASSIGNED; +fail2: + efx_ef10_vswitch_free(efx, EVB_PORT_ID_ASSIGNED); +fail1: + return rc; +} + +int efx_ef10_vswitching_probe_vf(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + + return efx_ef10_vadaptor_alloc(efx, nic_data->vport_id); +} + +int efx_ef10_vswitching_restore_pf(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + int rc; + + if (!nic_data->must_probe_vswitching) + return 0; + + rc = efx_ef10_vswitching_probe_pf(efx); + if (rc) + goto fail; + + rc = efx_ef10_sriov_restore_vf_vswitching(efx); + if (rc) + goto fail; + + nic_data->must_probe_vswitching = false; +fail: + return rc; +} + +int efx_ef10_vswitching_restore_vf(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + int rc; + + if (!nic_data->must_probe_vswitching) + return 0; + + rc = efx_ef10_vadaptor_free(efx, EVB_PORT_ID_ASSIGNED); + if (rc) + return rc; + + nic_data->must_probe_vswitching = false; + return 0; +} + +void efx_ef10_vswitching_remove_pf(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + + efx_ef10_sriov_free_vf_vswitching(efx); + + efx_ef10_vadaptor_free(efx, nic_data->vport_id); + + if (nic_data->vport_id == EVB_PORT_ID_ASSIGNED) + return; /* No vswitch was ever created */ + + if (!is_zero_ether_addr(nic_data->vport_mac)) { + efx_ef10_vport_del_mac(efx, nic_data->vport_id, + efx->net_dev->dev_addr); + eth_zero_addr(nic_data->vport_mac); + } + efx_ef10_vport_free(efx, nic_data->vport_id); + nic_data->vport_id = EVB_PORT_ID_ASSIGNED; + + /* Only free the vswitch if no VFs are assigned */ + if (!pci_vfs_assigned(efx->pci_dev)) + efx_ef10_vswitch_free(efx, nic_data->vport_id); +} + +void efx_ef10_vswitching_remove_vf(struct efx_nic *efx) +{ + efx_ef10_vadaptor_free(efx, EVB_PORT_ID_ASSIGNED); +} + +static int efx_ef10_pci_sriov_enable(struct efx_nic *efx, int num_vfs) +{ + int rc = 0; + struct pci_dev *dev = efx->pci_dev; + + efx->vf_count = num_vfs; + + rc = efx_ef10_sriov_alloc_vf_vswitching(efx); + if (rc) + goto fail1; + + rc = pci_enable_sriov(dev, num_vfs); + if (rc) + goto fail2; + + return 0; +fail2: + efx_ef10_sriov_free_vf_vswitching(efx); +fail1: + efx->vf_count = 0; + netif_err(efx, probe, efx->net_dev, + "Failed to enable SRIOV VFs\n"); + return rc; +} + +static int efx_ef10_pci_sriov_disable(struct efx_nic *efx, bool force) +{ + struct pci_dev *dev = efx->pci_dev; + unsigned int vfs_assigned = 0; + + 
vfs_assigned = pci_vfs_assigned(dev); + + if (vfs_assigned && !force) { + netif_info(efx, drv, efx->net_dev, "VFs are assigned to guests; " + "please detach them before disabling SR-IOV\n"); + return -EBUSY; + } + + if (!vfs_assigned) + pci_disable_sriov(dev); + + efx_ef10_sriov_free_vf_vswitching(efx); + efx->vf_count = 0; + return 0; +} + +int efx_ef10_sriov_configure(struct efx_nic *efx, int num_vfs) +{ + if (num_vfs == 0) + return efx_ef10_pci_sriov_disable(efx, false); + else + return efx_ef10_pci_sriov_enable(efx, num_vfs); +} + +int efx_ef10_sriov_init(struct efx_nic *efx) +{ + return 0; +} + +void efx_ef10_sriov_fini(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + unsigned int i; + int rc; + + if (!nic_data->vf) { + /* Remove any un-assigned orphaned VFs */ + if (pci_num_vf(efx->pci_dev) && !pci_vfs_assigned(efx->pci_dev)) + pci_disable_sriov(efx->pci_dev); + return; + } + + /* Remove any VFs in the host */ + for (i = 0; i < efx->vf_count; ++i) { + struct efx_nic *vf_efx = nic_data->vf[i].efx; + + if (vf_efx) + vf_efx->pci_dev->driver->remove(vf_efx->pci_dev); + } + + rc = efx_ef10_pci_sriov_disable(efx, true); + if (rc) + netif_dbg(efx, drv, efx->net_dev, + "Disabling SRIOV was not successful rc=%d\n", rc); + else + netif_dbg(efx, drv, efx->net_dev, "SRIOV disabled\n"); +} + +static int efx_ef10_vport_del_vf_mac(struct efx_nic *efx, unsigned int port_id, + u8 *mac) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN); + MCDI_DECLARE_BUF_ERR(outbuf); + size_t outlen; + int rc; + + MCDI_SET_DWORD(inbuf, VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID, port_id); + ether_addr_copy(MCDI_PTR(inbuf, VPORT_DEL_MAC_ADDRESS_IN_MACADDR), mac); + + rc = efx_mcdi_rpc(efx, MC_CMD_VPORT_DEL_MAC_ADDRESS, inbuf, + sizeof(inbuf), outbuf, sizeof(outbuf), &outlen); + + return rc; +} + +int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, u8 *mac) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + struct ef10_vf *vf; + int rc; + + if (!nic_data->vf) + return -EOPNOTSUPP; + + if (vf_i >= efx->vf_count) + return -EINVAL; + vf = nic_data->vf + vf_i; + + if (vf->efx) { + efx_device_detach_sync(vf->efx); + efx_net_stop(vf->efx->net_dev); + + down_write(&vf->efx->filter_sem); + vf->efx->type->filter_table_remove(vf->efx); + + rc = efx_ef10_vadaptor_free(vf->efx, EVB_PORT_ID_ASSIGNED); + if (rc) { + up_write(&vf->efx->filter_sem); + return rc; + } + } + + rc = efx_ef10_evb_port_assign(efx, EVB_PORT_ID_NULL, vf_i); + if (rc) + return rc; + + if (!is_zero_ether_addr(vf->mac)) { + rc = efx_ef10_vport_del_vf_mac(efx, vf->vport_id, vf->mac); + if (rc) + return rc; + } + + if (!is_zero_ether_addr(mac)) { + rc = efx_ef10_vport_add_mac(efx, vf->vport_id, mac); + if (rc) { + eth_zero_addr(vf->mac); + goto fail; + } + if (vf->efx) + ether_addr_copy(vf->efx->net_dev->dev_addr, mac); + } + + ether_addr_copy(vf->mac, mac); + + rc = efx_ef10_evb_port_assign(efx, vf->vport_id, vf_i); + if (rc) + goto fail; + + if (vf->efx) { + /* VF cannot use the vport_id that the PF created */ + rc = efx_ef10_vadaptor_alloc(vf->efx, EVB_PORT_ID_ASSIGNED); + if (rc) { + up_write(&vf->efx->filter_sem); + return rc; + } + vf->efx->type->filter_table_probe(vf->efx); + up_write(&vf->efx->filter_sem); + efx_net_open(vf->efx->net_dev); + netif_device_attach(vf->efx->net_dev); + } + + return 0; + +fail: + memset(vf->mac, 0, ETH_ALEN); + return rc; +} + +int efx_ef10_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i, u16 vlan, + u8 qos) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + struct 
ef10_vf *vf; + u16 old_vlan, new_vlan; + int rc = 0, rc2 = 0; + + if (vf_i >= efx->vf_count) + return -EINVAL; + if (qos != 0) + return -EINVAL; + + vf = nic_data->vf + vf_i; + + new_vlan = (vlan == 0) ? EFX_EF10_NO_VLAN : vlan; + if (new_vlan == vf->vlan) + return 0; + + if (vf->efx) { + efx_device_detach_sync(vf->efx); + efx_net_stop(vf->efx->net_dev); + + down_write(&vf->efx->filter_sem); + vf->efx->type->filter_table_remove(vf->efx); + + rc = efx_ef10_vadaptor_free(vf->efx, EVB_PORT_ID_ASSIGNED); + if (rc) + goto restore_filters; + } + + if (vf->vport_assigned) { + rc = efx_ef10_evb_port_assign(efx, EVB_PORT_ID_NULL, vf_i); + if (rc) { + netif_warn(efx, drv, efx->net_dev, + "Failed to change vlan on VF %d.\n", vf_i); + netif_warn(efx, drv, efx->net_dev, + "This is likely because the VF is bound to a driver in a VM.\n"); + netif_warn(efx, drv, efx->net_dev, + "Please unload the driver in the VM.\n"); + goto restore_vadaptor; + } + vf->vport_assigned = 0; + } + + if (!is_zero_ether_addr(vf->mac)) { + rc = efx_ef10_vport_del_mac(efx, vf->vport_id, vf->mac); + if (rc) + goto restore_evb_port; + } + + if (vf->vport_id) { + rc = efx_ef10_vport_free(efx, vf->vport_id); + if (rc) + goto restore_mac; + vf->vport_id = 0; + } + + /* Do the actual vlan change */ + old_vlan = vf->vlan; + vf->vlan = new_vlan; + + /* Restore everything in reverse order */ + rc = efx_ef10_vport_alloc(efx, EVB_PORT_ID_ASSIGNED, + MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL, + vf->vlan, &vf->vport_id); + if (rc) + goto reset_nic_up_write; + +restore_mac: + if (!is_zero_ether_addr(vf->mac)) { + rc2 = efx_ef10_vport_add_mac(efx, vf->vport_id, vf->mac); + if (rc2) { + eth_zero_addr(vf->mac); + goto reset_nic_up_write; + } + } + +restore_evb_port: + rc2 = efx_ef10_evb_port_assign(efx, vf->vport_id, vf_i); + if (rc2) + goto reset_nic_up_write; + else + vf->vport_assigned = 1; + +restore_vadaptor: + if (vf->efx) { + rc2 = efx_ef10_vadaptor_alloc(vf->efx, EVB_PORT_ID_ASSIGNED); + if (rc2) + goto reset_nic_up_write; + } + +restore_filters: + if (vf->efx) { + rc2 = vf->efx->type->filter_table_probe(vf->efx); + if (rc2) + goto reset_nic_up_write; + + up_write(&vf->efx->filter_sem); + + up_write(&vf->efx->filter_sem); + + rc2 = efx_net_open(vf->efx->net_dev); + if (rc2) + goto reset_nic; + + netif_device_attach(vf->efx->net_dev); + } + return rc; + +reset_nic_up_write: + if (vf->efx) + up_write(&vf->efx->filter_sem); + +reset_nic: + if (vf->efx) { + netif_err(efx, drv, efx->net_dev, + "Failed to restore VF - scheduling reset.\n"); + efx_schedule_reset(vf->efx, RESET_TYPE_DATAPATH); + } else { + netif_err(efx, drv, efx->net_dev, + "Failed to restore the VF and cannot reset the VF " + "- VF is not functional.\n"); + netif_err(efx, drv, efx->net_dev, + "Please reload the driver attached to the VF.\n"); + } + + return rc ? rc : rc2; +} + +int efx_ef10_sriov_set_vf_spoofchk(struct efx_nic *efx, int vf_i, + bool spoofchk) +{ + return spoofchk ? 
-EOPNOTSUPP : 0; +} + +int efx_ef10_sriov_set_vf_link_state(struct efx_nic *efx, int vf_i, + int link_state) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_LINK_STATE_MODE_IN_LEN); + struct efx_ef10_nic_data *nic_data = efx->nic_data; + + BUILD_BUG_ON(IFLA_VF_LINK_STATE_AUTO != + MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_AUTO); + BUILD_BUG_ON(IFLA_VF_LINK_STATE_ENABLE != + MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_UP); + BUILD_BUG_ON(IFLA_VF_LINK_STATE_DISABLE != + MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_DOWN); + MCDI_POPULATE_DWORD_2(inbuf, LINK_STATE_MODE_IN_FUNCTION, + LINK_STATE_MODE_IN_FUNCTION_PF, + nic_data->pf_index, + LINK_STATE_MODE_IN_FUNCTION_VF, vf_i); + MCDI_SET_DWORD(inbuf, LINK_STATE_MODE_IN_NEW_MODE, link_state); + return efx_mcdi_rpc(efx, MC_CMD_LINK_STATE_MODE, inbuf, sizeof(inbuf), + NULL, 0, NULL); /* don't care what old mode was */ +} + +int efx_ef10_sriov_get_vf_config(struct efx_nic *efx, int vf_i, + struct ifla_vf_info *ivf) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_LINK_STATE_MODE_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_LINK_STATE_MODE_OUT_LEN); + + struct efx_ef10_nic_data *nic_data = efx->nic_data; + struct ef10_vf *vf; + size_t outlen; + int rc; + + if (vf_i >= efx->vf_count) + return -EINVAL; + + if (!nic_data->vf) + return -EOPNOTSUPP; + + vf = nic_data->vf + vf_i; + + ivf->vf = vf_i; + ivf->min_tx_rate = 0; + ivf->max_tx_rate = 0; + ether_addr_copy(ivf->mac, vf->mac); + ivf->vlan = (vf->vlan == EFX_EF10_NO_VLAN) ? 0 : vf->vlan; + ivf->qos = 0; + + MCDI_POPULATE_DWORD_2(inbuf, LINK_STATE_MODE_IN_FUNCTION, + LINK_STATE_MODE_IN_FUNCTION_PF, + nic_data->pf_index, + LINK_STATE_MODE_IN_FUNCTION_VF, vf_i); + MCDI_SET_DWORD(inbuf, LINK_STATE_MODE_IN_NEW_MODE, + MC_CMD_LINK_STATE_MODE_IN_DO_NOT_CHANGE); + rc = efx_mcdi_rpc(efx, MC_CMD_LINK_STATE_MODE, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + if (outlen < MC_CMD_LINK_STATE_MODE_OUT_LEN) + return -EIO; + ivf->linkstate = MCDI_DWORD(outbuf, LINK_STATE_MODE_OUT_OLD_MODE); + + return 0; +} + +int efx_ef10_sriov_get_phys_port_id(struct efx_nic *efx, + struct netdev_phys_item_id *ppid) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + + if (!is_valid_ether_addr(nic_data->port_id)) + return -EOPNOTSUPP; + + ppid->id_len = ETH_ALEN; + memcpy(ppid->id, nic_data->port_id, ppid->id_len); + + return 0; +} diff --git a/kernel/drivers/net/ethernet/sfc/ef10_sriov.h b/kernel/drivers/net/ethernet/sfc/ef10_sriov.h new file mode 100644 index 000000000..6d25b92cb --- /dev/null +++ b/kernel/drivers/net/ethernet/sfc/ef10_sriov.h @@ -0,0 +1,75 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2015 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. 
+ */ + +#ifndef EF10_SRIOV_H +#define EF10_SRIOV_H + +#include "net_driver.h" + +/** + * struct ef10_vf - PF's store of VF data + * @efx: efx_nic struct for the current VF + * @pci_dev: the pci_dev struct for the VF, retained while the VF is assigned + * @vport_id: vport ID for the VF + * @vport_assigned: record whether the vport is currently assigned to the VF + * @mac: MAC address for the VF, zero when address is removed from the vport + * @vlan: Default VLAN for the VF or #EFX_EF10_NO_VLAN + */ +struct ef10_vf { + struct efx_nic *efx; + struct pci_dev *pci_dev; + unsigned int vport_id; + unsigned int vport_assigned; + u8 mac[ETH_ALEN]; + u16 vlan; +#define EFX_EF10_NO_VLAN 0 +}; + +static inline bool efx_ef10_sriov_wanted(struct efx_nic *efx) +{ + return false; +} + +int efx_ef10_sriov_configure(struct efx_nic *efx, int num_vfs); +int efx_ef10_sriov_init(struct efx_nic *efx); +static inline void efx_ef10_sriov_reset(struct efx_nic *efx) {} +void efx_ef10_sriov_fini(struct efx_nic *efx); +static inline void efx_ef10_sriov_flr(struct efx_nic *efx, unsigned vf_i) {} + +int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf, u8 *mac); + +int efx_ef10_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i, + u16 vlan, u8 qos); + +int efx_ef10_sriov_set_vf_spoofchk(struct efx_nic *efx, int vf, + bool spoofchk); + +int efx_ef10_sriov_get_vf_config(struct efx_nic *efx, int vf_i, + struct ifla_vf_info *ivf); + +int efx_ef10_sriov_set_vf_link_state(struct efx_nic *efx, int vf_i, + int link_state); + +int efx_ef10_sriov_get_phys_port_id(struct efx_nic *efx, + struct netdev_phys_item_id *ppid); + +int efx_ef10_vswitching_probe_pf(struct efx_nic *efx); +int efx_ef10_vswitching_probe_vf(struct efx_nic *efx); +int efx_ef10_vswitching_restore_pf(struct efx_nic *efx); +int efx_ef10_vswitching_restore_vf(struct efx_nic *efx); +void efx_ef10_vswitching_remove_pf(struct efx_nic *efx); +void efx_ef10_vswitching_remove_vf(struct efx_nic *efx); +int efx_ef10_vport_add_mac(struct efx_nic *efx, + unsigned int port_id, u8 *mac); +int efx_ef10_vport_del_mac(struct efx_nic *efx, + unsigned int port_id, u8 *mac); +int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id); +int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id); + +#endif /* EF10_SRIOV_H */ diff --git a/kernel/drivers/net/ethernet/sfc/efx.c b/kernel/drivers/net/ethernet/sfc/efx.c index 4b00545a3..a3c42a376 100644 --- a/kernel/drivers/net/ethernet/sfc/efx.c +++ b/kernel/drivers/net/ethernet/sfc/efx.c @@ -26,6 +26,7 @@ #include "efx.h" #include "nic.h" #include "selftest.h" +#include "sriov.h" #include "mcdi.h" #include "workarounds.h" @@ -76,6 +77,7 @@ const char *const efx_reset_type_names[] = { [RESET_TYPE_RECOVER_OR_ALL] = "RECOVER_OR_ALL", [RESET_TYPE_WORLD] = "WORLD", [RESET_TYPE_RECOVER_OR_DISABLE] = "RECOVER_OR_DISABLE", + [RESET_TYPE_DATAPATH] = "DATAPATH", [RESET_TYPE_MC_BIST] = "MC_BIST", [RESET_TYPE_DISABLE] = "DISABLE", [RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG", @@ -113,9 +115,9 @@ static struct workqueue_struct *reset_workqueue; * * This is only used in MSI-X interrupt mode */ -static bool separate_tx_channels; -module_param(separate_tx_channels, bool, 0444); -MODULE_PARM_DESC(separate_tx_channels, +bool efx_separate_tx_channels; +module_param(efx_separate_tx_channels, bool, 0444); +MODULE_PARM_DESC(efx_separate_tx_channels, "Use separate channels for TX and RX"); /* This is the weight assigned to each of the (per-channel) virtual @@ -243,11 +245,17 @@ static int efx_check_disabled(struct efx_nic *efx) */ static 
int efx_process_channel(struct efx_channel *channel, int budget) { + struct efx_tx_queue *tx_queue; int spent; if (unlikely(!channel->enabled)) return 0; + efx_for_each_channel_tx_queue(tx_queue, channel) { + tx_queue->pkts_compl = 0; + tx_queue->bytes_compl = 0; + } + spent = efx_nic_process_eventq(channel, budget); if (spent && efx_channel_has_rx_queue(channel)) { struct efx_rx_queue *rx_queue = @@ -257,6 +265,14 @@ static int efx_process_channel(struct efx_channel *channel, int budget) efx_fast_push_rx_descriptors(rx_queue, true); } + /* Update BQL */ + efx_for_each_channel_tx_queue(tx_queue, channel) { + if (tx_queue->bytes_compl) { + netdev_tx_completed_queue(tx_queue->core_txq, + tx_queue->pkts_compl, tx_queue->bytes_compl); + } + } + return spent; } @@ -948,6 +964,16 @@ void efx_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc) static void efx_fini_port(struct efx_nic *efx); +/* We assume that efx->type->reconfigure_mac will always try to sync RX + * filters and therefore needs to read-lock the filter table against freeing + */ +void efx_mac_reconfigure(struct efx_nic *efx) +{ + down_read(&efx->filter_sem); + efx->type->reconfigure_mac(efx); + up_read(&efx->filter_sem); +} + /* Push loopback/power/transmit disable settings to the PHY, and reconfigure * the MAC appropriately. All other PHY configuration changes are pushed * through phy_op->set_settings(), and pushed asynchronously to the MAC @@ -1001,7 +1027,7 @@ static void efx_mac_work(struct work_struct *data) mutex_lock(&efx->mac_lock); if (efx->port_enabled) - efx->type->reconfigure_mac(efx); + efx_mac_reconfigure(efx); mutex_unlock(&efx->mac_lock); } @@ -1041,11 +1067,11 @@ static int efx_init_port(struct efx_nic *efx) /* Reconfigure the MAC before creating dma queues (required for * Falcon/A1 where RX_INGR_EN/TX_DRAIN_EN isn't supported) */ - efx->type->reconfigure_mac(efx); + efx_mac_reconfigure(efx); /* Ensure the PHY advertises the correct flow control settings */ rc = efx->phy_op->reconfigure(efx); - if (rc) + if (rc && rc != -EPERM) goto fail2; mutex_unlock(&efx->mac_lock); @@ -1067,7 +1093,7 @@ static void efx_start_port(struct efx_nic *efx) efx->port_enabled = true; /* Ensure MAC ingress/egress is enabled */ - efx->type->reconfigure_mac(efx); + efx_mac_reconfigure(efx); mutex_unlock(&efx->mac_lock); } @@ -1200,10 +1226,12 @@ static int efx_init_io(struct efx_nic *efx) struct pci_dev *pci_dev = efx->pci_dev; dma_addr_t dma_mask = efx->type->max_dma_mask; unsigned int mem_map_size = efx->type->mem_map_size(efx); - int rc; + int rc, bar; netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n"); + bar = efx->type->mem_bar; + rc = pci_enable_device(pci_dev); if (rc) { netif_err(efx, probe, efx->net_dev, @@ -1219,11 +1247,9 @@ static int efx_init_io(struct efx_nic *efx) * masks event though they reject 46 bit masks. 
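
The DMA-mask change just below drops the separate capability pre-check and simply retries the mask-setting call with progressively narrower masks until one is accepted or the mask falls to 31 bits. The retry logic itself is plain mask-halving; a standalone model with a stand-in for the DMA call, assuming a device that only accepts 40-bit masks, is:

    #include <errno.h>
    #include <stdio.h>

    /* Stand-in for the real mask-setting call. */
    static int try_mask(unsigned long long mask)
    {
        return mask <= ((1ULL << 40) - 1) ? 0 : -EIO;
    }

    int main(void)
    {
        unsigned long long mask = (1ULL << 46) - 1;  /* start wide */
        int rc = -EIO;

        while (mask > 0x7fffffffULL) {
            rc = try_mask(mask);
            if (rc == 0)
                break;
            mask >>= 1;              /* narrow by one bit and retry */
        }
        printf("rc=%d mask=%#llx\n", rc, mask);
        return 0;
    }
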
*/ while (dma_mask > 0x7fffffffUL) { - if (dma_supported(&pci_dev->dev, dma_mask)) { - rc = dma_set_mask_and_coherent(&pci_dev->dev, dma_mask); - if (rc == 0) - break; - } + rc = dma_set_mask_and_coherent(&pci_dev->dev, dma_mask); + if (rc == 0) + break; dma_mask >>= 1; } if (rc) { @@ -1234,8 +1260,8 @@ static int efx_init_io(struct efx_nic *efx) netif_dbg(efx, probe, efx->net_dev, "using DMA mask %llx\n", (unsigned long long) dma_mask); - efx->membase_phys = pci_resource_start(efx->pci_dev, EFX_MEM_BAR); - rc = pci_request_region(pci_dev, EFX_MEM_BAR, "sfc"); + efx->membase_phys = pci_resource_start(efx->pci_dev, bar); + rc = pci_request_region(pci_dev, bar, "sfc"); if (rc) { netif_err(efx, probe, efx->net_dev, "request for memory BAR failed\n"); @@ -1258,7 +1284,7 @@ static int efx_init_io(struct efx_nic *efx) return 0; fail4: - pci_release_region(efx->pci_dev, EFX_MEM_BAR); + pci_release_region(efx->pci_dev, bar); fail3: efx->membase_phys = 0; fail2: @@ -1269,6 +1295,8 @@ static int efx_init_io(struct efx_nic *efx) static void efx_fini_io(struct efx_nic *efx) { + int bar; + netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n"); if (efx->membase) { @@ -1277,11 +1305,23 @@ static void efx_fini_io(struct efx_nic *efx) } if (efx->membase_phys) { - pci_release_region(efx->pci_dev, EFX_MEM_BAR); + bar = efx->type->mem_bar; + pci_release_region(efx->pci_dev, bar); efx->membase_phys = 0; } - pci_disable_device(efx->pci_dev); + /* Don't disable bus-mastering if VFs are assigned */ + if (!pci_vfs_assigned(efx->pci_dev)) + pci_disable_device(efx->pci_dev); +} + +void efx_set_default_rx_indir_table(struct efx_nic *efx) +{ + size_t i; + + for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++) + efx->rx_indir_table[i] = + ethtool_rxfh_indir_default(i, efx->rss_spread); } static unsigned int efx_wanted_parallelism(struct efx_nic *efx) @@ -1304,7 +1344,7 @@ static unsigned int efx_wanted_parallelism(struct efx_nic *efx) if (!cpumask_test_cpu(cpu, thread_mask)) { ++count; cpumask_or(thread_mask, thread_mask, - topology_thread_cpumask(cpu)); + topology_sibling_cpumask(cpu)); } } @@ -1314,15 +1354,19 @@ static unsigned int efx_wanted_parallelism(struct efx_nic *efx) /* If RSS is requested for the PF *and* VFs then we can't write RSS * table entries that are inaccessible to VFs */ - if (efx->type->sriov_wanted(efx) && efx_vf_size(efx) > 1 && - count > efx_vf_size(efx)) { - netif_warn(efx, probe, efx->net_dev, - "Reducing number of RSS channels from %u to %u for " - "VF support. Increase vf-msix-limit to use more " - "channels on the PF.\n", - count, efx_vf_size(efx)); - count = efx_vf_size(efx); +#ifdef CONFIG_SFC_SRIOV + if (efx->type->sriov_wanted) { + if (efx->type->sriov_wanted(efx) && efx_vf_size(efx) > 1 && + count > efx_vf_size(efx)) { + netif_warn(efx, probe, efx->net_dev, + "Reducing number of RSS channels from %u to %u for " + "VF support. 
Increase vf-msix-limit to use more " + "channels on the PF.\n", + count, efx_vf_size(efx)); + count = efx_vf_size(efx); + } } +#endif return count; } @@ -1345,7 +1389,7 @@ static int efx_probe_interrupts(struct efx_nic *efx) unsigned int n_channels; n_channels = efx_wanted_parallelism(efx); - if (separate_tx_channels) + if (efx_separate_tx_channels) n_channels *= 2; n_channels += extra_channels; n_channels = min(n_channels, efx->max_channels); @@ -1372,13 +1416,16 @@ static int efx_probe_interrupts(struct efx_nic *efx) efx->n_channels = n_channels; if (n_channels > extra_channels) n_channels -= extra_channels; - if (separate_tx_channels) { - efx->n_tx_channels = max(n_channels / 2, 1U); + if (efx_separate_tx_channels) { + efx->n_tx_channels = min(max(n_channels / 2, + 1U), + efx->max_tx_channels); efx->n_rx_channels = max(n_channels - efx->n_tx_channels, 1U); } else { - efx->n_tx_channels = n_channels; + efx->n_tx_channels = min(n_channels, + efx->max_tx_channels); efx->n_rx_channels = n_channels; } for (i = 0; i < efx->n_channels; i++) @@ -1404,7 +1451,7 @@ static int efx_probe_interrupts(struct efx_nic *efx) /* Assume legacy interrupts */ if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) { - efx->n_channels = 1 + (separate_tx_channels ? 1 : 0); + efx->n_channels = 1 + (efx_separate_tx_channels ? 1 : 0); efx->n_rx_channels = 1; efx->n_tx_channels = 1; efx->legacy_irq = efx->pci_dev->irq; @@ -1426,10 +1473,15 @@ static int efx_probe_interrupts(struct efx_nic *efx) } /* RSS might be usable on VFs even if it is disabled on the PF */ - - efx->rss_spread = ((efx->n_rx_channels > 1 || - !efx->type->sriov_wanted(efx)) ? - efx->n_rx_channels : efx_vf_size(efx)); +#ifdef CONFIG_SFC_SRIOV + if (efx->type->sriov_wanted) { + efx->rss_spread = ((efx->n_rx_channels > 1 || + !efx->type->sriov_wanted(efx)) ? + efx->n_rx_channels : efx_vf_size(efx)); + return 0; + } +#endif + efx->rss_spread = efx->n_rx_channels; return 0; } @@ -1573,7 +1625,8 @@ static void efx_set_channels(struct efx_nic *efx) struct efx_tx_queue *tx_queue; efx->tx_channel_offset = - separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0; + efx_separate_tx_channels ? + efx->n_channels - efx->n_tx_channels : 0; /* We need to mark which channels really have RX and TX * queues, and adjust the TX queue numbers if we have separate @@ -1593,7 +1646,6 @@ static void efx_set_channels(struct efx_nic *efx) static int efx_probe_nic(struct efx_nic *efx) { - size_t i; int rc; netif_dbg(efx, probe, efx->net_dev, "creating NIC\n"); @@ -1603,23 +1655,39 @@ static int efx_probe_nic(struct efx_nic *efx) if (rc) return rc; - /* Determine the number of channels and queues by trying to hook - * in MSI-X interrupts. */ - rc = efx_probe_interrupts(efx); - if (rc) - goto fail1; + do { + if (!efx->max_channels || !efx->max_tx_channels) { + netif_err(efx, drv, efx->net_dev, + "Insufficient resources to allocate" + " any channels\n"); + rc = -ENOSPC; + goto fail1; + } + + /* Determine the number of channels and queues by trying + * to hook in MSI-X interrupts. 
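
The probe path here gains a do/while loop because resource dimensioning may come back with -EAGAIN after lowering the channel limit, in which case the interrupts are released and the whole sizing pass is retried with the smaller limit. A compact standalone model of that retry shape, with invented helpers in place of the driver's probe and dimensioning hooks, is:

    #include <errno.h>
    #include <stdio.h>

    static int max_channels = 16;

    /* Pretend resource sizing: too many channels forces a retry with fewer. */
    static int dimension_resources(int channels)
    {
        if (channels > 8) {
            max_channels = 8;        /* try again with a smaller limit */
            return -EAGAIN;
        }
        return 0;
    }

    int main(void)
    {
        int rc;

        do {
            if (max_channels == 0) {
                rc = -ENOSPC;        /* nothing left to allocate */
                break;
            }
            int channels = max_channels;   /* "probe interrupts" */

            rc = dimension_resources(channels);
            if (rc != 0 && rc != -EAGAIN)
                break;
            /* on -EAGAIN the loop re-probes with the new, smaller limit */
        } while (rc == -EAGAIN);

        printf("rc=%d channels<=%d\n", rc, max_channels);
        return 0;
    }
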
+ */ + rc = efx_probe_interrupts(efx); + if (rc) + goto fail1; - efx_set_channels(efx); + efx_set_channels(efx); - rc = efx->type->dimension_resources(efx); - if (rc) - goto fail2; + /* dimension_resources can fail with EAGAIN */ + rc = efx->type->dimension_resources(efx); + if (rc != 0 && rc != -EAGAIN) + goto fail2; + + if (rc == -EAGAIN) + /* try again with new max_channels */ + efx_remove_interrupts(efx); + + } while (rc == -EAGAIN); if (efx->n_channels > 1) - netdev_rss_key_fill(&efx->rx_hash_key, sizeof(efx->rx_hash_key)); - for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++) - efx->rx_indir_table[i] = - ethtool_rxfh_indir_default(i, efx->rss_spread); + netdev_rss_key_fill(&efx->rx_hash_key, + sizeof(efx->rx_hash_key)); + efx_set_default_rx_indir_table(efx); netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels); netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels); @@ -1650,10 +1718,11 @@ static int efx_probe_filters(struct efx_nic *efx) int rc; spin_lock_init(&efx->filter_lock); - + init_rwsem(&efx->filter_sem); + down_write(&efx->filter_sem); rc = efx->type->filter_table_probe(efx); if (rc) - return rc; + goto out_unlock; #ifdef CONFIG_RFS_ACCEL if (efx->type->offload_features & NETIF_F_NTUPLE) { @@ -1662,12 +1731,14 @@ static int efx_probe_filters(struct efx_nic *efx) GFP_KERNEL); if (!efx->rps_flow_id) { efx->type->filter_table_remove(efx); - return -ENOMEM; + rc = -ENOMEM; + goto out_unlock; } } #endif - - return 0; +out_unlock: + up_write(&efx->filter_sem); + return rc; } static void efx_remove_filters(struct efx_nic *efx) @@ -1675,12 +1746,16 @@ static void efx_remove_filters(struct efx_nic *efx) #ifdef CONFIG_RFS_ACCEL kfree(efx->rps_flow_id); #endif + down_write(&efx->filter_sem); efx->type->filter_table_remove(efx); + up_write(&efx->filter_sem); } static void efx_restore_filters(struct efx_nic *efx) { + down_read(&efx->filter_sem); efx->type->filter_table_restore(efx); + up_read(&efx->filter_sem); } /************************************************************************** @@ -1712,21 +1787,33 @@ static int efx_probe_all(struct efx_nic *efx) } efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE; +#ifdef CONFIG_SFC_SRIOV + rc = efx->type->vswitching_probe(efx); + if (rc) /* not fatal; the PF will still work fine */ + netif_warn(efx, probe, efx->net_dev, + "failed to setup vswitching rc=%d;" + " VFs may not function\n", rc); +#endif + rc = efx_probe_filters(efx); if (rc) { netif_err(efx, probe, efx->net_dev, "failed to create filter tables\n"); - goto fail3; + goto fail4; } rc = efx_probe_channels(efx); if (rc) - goto fail4; + goto fail5; return 0; - fail4: + fail5: efx_remove_filters(efx); + fail4: +#ifdef CONFIG_SFC_SRIOV + efx->type->vswitching_remove(efx); +#endif fail3: efx_remove_port(efx); fail2: @@ -1816,6 +1903,9 @@ static void efx_remove_all(struct efx_nic *efx) { efx_remove_channels(efx); efx_remove_filters(efx); +#ifdef CONFIG_SFC_SRIOV + efx->type->vswitching_remove(efx); +#endif efx_remove_port(efx); efx_remove_nic(efx); } @@ -1970,7 +2060,7 @@ static void efx_init_napi_channel(struct efx_channel *channel) netif_napi_add(channel->napi_dev, &channel->napi_str, efx_poll, napi_weight); napi_hash_add(&channel->napi_str); - efx_channel_init_lock(channel); + efx_channel_busy_poll_init(channel); } static void efx_init_napi(struct efx_nic *efx) @@ -2033,7 +2123,7 @@ static int efx_busy_poll(struct napi_struct *napi) if (!netif_running(efx->net_dev)) return LL_FLUSH_FAILED; - if (!efx_channel_lock_poll(channel)) + if 
(!efx_channel_try_lock_poll(channel)) return LL_FLUSH_BUSY; old_rx_packets = channel->rx_queue.rx_packets; @@ -2059,7 +2149,7 @@ static int efx_busy_poll(struct napi_struct *napi) *************************************************************************/ /* Context: process, rtnl_lock() held. */ -static int efx_net_open(struct net_device *net_dev) +int efx_net_open(struct net_device *net_dev) { struct efx_nic *efx = netdev_priv(net_dev); int rc; @@ -2088,7 +2178,7 @@ static int efx_net_open(struct net_device *net_dev) * Note that the kernel will ignore our return code; this method * should really be a void. */ -static int efx_net_stop(struct net_device *net_dev) +int efx_net_stop(struct net_device *net_dev) { struct efx_nic *efx = netdev_priv(net_dev); @@ -2146,7 +2236,7 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu) mutex_lock(&efx->mac_lock); net_dev->mtu = new_mtu; - efx->type->reconfigure_mac(efx); + efx_mac_reconfigure(efx); mutex_unlock(&efx->mac_lock); efx_start_all(efx); @@ -2159,6 +2249,8 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data) struct efx_nic *efx = netdev_priv(net_dev); struct sockaddr *addr = data; u8 *new_addr = addr->sa_data; + u8 old_addr[6]; + int rc; if (!is_valid_ether_addr(new_addr)) { netif_err(efx, drv, efx->net_dev, @@ -2167,12 +2259,20 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data) return -EADDRNOTAVAIL; } + /* save old address */ + ether_addr_copy(old_addr, net_dev->dev_addr); ether_addr_copy(net_dev->dev_addr, new_addr); - efx->type->sriov_mac_address_changed(efx); + if (efx->type->set_mac_address) { + rc = efx->type->set_mac_address(efx); + if (rc) { + ether_addr_copy(net_dev->dev_addr, old_addr); + return rc; + } + } /* Reconfigure the MAC */ mutex_lock(&efx->mac_lock); - efx->type->reconfigure_mac(efx); + efx_mac_reconfigure(efx); mutex_unlock(&efx->mac_lock); return 0; @@ -2199,7 +2299,7 @@ static int efx_set_features(struct net_device *net_dev, netdev_features_t data) return 0; } -static const struct net_device_ops efx_farch_netdev_ops = { +static const struct net_device_ops efx_netdev_ops = { .ndo_open = efx_net_open, .ndo_stop = efx_net_stop, .ndo_get_stats64 = efx_net_stats, @@ -2212,10 +2312,12 @@ static const struct net_device_ops efx_farch_netdev_ops = { .ndo_set_rx_mode = efx_set_rx_mode, .ndo_set_features = efx_set_features, #ifdef CONFIG_SFC_SRIOV - .ndo_set_vf_mac = efx_siena_sriov_set_vf_mac, - .ndo_set_vf_vlan = efx_siena_sriov_set_vf_vlan, - .ndo_set_vf_spoofchk = efx_siena_sriov_set_vf_spoofchk, - .ndo_get_vf_config = efx_siena_sriov_get_vf_config, + .ndo_set_vf_mac = efx_sriov_set_vf_mac, + .ndo_set_vf_vlan = efx_sriov_set_vf_vlan, + .ndo_set_vf_spoofchk = efx_sriov_set_vf_spoofchk, + .ndo_get_vf_config = efx_sriov_get_vf_config, + .ndo_set_vf_link_state = efx_sriov_set_vf_link_state, + .ndo_get_phys_port_id = efx_sriov_get_phys_port_id, #endif #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = efx_netpoll, @@ -2229,29 +2331,6 @@ static const struct net_device_ops efx_farch_netdev_ops = { #endif }; -static const struct net_device_ops efx_ef10_netdev_ops = { - .ndo_open = efx_net_open, - .ndo_stop = efx_net_stop, - .ndo_get_stats64 = efx_net_stats, - .ndo_tx_timeout = efx_watchdog, - .ndo_start_xmit = efx_hard_start_xmit, - .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = efx_ioctl, - .ndo_change_mtu = efx_change_mtu, - .ndo_set_mac_address = efx_set_mac_address, - .ndo_set_rx_mode = efx_set_rx_mode, - .ndo_set_features = efx_set_features, -#ifdef 
CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = efx_netpoll, -#endif -#ifdef CONFIG_NET_RX_BUSY_POLL - .ndo_busy_poll = efx_busy_poll, -#endif -#ifdef CONFIG_RFS_ACCEL - .ndo_rx_flow_steer = efx_filter_rfs, -#endif -}; - static void efx_update_name(struct efx_nic *efx) { strcpy(efx->name, efx->net_dev->name); @@ -2264,8 +2343,7 @@ static int efx_netdev_event(struct notifier_block *this, { struct net_device *net_dev = netdev_notifier_info_to_dev(ptr); - if ((net_dev->netdev_ops == &efx_farch_netdev_ops || - net_dev->netdev_ops == &efx_ef10_netdev_ops) && + if ((net_dev->netdev_ops == &efx_netdev_ops) && event == NETDEV_CHANGENAME) efx_update_name(netdev_priv(net_dev)); @@ -2284,6 +2362,28 @@ show_phy_type(struct device *dev, struct device_attribute *attr, char *buf) } static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL); +#ifdef CONFIG_SFC_MCDI_LOGGING +static ssize_t show_mcdi_log(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + + return scnprintf(buf, PAGE_SIZE, "%d\n", mcdi->logging_enabled); +} +static ssize_t set_mcdi_log(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + bool enable = count > 0 && *buf != '0'; + + mcdi->logging_enabled = enable; + return count; +} +static DEVICE_ATTR(mcdi_logging, 0644, show_mcdi_log, set_mcdi_log); +#endif + static int efx_register_netdev(struct efx_nic *efx) { struct net_device *net_dev = efx->net_dev; @@ -2292,12 +2392,9 @@ static int efx_register_netdev(struct efx_nic *efx) net_dev->watchdog_timeo = 5 * HZ; net_dev->irq = efx->pci_dev->irq; - if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) { - net_dev->netdev_ops = &efx_ef10_netdev_ops; + net_dev->netdev_ops = &efx_netdev_ops; + if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) net_dev->priv_flags |= IFF_UNICAST_FLT; - } else { - net_dev->netdev_ops = &efx_farch_netdev_ops; - } net_dev->ethtool_ops = &efx_ethtool_ops; net_dev->gso_max_segs = EFX_TSO_MAX_SEGS; @@ -2344,9 +2441,21 @@ static int efx_register_netdev(struct efx_nic *efx) "failed to init net dev attributes\n"); goto fail_registered; } +#ifdef CONFIG_SFC_MCDI_LOGGING + rc = device_create_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging); + if (rc) { + netif_err(efx, drv, efx->net_dev, + "failed to init net dev attributes\n"); + goto fail_attr_mcdi_logging; + } +#endif return 0; +#ifdef CONFIG_SFC_MCDI_LOGGING +fail_attr_mcdi_logging: + device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type); +#endif fail_registered: rtnl_lock(); efx_dissociate(efx); @@ -2365,13 +2474,14 @@ static void efx_unregister_netdev(struct efx_nic *efx) BUG_ON(netdev_priv(efx->net_dev) != efx); - strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name)); - device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type); - - rtnl_lock(); - unregister_netdevice(efx->net_dev); - efx->state = STATE_UNINIT; - rtnl_unlock(); + if (efx_dev_registered(efx)) { + strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name)); +#ifdef CONFIG_SFC_MCDI_LOGGING + device_remove_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging); +#endif + device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type); + unregister_netdev(efx->net_dev); + } } /************************************************************************** @@ -2393,7 +2503,8 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method) efx_disable_interrupts(efx); 
mutex_lock(&efx->mac_lock); - if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) + if (efx->port_initialized && method != RESET_TYPE_INVISIBLE && + method != RESET_TYPE_DATAPATH) efx->phy_op->fini(efx); efx->type->fini(efx); } @@ -2422,11 +2533,13 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok) if (!ok) goto fail; - if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) { + if (efx->port_initialized && method != RESET_TYPE_INVISIBLE && + method != RESET_TYPE_DATAPATH) { rc = efx->phy_op->init(efx); if (rc) goto fail; - if (efx->phy_op->reconfigure(efx)) + rc = efx->phy_op->reconfigure(efx); + if (rc && rc != -EPERM) netif_err(efx, drv, efx->net_dev, "could not restore PHY settings\n"); } @@ -2434,8 +2547,20 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok) rc = efx_enable_interrupts(efx); if (rc) goto fail; + +#ifdef CONFIG_SFC_SRIOV + rc = efx->type->vswitching_restore(efx); + if (rc) /* not fatal; the PF will still work fine */ + netif_warn(efx, probe, efx->net_dev, + "failed to restore vswitching rc=%d;" + " VFs may not function\n", rc); +#endif + + down_read(&efx->filter_sem); efx_restore_filters(efx); - efx->type->sriov_reset(efx); + up_read(&efx->filter_sem); + if (efx->type->sriov_reset) + efx->type->sriov_reset(efx); mutex_unlock(&efx->mac_lock); @@ -2605,6 +2730,7 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type) case RESET_TYPE_WORLD: case RESET_TYPE_DISABLE: case RESET_TYPE_RECOVER_OR_DISABLE: + case RESET_TYPE_DATAPATH: case RESET_TYPE_MC_BIST: case RESET_TYPE_MCDI_TIMEOUT: method = type; @@ -2655,6 +2781,8 @@ static const struct pci_device_id efx_pci_table[] = { .driver_data = (unsigned long) &siena_a0_nic_type}, {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0903), /* SFC9120 PF */ .driver_data = (unsigned long) &efx_hunt_a0_nic_type}, + {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1903), /* SFC9120 VF */ + .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type}, {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0923), /* SFC9140 PF */ .driver_data = (unsigned long) &efx_hunt_a0_nic_type}, {0} /* end of list */ @@ -2809,7 +2937,8 @@ static void efx_pci_remove_main(struct efx_nic *efx) } /* Final NIC shutdown - * This is called only at module unload (or hotplug removal). + * This is called only at module unload (or hotplug removal). A PF can call + * this on its VFs to ensure they are unbound first. 
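The filter_sem introduced earlier in this patch (write-locked around filter table probe/remove, read-locked around restore, and again read-locked in efx_reset_up above) is a plain reader/writer-semaphore pattern. Below is a minimal, illustrative sketch of that pattern only, assuming nothing beyond <linux/rwsem.h>; struct filter_state and the three helpers are hypothetical names, not the driver's.

    #include <linux/rwsem.h>

    /* Illustrative reduced structure, not the driver's real efx_nic */
    struct filter_state {
    	struct rw_semaphore sem;	/* guards the filter table */
    };

    static void filter_state_init(struct filter_state *st)
    {
    	init_rwsem(&st->sem);
    }

    /* probe/remove replace the table, so they take the lock for write */
    static void filter_table_replace(struct filter_state *st)
    {
    	down_write(&st->sem);
    	/* ... allocate, swap or free the table here ... */
    	up_write(&st->sem);
    }

    /* restore only walks the existing table, so a read lock suffices and
     * concurrent readers (e.g. reset paths) may proceed in parallel */
    static void filter_table_restore(struct filter_state *st)
    {
    	down_read(&st->sem);
    	/* ... reinsert filters into hardware ... */
    	up_read(&st->sem);
    }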
*/ static void efx_pci_remove(struct pci_dev *pci_dev) { @@ -2824,9 +2953,12 @@ static void efx_pci_remove(struct pci_dev *pci_dev) efx_dissociate(efx); dev_close(efx->net_dev); efx_disable_interrupts(efx); + efx->state = STATE_UNINIT; rtnl_unlock(); - efx->type->sriov_fini(efx); + if (efx->type->sriov_fini) + efx->type->sriov_fini(efx); + efx_unregister_netdev(efx); efx_mtd_remove(efx); @@ -3008,7 +3140,8 @@ static int efx_pci_probe(struct pci_dev *pci_dev, netif_info(efx, probe, efx->net_dev, "Solarflare NIC detected\n"); - efx_probe_vpd_strings(efx); + if (!efx->type->is_vf) + efx_probe_vpd_strings(efx); /* Set up basic I/O (BAR mappings etc) */ rc = efx_init_io(efx); @@ -3023,10 +3156,12 @@ static int efx_pci_probe(struct pci_dev *pci_dev, if (rc) goto fail4; - rc = efx->type->sriov_init(efx); - if (rc) - netif_err(efx, probe, efx->net_dev, - "SR-IOV can't be enabled rc %d\n", rc); + if (efx->type->sriov_init) { + rc = efx->type->sriov_init(efx); + if (rc) + netif_err(efx, probe, efx->net_dev, + "SR-IOV can't be enabled rc %d\n", rc); + } netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n"); @@ -3058,6 +3193,26 @@ static int efx_pci_probe(struct pci_dev *pci_dev, return rc; } +/* efx_pci_sriov_configure returns the actual number of Virtual Functions + * enabled on success + */ +#ifdef CONFIG_SFC_SRIOV +static int efx_pci_sriov_configure(struct pci_dev *dev, int num_vfs) +{ + int rc; + struct efx_nic *efx = pci_get_drvdata(dev); + + if (efx->type->sriov_configure) { + rc = efx->type->sriov_configure(efx, num_vfs); + if (rc) + return rc; + else + return num_vfs; + } else + return -EOPNOTSUPP; +} +#endif + static int efx_pm_freeze(struct device *dev) { struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); @@ -3267,7 +3422,7 @@ out: * with our request for slot reset the mmio_enabled callback will never be * called, and the link_reset callback is not used by AER or EEH mechanisms. 
*/ -static struct pci_error_handlers efx_err_handlers = { +static const struct pci_error_handlers efx_err_handlers = { .error_detected = efx_io_error_detected, .slot_reset = efx_io_slot_reset, .resume = efx_io_resume, @@ -3280,6 +3435,9 @@ static struct pci_driver efx_pci_driver = { .remove = efx_pci_remove, .driver.pm = &efx_pm_ops, .err_handler = &efx_err_handlers, +#ifdef CONFIG_SFC_SRIOV + .sriov_configure = efx_pci_sriov_configure, +#endif }; /************************************************************************** @@ -3302,9 +3460,11 @@ static int __init efx_init_module(void) if (rc) goto err_notifier; +#ifdef CONFIG_SFC_SRIOV rc = efx_init_sriov(); if (rc) goto err_sriov; +#endif reset_workqueue = create_singlethread_workqueue("sfc_reset"); if (!reset_workqueue) { @@ -3321,8 +3481,10 @@ static int __init efx_init_module(void) err_pci: destroy_workqueue(reset_workqueue); err_reset: +#ifdef CONFIG_SFC_SRIOV efx_fini_sriov(); err_sriov: +#endif unregister_netdevice_notifier(&efx_netdev_notifier); err_notifier: return rc; @@ -3334,7 +3496,9 @@ static void __exit efx_exit_module(void) pci_unregister_driver(&efx_pci_driver); destroy_workqueue(reset_workqueue); +#ifdef CONFIG_SFC_SRIOV efx_fini_sriov(); +#endif unregister_netdevice_notifier(&efx_netdev_notifier); } diff --git a/kernel/drivers/net/ethernet/sfc/efx.h b/kernel/drivers/net/ethernet/sfc/efx.h index 2587c582a..10827476b 100644 --- a/kernel/drivers/net/ethernet/sfc/efx.h +++ b/kernel/drivers/net/ethernet/sfc/efx.h @@ -15,7 +15,12 @@ #include "filter.h" /* All controllers use BAR 0 for I/O space and BAR 2(&3) for memory */ +/* All VFs use BAR 0/1 for memory */ #define EFX_MEM_BAR 2 +#define EFX_MEM_VF_BAR 0 + +int efx_net_open(struct net_device *net_dev); +int efx_net_stop(struct net_device *net_dev); /* TX */ int efx_probe_tx_queue(struct efx_tx_queue *tx_queue); @@ -30,8 +35,10 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index); int efx_setup_tc(struct net_device *net_dev, u8 num_tc); unsigned int efx_tx_max_skb_descs(struct efx_nic *efx); extern unsigned int efx_piobuf_size; +extern bool efx_separate_tx_channels; /* RX */ +void efx_set_default_rx_indir_table(struct efx_nic *efx); void efx_rx_config_page_split(struct efx_nic *efx); int efx_probe_rx_queue(struct efx_rx_queue *rx_queue); void efx_remove_rx_queue(struct efx_rx_queue *rx_queue); @@ -69,8 +76,15 @@ void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue); #define EFX_TXQ_MAX_ENT(efx) (EFX_WORKAROUND_35388(efx) ? 
\ EFX_MAX_DMAQ_SIZE / 2 : EFX_MAX_DMAQ_SIZE) +static inline bool efx_rss_enabled(struct efx_nic *efx) +{ + return efx->rss_spread > 1; +} + /* Filters */ +void efx_mac_reconfigure(struct efx_nic *efx); + /** * efx_filter_insert_filter - add or replace a filter * @efx: NIC in which to insert the filter @@ -220,6 +234,13 @@ static inline void efx_mtd_rename(struct efx_nic *efx) {} static inline void efx_mtd_remove(struct efx_nic *efx) {} #endif +#ifdef CONFIG_SFC_SRIOV +static inline unsigned int efx_vf_size(struct efx_nic *efx) +{ + return 1 << efx->vi_scale; +} +#endif + static inline void efx_schedule_channel(struct efx_channel *channel) { netif_vdbg(channel->efx, intr, channel->efx->net_dev, diff --git a/kernel/drivers/net/ethernet/sfc/enum.h b/kernel/drivers/net/ethernet/sfc/enum.h index d1dbb5fb3..c94f56271 100644 --- a/kernel/drivers/net/ethernet/sfc/enum.h +++ b/kernel/drivers/net/ethernet/sfc/enum.h @@ -143,6 +143,7 @@ enum efx_loopback_mode { * @RESET_TYPE_WORLD: Reset as much as possible * @RESET_TYPE_RECOVER_OR_DISABLE: Try to recover. Apply RESET_TYPE_DISABLE if * unsuccessful. + * @RESET_TYPE_DATAPATH: Reset datapath only. * @RESET_TYPE_MC_BIST: MC entering BIST mode. * @RESET_TYPE_DISABLE: Reset datapath, MAC and PHY; leave NIC disabled * @RESET_TYPE_TX_WATCHDOG: reset due to TX watchdog @@ -159,6 +160,7 @@ enum reset_type { RESET_TYPE_ALL, RESET_TYPE_WORLD, RESET_TYPE_RECOVER_OR_DISABLE, + RESET_TYPE_DATAPATH, RESET_TYPE_MC_BIST, RESET_TYPE_DISABLE, RESET_TYPE_MAX_METHOD, diff --git a/kernel/drivers/net/ethernet/sfc/ethtool.c b/kernel/drivers/net/ethernet/sfc/ethtool.c index 4835bc0d0..034797661 100644 --- a/kernel/drivers/net/ethernet/sfc/ethtool.c +++ b/kernel/drivers/net/ethernet/sfc/ethtool.c @@ -734,7 +734,7 @@ static int efx_ethtool_set_pauseparam(struct net_device *net_dev, /* Reconfigure the MAC. 
The PHY *may* generate a link state change event * if the user just changed the advertised capabilities, but there's no * harm doing this twice */ - efx->type->reconfigure_mac(efx); + efx_mac_reconfigure(efx); out: mutex_unlock(&efx->mac_lock); @@ -1109,9 +1109,8 @@ static int efx_ethtool_set_rxfh(struct net_device *net_dev, const u32 *indir, return -EOPNOTSUPP; if (!indir) return 0; - memcpy(efx->rx_indir_table, indir, sizeof(efx->rx_indir_table)); - efx->type->rx_push_rss_config(efx); - return 0; + + return efx->type->rx_push_rss_config(efx, true, indir); } static int efx_ethtool_get_ts_info(struct net_device *net_dev, diff --git a/kernel/drivers/net/ethernet/sfc/falcon.c b/kernel/drivers/net/ethernet/sfc/falcon.c index f166c8ef3..d790cb8d9 100644 --- a/kernel/drivers/net/ethernet/sfc/falcon.c +++ b/kernel/drivers/net/ethernet/sfc/falcon.c @@ -477,16 +477,29 @@ static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id) * ************************************************************************** */ +static int dummy_rx_push_rss_config(struct efx_nic *efx, bool user, + const u32 *rx_indir_table) +{ + (void) efx; + (void) user; + (void) rx_indir_table; + return -ENOSYS; +} -static void falcon_b0_rx_push_rss_config(struct efx_nic *efx) +static int falcon_b0_rx_push_rss_config(struct efx_nic *efx, bool user, + const u32 *rx_indir_table) { efx_oword_t temp; + (void) user; /* Set hash key for IPv4 */ memcpy(&temp, efx->rx_hash_key, sizeof(temp)); efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY); + memcpy(efx->rx_indir_table, rx_indir_table, + sizeof(efx->rx_indir_table)); efx_farch_rx_push_indir_table(efx); + return 0; } /************************************************************************** @@ -2358,6 +2371,7 @@ static int falcon_probe_nic(struct efx_nic *efx) efx->max_channels = (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ? 
4 : EFX_MAX_CHANNELS); + efx->max_tx_channels = efx->max_channels; efx->timer_quantum_ns = 4968; /* 621 cycles */ /* Initialise I2C adapter */ @@ -2507,7 +2521,7 @@ static int falcon_init_nic(struct efx_nic *efx) falcon_init_rx_cfg(efx); if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { - falcon_b0_rx_push_rss_config(efx); + falcon_b0_rx_push_rss_config(efx, false, efx->rx_indir_table); /* Set destination of both TX and RX Flush events */ EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0); @@ -2687,6 +2701,8 @@ static int falcon_set_wol(struct efx_nic *efx, u32 type) */ const struct efx_nic_type falcon_a1_nic_type = { + .is_vf = false, + .mem_bar = EFX_MEM_BAR, .mem_map_size = falcon_a1_mem_map_size, .probe = falcon_probe_nic, .remove = falcon_remove_nic, @@ -2729,7 +2745,7 @@ const struct efx_nic_type falcon_a1_nic_type = { .tx_init = efx_farch_tx_init, .tx_remove = efx_farch_tx_remove, .tx_write = efx_farch_tx_write, - .rx_push_rss_config = efx_port_dummy_op_void, + .rx_push_rss_config = dummy_rx_push_rss_config, .rx_probe = efx_farch_rx_probe, .rx_init = efx_farch_rx_init, .rx_remove = efx_farch_rx_remove, @@ -2766,11 +2782,6 @@ const struct efx_nic_type falcon_a1_nic_type = { .mtd_write = falcon_mtd_write, .mtd_sync = falcon_mtd_sync, #endif - .sriov_init = efx_falcon_sriov_init, - .sriov_fini = efx_falcon_sriov_fini, - .sriov_mac_address_changed = efx_falcon_sriov_mac_address_changed, - .sriov_wanted = efx_falcon_sriov_wanted, - .sriov_reset = efx_falcon_sriov_reset, .revision = EFX_REV_FALCON_A1, .txd_ptr_tbl_base = FR_AA_TX_DESC_PTR_TBL_KER, @@ -2788,6 +2799,8 @@ const struct efx_nic_type falcon_a1_nic_type = { }; const struct efx_nic_type falcon_b0_nic_type = { + .is_vf = false, + .mem_bar = EFX_MEM_BAR, .mem_map_size = falcon_b0_mem_map_size, .probe = falcon_probe_nic, .remove = falcon_remove_nic, @@ -2867,11 +2880,6 @@ const struct efx_nic_type falcon_b0_nic_type = { .mtd_write = falcon_mtd_write, .mtd_sync = falcon_mtd_sync, #endif - .sriov_init = efx_falcon_sriov_init, - .sriov_fini = efx_falcon_sriov_fini, - .sriov_mac_address_changed = efx_falcon_sriov_mac_address_changed, - .sriov_wanted = efx_falcon_sriov_wanted, - .sriov_reset = efx_falcon_sriov_reset, .revision = EFX_REV_FALCON_B0, .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL, diff --git a/kernel/drivers/net/ethernet/sfc/farch.c b/kernel/drivers/net/ethernet/sfc/farch.c index bb89e96a1..133e9e35b 100644 --- a/kernel/drivers/net/ethernet/sfc/farch.c +++ b/kernel/drivers/net/ethernet/sfc/farch.c @@ -20,6 +20,8 @@ #include "efx.h" #include "nic.h" #include "farch_regs.h" +#include "sriov.h" +#include "siena_sriov.h" #include "io.h" #include "workarounds.h" @@ -319,7 +321,9 @@ void efx_farch_tx_write(struct efx_tx_queue *tx_queue) unsigned write_ptr; unsigned old_write_count = tx_queue->write_count; - BUG_ON(tx_queue->write_count == tx_queue->insert_count); + tx_queue->xmit_more_available = false; + if (unlikely(tx_queue->write_count == tx_queue->insert_count)) + return; do { write_ptr = tx_queue->write_count & tx_queue->ptr_mask; @@ -1198,13 +1202,17 @@ efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n", channel->channel, ev_sub_data); efx_farch_handle_tx_flush_done(efx, event); +#ifdef CONFIG_SFC_SRIOV efx_siena_sriov_tx_flush_done(efx, event); +#endif break; case FSE_AZ_RX_DESCQ_FLS_DONE_EV: netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n", channel->channel, ev_sub_data); efx_farch_handle_rx_flush_done(efx, event); +#ifdef 
CONFIG_SFC_SRIOV efx_siena_sriov_rx_flush_done(efx, event); +#endif break; case FSE_AZ_EVQ_INIT_DONE_EV: netif_dbg(efx, hw, efx->net_dev, @@ -1242,8 +1250,11 @@ efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) " RX Q %d is disabled.\n", ev_sub_data, ev_sub_data); efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR); - } else + } +#ifdef CONFIG_SFC_SRIOV + else efx_siena_sriov_desc_fetch_err(efx, ev_sub_data); +#endif break; case FSE_BZ_TX_DSC_ERROR_EV: if (ev_sub_data < EFX_VI_BASE) { @@ -1252,8 +1263,11 @@ efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) " TX Q %d is disabled.\n", ev_sub_data, ev_sub_data); efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR); - } else + } +#ifdef CONFIG_SFC_SRIOV + else efx_siena_sriov_desc_fetch_err(efx, ev_sub_data); +#endif break; default: netif_vdbg(efx, hw, efx->net_dev, @@ -1317,9 +1331,11 @@ int efx_farch_ev_process(struct efx_channel *channel, int budget) case FSE_AZ_EV_CODE_DRIVER_EV: efx_farch_handle_driver_event(channel, &event); break; +#ifdef CONFIG_SFC_SRIOV case FSE_CZ_EV_CODE_USER_EV: efx_siena_sriov_event(channel, &event); break; +#endif case FSE_CZ_EV_CODE_MCDI_EV: efx_mcdi_process_event(channel, &event); break; @@ -1685,28 +1701,32 @@ void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw) vi_count = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES); #ifdef CONFIG_SFC_SRIOV - if (efx->type->sriov_wanted(efx)) { - unsigned vi_dc_entries, buftbl_free, entries_per_vf, vf_limit; - - nic_data->vf_buftbl_base = buftbl_min; - - vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES; - vi_count = max(vi_count, EFX_VI_BASE); - buftbl_free = (sram_lim_qw - buftbl_min - - vi_count * vi_dc_entries); - - entries_per_vf = ((vi_dc_entries + EFX_VF_BUFTBL_PER_VI) * - efx_vf_size(efx)); - vf_limit = min(buftbl_free / entries_per_vf, - (1024U - EFX_VI_BASE) >> efx->vi_scale); - - if (efx->vf_count > vf_limit) { - netif_err(efx, probe, efx->net_dev, - "Reducing VF count from from %d to %d\n", - efx->vf_count, vf_limit); - efx->vf_count = vf_limit; + if (efx->type->sriov_wanted) { + if (efx->type->sriov_wanted(efx)) { + unsigned vi_dc_entries, buftbl_free; + unsigned entries_per_vf, vf_limit; + + nic_data->vf_buftbl_base = buftbl_min; + + vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES; + vi_count = max(vi_count, EFX_VI_BASE); + buftbl_free = (sram_lim_qw - buftbl_min - + vi_count * vi_dc_entries); + + entries_per_vf = ((vi_dc_entries + + EFX_VF_BUFTBL_PER_VI) * + efx_vf_size(efx)); + vf_limit = min(buftbl_free / entries_per_vf, + (1024U - EFX_VI_BASE) >> efx->vi_scale); + + if (efx->vf_count > vf_limit) { + netif_err(efx, probe, efx->net_dev, + "Reducing VF count from from %d to %d\n", + efx->vf_count, vf_limit); + efx->vf_count = vf_limit; + } + vi_count += efx->vf_count * efx_vf_size(efx); } - vi_count += efx->vf_count * efx_vf_size(efx); } #endif @@ -2222,7 +2242,7 @@ efx_farch_filter_init_rx_auto(struct efx_nic *efx, */ spec->priority = EFX_FILTER_PRI_AUTO; spec->flags = (EFX_FILTER_FLAG_RX | - (efx->n_rx_channels > 1 ? EFX_FILTER_FLAG_RX_RSS : 0) | + (efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0) | (efx->rx_scatter ? 
EFX_FILTER_FLAG_RX_SCATTER : 0)); spec->dmaq_id = 0; } diff --git a/kernel/drivers/net/ethernet/sfc/mcdi.c b/kernel/drivers/net/ethernet/sfc/mcdi.c index d37928f01..41fb6b60a 100644 --- a/kernel/drivers/net/ethernet/sfc/mcdi.c +++ b/kernel/drivers/net/ethernet/sfc/mcdi.c @@ -8,7 +8,8 @@ */ #include <linux/delay.h> -#include <asm/cmpxchg.h> +#include <linux/moduleparam.h> +#include <linux/atomic.h> #include "net_driver.h" #include "nic.h" #include "io.h" @@ -54,18 +55,32 @@ static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, static bool efx_mcdi_poll_once(struct efx_nic *efx); static void efx_mcdi_abandon(struct efx_nic *efx); +#ifdef CONFIG_SFC_MCDI_LOGGING +static bool mcdi_logging_default; +module_param(mcdi_logging_default, bool, 0644); +MODULE_PARM_DESC(mcdi_logging_default, + "Enable MCDI logging on newly-probed functions"); +#endif + int efx_mcdi_init(struct efx_nic *efx) { struct efx_mcdi_iface *mcdi; bool already_attached; - int rc; + int rc = -ENOMEM; efx->mcdi = kzalloc(sizeof(*efx->mcdi), GFP_KERNEL); if (!efx->mcdi) - return -ENOMEM; + goto fail; mcdi = efx_mcdi(efx); mcdi->efx = efx; +#ifdef CONFIG_SFC_MCDI_LOGGING + /* consuming code assumes buffer is page-sized */ + mcdi->logging_buffer = (char *)__get_free_page(GFP_KERNEL); + if (!mcdi->logging_buffer) + goto fail1; + mcdi->logging_enabled = mcdi_logging_default; +#endif init_waitqueue_head(&mcdi->wq); spin_lock_init(&mcdi->iface_lock); mcdi->state = MCDI_STATE_QUIESCENT; @@ -81,7 +96,7 @@ int efx_mcdi_init(struct efx_nic *efx) /* Recover from a failed assertion before probing */ rc = efx_mcdi_handle_assertion(efx); if (rc) - return rc; + goto fail2; /* Let the MC (and BMC, if this is a LOM) know that the driver * is loaded. We should do this before we reset the NIC. @@ -90,7 +105,7 @@ int efx_mcdi_init(struct efx_nic *efx) if (rc) { netif_err(efx, probe, efx->net_dev, "Unable to register driver with MCPU\n"); - return rc; + goto fail2; } if (already_attached) /* Not a fatal error */ @@ -102,6 +117,15 @@ int efx_mcdi_init(struct efx_nic *efx) efx->primary = efx; return 0; +fail2: +#ifdef CONFIG_SFC_MCDI_LOGGING + free_page((unsigned long)mcdi->logging_buffer); +fail1: +#endif + kfree(efx->mcdi); + efx->mcdi = NULL; +fail: + return rc; } void efx_mcdi_fini(struct efx_nic *efx) @@ -114,6 +138,10 @@ void efx_mcdi_fini(struct efx_nic *efx) /* Relinquish the device (back to the BMC, if this is a LOM) */ efx_mcdi_drv_attach(efx, false, NULL); +#ifdef CONFIG_SFC_MCDI_LOGGING + free_page((unsigned long)efx->mcdi->iface.logging_buffer); +#endif + kfree(efx->mcdi); } @@ -121,6 +149,9 @@ static void efx_mcdi_send_request(struct efx_nic *efx, unsigned cmd, const efx_dword_t *inbuf, size_t inlen) { struct efx_mcdi_iface *mcdi = efx_mcdi(efx); +#ifdef CONFIG_SFC_MCDI_LOGGING + char *buf = mcdi->logging_buffer; /* page-sized */ +#endif efx_dword_t hdr[2]; size_t hdr_len; u32 xflags, seqno; @@ -165,6 +196,31 @@ static void efx_mcdi_send_request(struct efx_nic *efx, unsigned cmd, hdr_len = 8; } +#ifdef CONFIG_SFC_MCDI_LOGGING + if (mcdi->logging_enabled && !WARN_ON_ONCE(!buf)) { + int bytes = 0; + int i; + /* Lengths should always be a whole number of dwords, so scream + * if they're not. + */ + WARN_ON_ONCE(hdr_len % 4); + WARN_ON_ONCE(inlen % 4); + + /* We own the logging buffer, as only one MCDI can be in + * progress on a NIC at any one time. So no need for locking. 
+ */ + for (i = 0; i < hdr_len / 4 && bytes < PAGE_SIZE; i++) + bytes += snprintf(buf + bytes, PAGE_SIZE - bytes, + " %08x", le32_to_cpu(hdr[i].u32[0])); + + for (i = 0; i < inlen / 4 && bytes < PAGE_SIZE; i++) + bytes += snprintf(buf + bytes, PAGE_SIZE - bytes, + " %08x", le32_to_cpu(inbuf[i].u32[0])); + + netif_info(efx, hw, efx->net_dev, "MCDI RPC REQ:%s\n", buf); + } +#endif + efx->type->mcdi_request(efx, hdr, hdr_len, inbuf, inlen); mcdi->new_epoch = false; @@ -206,6 +262,9 @@ static void efx_mcdi_read_response_header(struct efx_nic *efx) { struct efx_mcdi_iface *mcdi = efx_mcdi(efx); unsigned int respseq, respcmd, error; +#ifdef CONFIG_SFC_MCDI_LOGGING + char *buf = mcdi->logging_buffer; /* page-sized */ +#endif efx_dword_t hdr; efx->type->mcdi_read_response(efx, &hdr, 0, 4); @@ -223,6 +282,39 @@ static void efx_mcdi_read_response_header(struct efx_nic *efx) EFX_DWORD_FIELD(hdr, MC_CMD_V2_EXTN_IN_ACTUAL_LEN); } +#ifdef CONFIG_SFC_MCDI_LOGGING + if (mcdi->logging_enabled && !WARN_ON_ONCE(!buf)) { + size_t hdr_len, data_len; + int bytes = 0; + int i; + + WARN_ON_ONCE(mcdi->resp_hdr_len % 4); + hdr_len = mcdi->resp_hdr_len / 4; + /* MCDI_DECLARE_BUF ensures that underlying buffer is padded + * to dword size, and the MCDI buffer is always dword size + */ + data_len = DIV_ROUND_UP(mcdi->resp_data_len, 4); + + /* We own the logging buffer, as only one MCDI can be in + * progress on a NIC at any one time. So no need for locking. + */ + for (i = 0; i < hdr_len && bytes < PAGE_SIZE; i++) { + efx->type->mcdi_read_response(efx, &hdr, (i * 4), 4); + bytes += snprintf(buf + bytes, PAGE_SIZE - bytes, + " %08x", le32_to_cpu(hdr.u32[0])); + } + + for (i = 0; i < data_len && bytes < PAGE_SIZE; i++) { + efx->type->mcdi_read_response(efx, &hdr, + mcdi->resp_hdr_len + (i * 4), 4); + bytes += snprintf(buf + bytes, PAGE_SIZE - bytes, + " %08x", le32_to_cpu(hdr.u32[0])); + } + + netif_info(efx, hw, efx->net_dev, "MCDI RPC RESP:%s\n", buf); + } +#endif + if (error && mcdi->resp_data_len == 0) { netif_err(efx, hw, efx->net_dev, "MC rebooted\n"); mcdi->resprc = -EIO; @@ -406,7 +498,7 @@ static bool efx_mcdi_complete_async(struct efx_mcdi_iface *mcdi, bool timeout) struct efx_mcdi_async_param *async; size_t hdr_len, data_len, err_len; efx_dword_t *outbuf; - MCDI_DECLARE_BUF_OUT_OR_ERR(errbuf, 0); + MCDI_DECLARE_BUF_ERR(errbuf); int rc; if (cmpxchg(&mcdi->state, @@ -534,7 +626,7 @@ static int _efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen, size_t *outlen_actual, bool quiet) { struct efx_mcdi_iface *mcdi = efx_mcdi(efx); - MCDI_DECLARE_BUF_OUT_OR_ERR(errbuf, 0); + MCDI_DECLARE_BUF_ERR(errbuf); int rc; if (mcdi->mode == MCDI_MODE_POLL) @@ -936,10 +1028,21 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc) /* Consume the status word since efx_mcdi_rpc_finish() won't */ for (count = 0; count < MCDI_STATUS_DELAY_COUNT; ++count) { - if (efx_mcdi_poll_reboot(efx)) + rc = efx_mcdi_poll_reboot(efx); + if (rc) break; udelay(MCDI_STATUS_DELAY_US); } + + /* On EF10, a CODE_MC_REBOOT event can be received without the + * reboot detection in efx_mcdi_poll_reboot() being triggered. + * If zero was returned from the final call to + * efx_mcdi_poll_reboot(), the MC reboot wasn't noticed but the + * MC has definitely rebooted so prepare for the reset. 
+ */ + if (!rc && efx->type->mcdi_reboot_detected) + efx->type->mcdi_reboot_detected(efx); + mcdi->new_epoch = true; /* Nobody was waiting for an MCDI request, so trigger a reset */ @@ -1035,7 +1138,9 @@ void efx_mcdi_process_event(struct efx_channel *channel, /* MAC stats are gather lazily. We can ignore this. */ break; case MCDI_EVENT_CODE_FLR: - efx_siena_sriov_flr(efx, MCDI_EVENT_FIELD(*event, FLR_VF)); + if (efx->type->sriov_flr) + efx->type->sriov_flr(efx, + MCDI_EVENT_FIELD(*event, FLR_VF)); break; case MCDI_EVENT_CODE_PTP_RX: case MCDI_EVENT_CODE_PTP_FAULT: @@ -1081,9 +1186,7 @@ void efx_mcdi_process_event(struct efx_channel *channel, void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len) { - MCDI_DECLARE_BUF(outbuf, - max(MC_CMD_GET_VERSION_OUT_LEN, - MC_CMD_GET_CAPABILITIES_OUT_LEN)); + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_VERSION_OUT_LEN); size_t outlength; const __le16 *ver_words; size_t offset; @@ -1108,19 +1211,11 @@ void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len) * single version. Report which variants are running. */ if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) { - BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0); - rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0, - outbuf, sizeof(outbuf), &outlength); - if (rc || outlength < MC_CMD_GET_CAPABILITIES_OUT_LEN) - offset += snprintf( - buf + offset, len - offset, " rx? tx?"); - else - offset += snprintf( - buf + offset, len - offset, " rx%x tx%x", - MCDI_WORD(outbuf, - GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID), - MCDI_WORD(outbuf, - GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID)); + struct efx_ef10_nic_data *nic_data = efx->nic_data; + + offset += snprintf(buf + offset, len - offset, " rx%x tx%x", + nic_data->rx_dpcpu_fw_id, + nic_data->tx_dpcpu_fw_id); /* It's theoretically possible for the string to exceed 31 * characters, though in practice the first three version @@ -1150,10 +1245,26 @@ static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1); MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID, MC_CMD_FW_LOW_LATENCY); - rc = efx_mcdi_rpc(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf), - outbuf, sizeof(outbuf), &outlen); - if (rc) + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + /* If we're not the primary PF, trying to ATTACH with a FIRMWARE_ID + * specified will fail with EPERM, and we have to tell the MC we don't + * care what firmware we get. + */ + if (rc == -EPERM) { + netif_dbg(efx, probe, efx->net_dev, + "efx_mcdi_drv_attach with fw-variant setting failed EPERM, trying without it\n"); + MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID, + MC_CMD_FW_DONT_CARE); + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_DRV_ATTACH, inbuf, + sizeof(inbuf), outbuf, sizeof(outbuf), + &outlen); + } + if (rc) { + efx_mcdi_display_error(efx, MC_CMD_DRV_ATTACH, sizeof(inbuf), + outbuf, outlen, rc); goto fail; + } if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN) { rc = -EIO; goto fail; @@ -1178,16 +1289,6 @@ static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, * and are completely trusted by firmware. Abort probing * if that's not true for this function. 
*/ - if (driver_operating && - (efx->mcdi->fn_flags & - (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL | - 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED)) != - (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL | - 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED)) { - netif_err(efx, probe, efx->net_dev, - "This driver version only supports one function per port\n"); - return -ENODEV; - } if (was_attached != NULL) *was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE); @@ -1385,10 +1486,13 @@ fail1: return rc; } +/* Returns 1 if an assertion was read, 0 if no assertion had fired, + * negative on error. + */ static int efx_mcdi_read_assertion(struct efx_nic *efx) { MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_ASSERTS_IN_LEN); - MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN); unsigned int flags, index; const char *reason; size_t outlen; @@ -1406,6 +1510,8 @@ static int efx_mcdi_read_assertion(struct efx_nic *efx) rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_ASSERTS, inbuf, MC_CMD_GET_ASSERTS_IN_LEN, outbuf, sizeof(outbuf), &outlen); + if (rc == -EPERM) + return 0; } while ((rc == -EINTR || rc == -EIO) && retry-- > 0); if (rc) { @@ -1443,24 +1549,31 @@ static int efx_mcdi_read_assertion(struct efx_nic *efx) MCDI_ARRAY_DWORD(outbuf, GET_ASSERTS_OUT_GP_REGS_OFFS, index)); - return 0; + return 1; } -static void efx_mcdi_exit_assertion(struct efx_nic *efx) +static int efx_mcdi_exit_assertion(struct efx_nic *efx) { MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN); + int rc; /* If the MC is running debug firmware, it might now be * waiting for a debugger to attach, but we just want it to * reboot. We set a flag that makes the command a no-op if it - * has already done so. We don't know what return code to - * expect (0 or -EIO), so ignore it. + * has already done so. + * The MCDI will thus return either 0 or -EIO. 
*/ BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0); MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION); - (void) efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN, - NULL, 0, NULL); + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN, + NULL, 0, NULL); + if (rc == -EIO) + rc = 0; + if (rc) + efx_mcdi_display_error(efx, MC_CMD_REBOOT, MC_CMD_REBOOT_IN_LEN, + NULL, 0, rc); + return rc; } int efx_mcdi_handle_assertion(struct efx_nic *efx) @@ -1468,12 +1581,10 @@ int efx_mcdi_handle_assertion(struct efx_nic *efx) int rc; rc = efx_mcdi_read_assertion(efx); - if (rc) + if (rc <= 0) return rc; - efx_mcdi_exit_assertion(efx); - - return 0; + return efx_mcdi_exit_assertion(efx); } void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode) @@ -1550,7 +1661,9 @@ int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method) if (rc) return rc; - if (method == RESET_TYPE_WORLD) + if (method == RESET_TYPE_DATAPATH) + return 0; + else if (method == RESET_TYPE_WORLD) return efx_mcdi_reset_mc(efx); else return efx_mcdi_reset_func(efx); @@ -1677,15 +1790,65 @@ int efx_mcdi_wol_filter_reset(struct efx_nic *efx) return rc; } -int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled) +int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled, + unsigned int *flags) { MCDI_DECLARE_BUF(inbuf, MC_CMD_WORKAROUND_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_WORKAROUND_EXT_OUT_LEN); + size_t outlen; + int rc; BUILD_BUG_ON(MC_CMD_WORKAROUND_OUT_LEN != 0); MCDI_SET_DWORD(inbuf, WORKAROUND_IN_TYPE, type); MCDI_SET_DWORD(inbuf, WORKAROUND_IN_ENABLED, enabled); - return efx_mcdi_rpc(efx, MC_CMD_WORKAROUND, inbuf, sizeof(inbuf), - NULL, 0, NULL); + rc = efx_mcdi_rpc(efx, MC_CMD_WORKAROUND, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + + if (!flags) + return 0; + + if (outlen >= MC_CMD_WORKAROUND_EXT_OUT_LEN) + *flags = MCDI_DWORD(outbuf, WORKAROUND_EXT_OUT_FLAGS); + else + *flags = 0; + + return 0; +} + +int efx_mcdi_get_workarounds(struct efx_nic *efx, unsigned int *impl_out, + unsigned int *enabled_out) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_WORKAROUNDS_OUT_LEN); + size_t outlen; + int rc; + + rc = efx_mcdi_rpc(efx, MC_CMD_GET_WORKAROUNDS, NULL, 0, + outbuf, sizeof(outbuf), &outlen); + if (rc) + goto fail; + + if (outlen < MC_CMD_GET_WORKAROUNDS_OUT_LEN) { + rc = -EIO; + goto fail; + } + + if (impl_out) + *impl_out = MCDI_DWORD(outbuf, GET_WORKAROUNDS_OUT_IMPLEMENTED); + + if (enabled_out) + *enabled_out = MCDI_DWORD(outbuf, GET_WORKAROUNDS_OUT_ENABLED); + + return 0; + +fail: + /* Older firmware lacks GET_WORKAROUNDS and this isn't especially + * terrifying. The call site will have to deal with it though. + */ + netif_printk(efx, hw, rc == -ENOSYS ? 
KERN_DEBUG : KERN_ERR, + efx->net_dev, "%s: failed rc=%d\n", __func__, rc); + return rc; } #ifdef CONFIG_SFC_MTD diff --git a/kernel/drivers/net/ethernet/sfc/mcdi.h b/kernel/drivers/net/ethernet/sfc/mcdi.h index 56465f746..025d504c4 100644 --- a/kernel/drivers/net/ethernet/sfc/mcdi.h +++ b/kernel/drivers/net/ethernet/sfc/mcdi.h @@ -58,6 +58,8 @@ enum efx_mcdi_mode { * enabled * @async_list: Queue of asynchronous requests * @async_timer: Timer for asynchronous request timeout + * @logging_buffer: buffer that may be used to build MCDI tracing messages + * @logging_enabled: whether to trace MCDI */ struct efx_mcdi_iface { struct efx_nic *efx; @@ -74,6 +76,10 @@ struct efx_mcdi_iface { spinlock_t async_lock; struct list_head async_list; struct timer_list async_timer; +#ifdef CONFIG_SFC_MCDI_LOGGING + char *logging_buffer; + bool logging_enabled; +#endif }; struct efx_mcdi_mon { @@ -176,10 +182,12 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev); * 32-bit-aligned. Also, on Siena we must copy to the MC shared * memory strictly 32 bits at a time, so add any necessary padding. */ -#define MCDI_DECLARE_BUF(_name, _len) \ +#define _MCDI_DECLARE_BUF(_name, _len) \ efx_dword_t _name[DIV_ROUND_UP(_len, 4)] -#define MCDI_DECLARE_BUF_OUT_OR_ERR(_name, _len) \ - MCDI_DECLARE_BUF(_name, max_t(size_t, _len, 8)) +#define MCDI_DECLARE_BUF(_name, _len) \ + _MCDI_DECLARE_BUF(_name, _len) = {{{0}}} +#define MCDI_DECLARE_BUF_ERR(_name) \ + MCDI_DECLARE_BUF(_name, 8) #define _MCDI_PTR(_buf, _offset) \ ((u8 *)(_buf) + (_offset)) #define MCDI_PTR(_buf, _field) \ @@ -338,7 +346,10 @@ void efx_mcdi_mac_pull_stats(struct efx_nic *efx); bool efx_mcdi_mac_check_fault(struct efx_nic *efx); enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason); int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method); -int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled); +int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled, + unsigned int *flags); +int efx_mcdi_get_workarounds(struct efx_nic *efx, unsigned int *impl_out, + unsigned int *enabled_out); #ifdef CONFIG_SFC_MCDI_MON int efx_mcdi_mon_probe(struct efx_nic *efx); diff --git a/kernel/drivers/net/ethernet/sfc/mcdi_pcol.h b/kernel/drivers/net/ethernet/sfc/mcdi_pcol.h index e028de10e..4cc772164 100644 --- a/kernel/drivers/net/ethernet/sfc/mcdi_pcol.h +++ b/kernel/drivers/net/ethernet/sfc/mcdi_pcol.h @@ -26,6 +26,10 @@ * Unlike a warm boot, assume DMEM has been reloaded, so that * the MC persistent data must be reinitialised. */ #define MC_FW_TEPID_BOOT_OK (16) +/* We have entered the main firmware via recovery mode. This + * means that MC persistent data must be reinitialised, but that + * we shouldn't touch PCIe config. */ +#define MC_FW_RECOVERY_MODE_PCIE_INIT_OK (32) /* BIST state has been initialized */ #define MC_FW_BIST_INIT_OK (128) @@ -169,6 +173,8 @@ #define MC_CMD_ERR_EINTR 4 /* I/O failure */ #define MC_CMD_ERR_EIO 5 +/* Already exists */ +#define MC_CMD_ERR_EEXIST 6 /* Try again */ #define MC_CMD_ERR_EAGAIN 11 /* Out of memory */ @@ -181,6 +187,10 @@ #define MC_CMD_ERR_ENODEV 19 /* Invalid argument to target */ #define MC_CMD_ERR_EINVAL 22 +/* Broken pipe */ +#define MC_CMD_ERR_EPIPE 32 +/* Read-only */ +#define MC_CMD_ERR_EROFS 30 /* Out of range */ #define MC_CMD_ERR_ERANGE 34 /* Non-recursive resource is already acquired */ @@ -226,6 +236,43 @@ #define MC_CMD_ERR_SLAVE_NOT_PRESENT 0x100a /* The datapath is disabled. 
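efx_mcdi_get_workarounds(), implemented a little earlier and declared here, hands back two bitmasks of implemented and enabled firmware workarounds. A hedged example of a caller testing one bit follows; the MC_CMD_GET_WORKAROUNDS_OUT_BUG35388 flag name is taken from the MCDI protocol header rather than from this hunk, so treat it (and the function name) as assumptions.

    #include "net_driver.h"
    #include "mcdi.h"
    #include "mcdi_pcol.h"

    /* Sketch: ask the MC whether it already implements a workaround.
     * Older firmware lacks GET_WORKAROUNDS, so a failure means "no". */
    static bool example_fw_has_bug35388_workaround(struct efx_nic *efx)
    {
    	unsigned int implemented = 0, enabled = 0;

    	if (efx_mcdi_get_workarounds(efx, &implemented, &enabled))
    		return false;

    	return implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG35388;
    }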
*/ #define MC_CMD_ERR_DATAPATH_DISABLED 0x100b +/* The requesting client is not a function */ +#define MC_CMD_ERR_CLIENT_NOT_FN 0x100c +/* The requested operation might require the + command to be passed between MCs, and the + transport doesn't support that. Should + only ever been seen over the UART. */ +#define MC_CMD_ERR_TRANSPORT_NOPROXY 0x100d +/* VLAN tag(s) exists */ +#define MC_CMD_ERR_VLAN_EXIST 0x100e +/* No MAC address assigned to an EVB port */ +#define MC_CMD_ERR_NO_MAC_ADDR 0x100f +/* Notifies the driver that the request has been relayed + * to an admin function for authorization. The driver should + * wait for a PROXY_RESPONSE event and then resend its request. + * This error code is followed by a 32-bit handle that + * helps matching it with the respective PROXY_RESPONSE event. */ +#define MC_CMD_ERR_PROXY_PENDING 0x1010 +#define MC_CMD_ERR_PROXY_PENDING_HANDLE_OFST 4 +/* The request cannot be passed for authorization because + * another request from the same function is currently being + * authorized. The drvier should try again later. */ +#define MC_CMD_ERR_PROXY_INPROGRESS 0x1011 +/* Returned by MC_CMD_PROXY_COMPLETE if the caller is not the function + * that has enabled proxying or BLOCK_INDEX points to a function that + * doesn't await an authorization. */ +#define MC_CMD_ERR_PROXY_UNEXPECTED 0x1012 +/* This code is currently only used internally in FW. Its meaning is that + * an operation failed due to lack of SR-IOV privilege. + * Normally it is translated to EPERM by send_cmd_err(), + * but it may also be used to trigger some special mechanism + * for handling such case, e.g. to relay the failed request + * to a designated admin function for authorization. */ +#define MC_CMD_ERR_NO_PRIVILEGE 0x1013 +/* Workaround 26807 could not be turned on/off because some functions + * have already installed filters. See the comment at + * MC_CMD_WORKAROUND_BUG26807. */ +#define MC_CMD_ERR_FILTERS_PRESENT 0x1014 #define MC_CMD_ERR_CODE_OFST 0 @@ -275,6 +322,11 @@ MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST + \ (n) * MC_CMD_DBIWROP_TYPEDEF_LEN) +/* This may be ORed with an EVB_PORT_ID_xxx constant to pass a non-default + * stack ID (which must be in the range 1-255) along with an EVB port ID. + */ +#define EVB_STACK_ID(n) (((n) & 0xff) << 16) + /* Version 2 adds an optional argument to error returns: the errno value * may be followed by the (0-based) number of the first argument that @@ -394,6 +446,8 @@ #define MCDI_EVENT_AOE_BYTEBLASTER 0x9 /* enum: DDR ECC status update */ #define MCDI_EVENT_AOE_DDR_ECC_STATUS 0xa +/* enum: PTP status update */ +#define MCDI_EVENT_AOE_PTP_STATUS 0xb #define MCDI_EVENT_AOE_ERR_DATA_LBN 8 #define MCDI_EVENT_AOE_ERR_DATA_WIDTH 8 #define MCDI_EVENT_RX_ERR_RXQ_LBN 0 @@ -408,6 +462,16 @@ #define MCDI_EVENT_RX_FLUSH_RXQ_WIDTH 12 #define MCDI_EVENT_MC_REBOOT_COUNT_LBN 0 #define MCDI_EVENT_MC_REBOOT_COUNT_WIDTH 16 +#define MCDI_EVENT_MUM_ERR_TYPE_LBN 0 +#define MCDI_EVENT_MUM_ERR_TYPE_WIDTH 8 +/* enum: MUM failed to load - no valid image? 
*/ +#define MCDI_EVENT_MUM_NO_LOAD 0x1 +/* enum: MUM f/w reported an exception */ +#define MCDI_EVENT_MUM_ASSERT 0x2 +/* enum: MUM not kicking watchdog */ +#define MCDI_EVENT_MUM_WATCHDOG 0x3 +#define MCDI_EVENT_MUM_ERR_DATA_LBN 8 +#define MCDI_EVENT_MUM_ERR_DATA_WIDTH 8 #define MCDI_EVENT_DATA_LBN 0 #define MCDI_EVENT_DATA_WIDTH 32 #define MCDI_EVENT_SRC_LBN 36 @@ -416,6 +480,8 @@ #define MCDI_EVENT_EV_CODE_WIDTH 4 #define MCDI_EVENT_CODE_LBN 44 #define MCDI_EVENT_CODE_WIDTH 8 +/* enum: Event generated by host software */ +#define MCDI_EVENT_SW_EVENT 0x0 /* enum: Bad assert. */ #define MCDI_EVENT_CODE_BADSSERT 0x1 /* enum: PM Notice. */ @@ -470,6 +536,14 @@ #define MCDI_EVENT_CODE_MC_BIST 0x19 /* enum: PTP tick event providing current NIC time */ #define MCDI_EVENT_CODE_PTP_TIME 0x1a +/* enum: MUM fault */ +#define MCDI_EVENT_CODE_MUM 0x1b +/* enum: notify the designated PF of a new authorization request */ +#define MCDI_EVENT_CODE_PROXY_REQUEST 0x1c +/* enum: notify a function that awaits an authorization that its request has + * been processed and it may now resend the command + */ +#define MCDI_EVENT_CODE_PROXY_RESPONSE 0x1d /* enum: Artificial event generated by host and posted via MC for test * purposes. */ @@ -537,6 +611,33 @@ /* For CODE_PTP_TIME events, bits 19-26 of the minor value of the PTP clock */ #define MCDI_EVENT_PTP_TIME_MINOR_26_19_LBN 36 #define MCDI_EVENT_PTP_TIME_MINOR_26_19_WIDTH 8 +/* For CODE_PTP_TIME events where report sync status is enabled, indicates + * whether the NIC clock has ever been set + */ +#define MCDI_EVENT_PTP_TIME_NIC_CLOCK_VALID_LBN 36 +#define MCDI_EVENT_PTP_TIME_NIC_CLOCK_VALID_WIDTH 1 +/* For CODE_PTP_TIME events where report sync status is enabled, indicates + * whether the NIC and System clocks are in sync + */ +#define MCDI_EVENT_PTP_TIME_HOST_NIC_IN_SYNC_LBN 37 +#define MCDI_EVENT_PTP_TIME_HOST_NIC_IN_SYNC_WIDTH 1 +/* For CODE_PTP_TIME events where report sync status is enabled, bits 21-26 of + * the minor value of the PTP clock + */ +#define MCDI_EVENT_PTP_TIME_MINOR_26_21_LBN 38 +#define MCDI_EVENT_PTP_TIME_MINOR_26_21_WIDTH 6 +#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_OFST 0 +#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_LBN 0 +#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_WIDTH 32 +#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_OFST 0 +#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_LBN 0 +#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_WIDTH 32 +/* Zero means that the request has been completed or authorized, and the driver + * should resend it. A non-zero value means that the authorization has been + * denied, and gives the reason. Typically it will be EPERM. 
+ */ +#define MCDI_EVENT_PROXY_RESPONSE_RC_LBN 36 +#define MCDI_EVENT_PROXY_RESPONSE_RC_WIDTH 8 /* FCDI_EVENT structuredef */ #define FCDI_EVENT_LEN 8 @@ -581,6 +682,10 @@ #define FCDI_EVENT_CODE_PTP_TICK 0x7 /* enum: ECC error counters */ #define FCDI_EVENT_CODE_DDR_ECC_STATUS 0x8 +/* enum: Current status of PTP */ +#define FCDI_EVENT_CODE_PTP_STATUS 0x9 +/* enum: Port id config to map MC-FC port idx */ +#define FCDI_EVENT_CODE_PORT_CONFIG 0xa #define FCDI_EVENT_ASSERT_INSTR_ADDRESS_OFST 0 #define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LBN 0 #define FCDI_EVENT_ASSERT_INSTR_ADDRESS_WIDTH 32 @@ -594,11 +699,24 @@ #define FCDI_EVENT_LINK_STATE_DATA_OFST 0 #define FCDI_EVENT_LINK_STATE_DATA_LBN 0 #define FCDI_EVENT_LINK_STATE_DATA_WIDTH 32 +#define FCDI_EVENT_PTP_STATE_OFST 0 +#define FCDI_EVENT_PTP_UNDEFINED 0x0 /* enum */ +#define FCDI_EVENT_PTP_SETUP_FAILED 0x1 /* enum */ +#define FCDI_EVENT_PTP_OPERATIONAL 0x2 /* enum */ +#define FCDI_EVENT_PTP_STATE_LBN 0 +#define FCDI_EVENT_PTP_STATE_WIDTH 32 #define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_LBN 36 #define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_WIDTH 8 #define FCDI_EVENT_DDR_ECC_STATUS_STATUS_OFST 0 #define FCDI_EVENT_DDR_ECC_STATUS_STATUS_LBN 0 #define FCDI_EVENT_DDR_ECC_STATUS_STATUS_WIDTH 32 +/* Index of MC port being referred to */ +#define FCDI_EVENT_PORT_CONFIG_SRC_LBN 36 +#define FCDI_EVENT_PORT_CONFIG_SRC_WIDTH 8 +/* FC Port index that matches the MC port index in SRC */ +#define FCDI_EVENT_PORT_CONFIG_DATA_OFST 0 +#define FCDI_EVENT_PORT_CONFIG_DATA_LBN 0 +#define FCDI_EVENT_PORT_CONFIG_DATA_WIDTH 32 /* FCDI_EXTENDED_EVENT_PPS structuredef: Extended FCDI event to send PPS events * to the MC. Note that this structure | is overlayed over a normal FCDI event @@ -631,6 +749,90 @@ #define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LBN 64 #define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_WIDTH 64 +/* MUM_EVENT structuredef */ +#define MUM_EVENT_LEN 8 +#define MUM_EVENT_CONT_LBN 32 +#define MUM_EVENT_CONT_WIDTH 1 +#define MUM_EVENT_LEVEL_LBN 33 +#define MUM_EVENT_LEVEL_WIDTH 3 +/* enum: Info. */ +#define MUM_EVENT_LEVEL_INFO 0x0 +/* enum: Warning. */ +#define MUM_EVENT_LEVEL_WARN 0x1 +/* enum: Error. */ +#define MUM_EVENT_LEVEL_ERR 0x2 +/* enum: Fatal. */ +#define MUM_EVENT_LEVEL_FATAL 0x3 +#define MUM_EVENT_DATA_OFST 0 +#define MUM_EVENT_SENSOR_ID_LBN 0 +#define MUM_EVENT_SENSOR_ID_WIDTH 8 +/* Enum values, see field(s): */ +/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */ +#define MUM_EVENT_SENSOR_STATE_LBN 8 +#define MUM_EVENT_SENSOR_STATE_WIDTH 8 +#define MUM_EVENT_PORT_PHY_READY_LBN 0 +#define MUM_EVENT_PORT_PHY_READY_WIDTH 1 +#define MUM_EVENT_PORT_PHY_LINK_UP_LBN 1 +#define MUM_EVENT_PORT_PHY_LINK_UP_WIDTH 1 +#define MUM_EVENT_PORT_PHY_TX_LOL_LBN 2 +#define MUM_EVENT_PORT_PHY_TX_LOL_WIDTH 1 +#define MUM_EVENT_PORT_PHY_RX_LOL_LBN 3 +#define MUM_EVENT_PORT_PHY_RX_LOL_WIDTH 1 +#define MUM_EVENT_PORT_PHY_TX_LOS_LBN 4 +#define MUM_EVENT_PORT_PHY_TX_LOS_WIDTH 1 +#define MUM_EVENT_PORT_PHY_RX_LOS_LBN 5 +#define MUM_EVENT_PORT_PHY_RX_LOS_WIDTH 1 +#define MUM_EVENT_PORT_PHY_TX_FAULT_LBN 6 +#define MUM_EVENT_PORT_PHY_TX_FAULT_WIDTH 1 +#define MUM_EVENT_DATA_LBN 0 +#define MUM_EVENT_DATA_WIDTH 32 +#define MUM_EVENT_SRC_LBN 36 +#define MUM_EVENT_SRC_WIDTH 8 +#define MUM_EVENT_EV_CODE_LBN 60 +#define MUM_EVENT_EV_CODE_WIDTH 4 +#define MUM_EVENT_CODE_LBN 44 +#define MUM_EVENT_CODE_WIDTH 8 +/* enum: The MUM was rebooted. */ +#define MUM_EVENT_CODE_REBOOT 0x1 +/* enum: Bad assert. */ +#define MUM_EVENT_CODE_ASSERT 0x2 +/* enum: Sensor failure. 
*/ +#define MUM_EVENT_CODE_SENSOR 0x3 +/* enum: Link fault has been asserted, or has cleared. */ +#define MUM_EVENT_CODE_QSFP_LASI_INTERRUPT 0x4 +#define MUM_EVENT_SENSOR_DATA_OFST 0 +#define MUM_EVENT_SENSOR_DATA_LBN 0 +#define MUM_EVENT_SENSOR_DATA_WIDTH 32 +#define MUM_EVENT_PORT_PHY_FLAGS_OFST 0 +#define MUM_EVENT_PORT_PHY_FLAGS_LBN 0 +#define MUM_EVENT_PORT_PHY_FLAGS_WIDTH 32 +#define MUM_EVENT_PORT_PHY_COPPER_LEN_OFST 0 +#define MUM_EVENT_PORT_PHY_COPPER_LEN_LBN 0 +#define MUM_EVENT_PORT_PHY_COPPER_LEN_WIDTH 32 +#define MUM_EVENT_PORT_PHY_CAPS_OFST 0 +#define MUM_EVENT_PORT_PHY_CAPS_LBN 0 +#define MUM_EVENT_PORT_PHY_CAPS_WIDTH 32 +#define MUM_EVENT_PORT_PHY_TECH_OFST 0 +#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_UNKNOWN 0x0 /* enum */ +#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_OPTICAL 0x1 /* enum */ +#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE 0x2 /* enum */ +#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE_EQUALIZED 0x3 /* enum */ +#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_ACTIVE_LIMITING 0x4 /* enum */ +#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_ACTIVE_LINEAR 0x5 /* enum */ +#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_BASE_T 0x6 /* enum */ +#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_LOOPBACK_PASSIVE 0x7 /* enum */ +#define MUM_EVENT_PORT_PHY_TECH_LBN 0 +#define MUM_EVENT_PORT_PHY_TECH_WIDTH 32 +#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_LBN 36 +#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_WIDTH 4 +#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_FLAGS 0x0 /* enum */ +#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_COPPER_LEN 0x1 /* enum */ +#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_CAPS 0x2 /* enum */ +#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_TECH 0x3 /* enum */ +#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_MAX 0x4 /* enum */ +#define MUM_EVENT_PORT_PHY_SRC_PORT_NO_LBN 40 +#define MUM_EVENT_PORT_PHY_SRC_PORT_NO_WIDTH 4 + /***********************************/ /* MC_CMD_READ32 @@ -638,6 +840,8 @@ */ #define MC_CMD_READ32 0x1 +#define MC_CMD_0x1_PRIVILEGE_CTG SRIOV_CTG_ADMIN + /* MC_CMD_READ32_IN msgrequest */ #define MC_CMD_READ32_IN_LEN 8 #define MC_CMD_READ32_IN_ADDR_OFST 0 @@ -659,6 +863,8 @@ */ #define MC_CMD_WRITE32 0x2 +#define MC_CMD_0x2_PRIVILEGE_CTG SRIOV_CTG_ADMIN + /* MC_CMD_WRITE32_IN msgrequest */ #define MC_CMD_WRITE32_IN_LENMIN 8 #define MC_CMD_WRITE32_IN_LENMAX 252 @@ -679,26 +885,38 @@ */ #define MC_CMD_COPYCODE 0x3 +#define MC_CMD_0x3_PRIVILEGE_CTG SRIOV_CTG_ADMIN + /* MC_CMD_COPYCODE_IN msgrequest */ #define MC_CMD_COPYCODE_IN_LEN 16 -/* Source address */ -#define MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0 -/* enum: The main image should be entered via a copy of a single word from and - * to this address when none of the other magic behaviours are required. +/* Source address + * + * The main image should be entered via a copy of a single word from and to a + * magic address, which controls various aspects of the boot. The magic address + * is a bitfield, with each bit as documented below. */ +#define MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0 +/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT (see below) */ #define MC_CMD_COPYCODE_HUNT_NO_MAGIC_ADDR 0x10000 -/* enum: Entering the main image via a copy of a single word from and to this - * address indicates that it should not attempt to start the datapath CPUs. - * This is useful for certain soft rebooting scenarios. 
(Huntington only) +/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT and + * BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED (see below) */ #define MC_CMD_COPYCODE_HUNT_NO_DATAPATH_MAGIC_ADDR 0x1d0d0 -/* enum: Entering the main image via a copy of a single word from and to this - * address indicates that it should not attempt to parse any configuration from - * flash. (In addition, the datapath CPUs will not be started, as for - * MC_CMD_COPYCODE_HUNT_NO_DATAPATH_MAGIC_ADDR above.) This is useful for - * certain soft rebooting scenarios. (Huntington only) +/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT, + * BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED and BOOT_MAGIC_IGNORE_CONFIG (see + * below) */ #define MC_CMD_COPYCODE_HUNT_IGNORE_CONFIG_MAGIC_ADDR 0x1badc +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_LBN 17 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_WIDTH 1 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_LBN 2 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_WIDTH 1 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_LBN 3 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_WIDTH 1 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_LBN 4 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_WIDTH 1 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_LBN 5 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_WIDTH 1 /* Destination address */ #define MC_CMD_COPYCODE_IN_DEST_ADDR_OFST 4 #define MC_CMD_COPYCODE_IN_NUMWORDS_OFST 8 @@ -717,6 +935,8 @@ */ #define MC_CMD_SET_FUNC 0x4 +#define MC_CMD_0x4_PRIVILEGE_CTG SRIOV_CTG_ADMIN + /* MC_CMD_SET_FUNC_IN msgrequest */ #define MC_CMD_SET_FUNC_IN_LEN 4 /* Set function */ @@ -732,6 +952,8 @@ */ #define MC_CMD_GET_BOOT_STATUS 0x5 +#define MC_CMD_0x5_PRIVILEGE_CTG SRIOV_CTG_ADMIN + /* MC_CMD_GET_BOOT_STATUS_IN msgrequest */ #define MC_CMD_GET_BOOT_STATUS_IN_LEN 0 @@ -758,6 +980,8 @@ */ #define MC_CMD_GET_ASSERTS 0x6 +#define MC_CMD_0x6_PRIVILEGE_CTG SRIOV_CTG_ADMIN + /* MC_CMD_GET_ASSERTS_IN msgrequest */ #define MC_CMD_GET_ASSERTS_IN_LEN 4 /* Set to clear assertion */ @@ -783,6 +1007,10 @@ #define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST 8 #define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_LEN 4 #define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM 31 +/* enum: A magic value hinting that the value in this register at the time of + * the failure has likely been lost. + */ +#define MC_CMD_GET_ASSERTS_REG_NO_DATA 0xda7a1057 /* Failing thread address */ #define MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_OFST 132 #define MC_CMD_GET_ASSERTS_OUT_RESERVED_OFST 136 @@ -790,10 +1018,13 @@ /***********************************/ /* MC_CMD_LOG_CTRL - * Configure the output stream for various events and messages. + * Configure the output stream for log events such as link state changes, + * sensor notifications and MCDI completions */ #define MC_CMD_LOG_CTRL 0x7 +#define MC_CMD_0x7_PRIVILEGE_CTG SRIOV_CTG_GENERAL + /* MC_CMD_LOG_CTRL_IN msgrequest */ #define MC_CMD_LOG_CTRL_IN_LEN 8 /* Log destination */ @@ -802,6 +1033,7 @@ #define MC_CMD_LOG_CTRL_IN_LOG_DEST_UART 0x1 /* enum: Event queue. */ #define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ 0x2 +/* Legacy argument. Must be zero. 
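/* Illustrative sketch (editorial, not part of the patch): composing the
 * MC_CMD_COPYCODE boot magic SRC_ADDR from the BOOT_MAGIC bit positions
 * defined above, assuming each single-bit flag is encoded as (1 << LBN)
 * within the 32-bit source-address word. The helper name and parameters
 * are hypothetical.
 */
static inline unsigned int copycode_boot_magic(bool skip_satellite_cpus,
                                               bool ignore_config)
{
        unsigned int magic = 1U << MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_LBN;

        if (skip_satellite_cpus)
                magic |= 1U << MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_LBN;
        if (ignore_config)
                magic |= 1U << MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_LBN;
        return magic;
}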
*/ #define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_OFST 4 /* MC_CMD_LOG_CTRL_OUT msgresponse */ @@ -814,6 +1046,8 @@ */ #define MC_CMD_GET_VERSION 0x8 +#define MC_CMD_0x8_PRIVILEGE_CTG SRIOV_CTG_GENERAL + /* MC_CMD_GET_VERSION_IN msgrequest */ #define MC_CMD_GET_VERSION_IN_LEN 0 @@ -870,6 +1104,8 @@ */ #define MC_CMD_PTP 0xb +#define MC_CMD_0xb_PRIVILEGE_CTG SRIOV_CTG_GENERAL + /* MC_CMD_PTP_IN msgrequest */ #define MC_CMD_PTP_IN_LEN 1 /* PTP operation code */ @@ -937,8 +1173,12 @@ * input on the same NIC. */ #define MC_CMD_PTP_OP_MANFTEST_PPS 0x1a +/* enum: Set the PTP sync status. Status is used by firmware to report to event + * subscribers. + */ +#define MC_CMD_PTP_OP_SET_SYNC_STATUS 0x1b /* enum: Above this for future use. */ -#define MC_CMD_PTP_OP_MAX 0x1b +#define MC_CMD_PTP_OP_MAX 0x1c /* MC_CMD_PTP_IN_ENABLE msgrequest */ #define MC_CMD_PTP_IN_ENABLE_LEN 16 @@ -1173,8 +1413,12 @@ #define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN 12 /* MC_CMD_PTP_IN_CMD_OFST 0 */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ -/* Event queue to send PTP time events to */ +/* Original field containing queue ID. Now extended to include flags. */ #define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_OFST 8 +#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_LBN 0 +#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_WIDTH 16 +#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_LBN 31 +#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_WIDTH 1 /* MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE msgrequest */ #define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN 16 @@ -1196,6 +1440,23 @@ /* 1 to enable PPS test mode, 0 to disable and return result. */ #define MC_CMD_PTP_IN_MANFTEST_PPS_TEST_ENABLE_OFST 8 +/* MC_CMD_PTP_IN_SET_SYNC_STATUS msgrequest */ +#define MC_CMD_PTP_IN_SET_SYNC_STATUS_LEN 24 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* NIC - Host System Clock Synchronization status */ +#define MC_CMD_PTP_IN_SET_SYNC_STATUS_STATUS_OFST 8 +/* enum: Host System clock and NIC clock are not in sync */ +#define MC_CMD_PTP_IN_SET_SYNC_STATUS_NOT_IN_SYNC 0x0 +/* enum: Host System clock and NIC clock are synchronized */ +#define MC_CMD_PTP_IN_SET_SYNC_STATUS_IN_SYNC 0x1 +/* If synchronized, number of seconds until clocks should be considered to be + * no longer in sync. + */ +#define MC_CMD_PTP_IN_SET_SYNC_STATUS_TIMEOUT_OFST 12 +#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED0_OFST 16 +#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED1_OFST 20 + /* MC_CMD_PTP_OUT msgresponse */ #define MC_CMD_PTP_OUT_LEN 0 @@ -1357,7 +1618,7 @@ #define MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_27FRACTION 0x2 /* MC_CMD_PTP_OUT_GET_ATTRIBUTES msgresponse */ -#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN 8 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN 24 /* Time format required/used by for this NIC. Applies to all PTP MCDI * operations that pass times between the host and firmware. If this operation * is not supported (older firmware) a format of seconds and nanoseconds should @@ -1378,6 +1639,13 @@ * end and start times minus the time that the MC waited for host end. 
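/* Illustrative sketch (editorial, not part of the patch): packing the
 * extended MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE QUEUE word described above.
 * Bits 0..15 carry the target event queue index and bit 31 requests sync
 * status reporting; plain shifts are used instead of the driver's MCDI
 * bitfield helpers to keep the example self-contained. The helper name is
 * hypothetical.
 */
static inline unsigned int ptp_subscribe_queue_word(unsigned int evq_index,
                                                    bool report_sync_status)
{
        unsigned int word = evq_index & 0xffff;        /* QUEUE_ID, bits 0..15 */

        if (report_sync_status)
                word |= 1U << MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_LBN;
        return word;
}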
*/ #define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN_OFST 4 +/* Various PTP capabilities */ +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_OFST 8 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_LBN 0 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_WIDTH 1 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED0_OFST 12 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED1_OFST 16 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED2_OFST 20 /* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS msgresponse */ #define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_LEN 16 @@ -1397,6 +1665,9 @@ /* Enum values, see field(s): */ /* MC_CMD_PTP_OUT_MANFTEST_BASIC/TEST_RESULT */ +/* MC_CMD_PTP_OUT_SET_SYNC_STATUS msgresponse */ +#define MC_CMD_PTP_OUT_SET_SYNC_STATUS_LEN 0 + /***********************************/ /* MC_CMD_CSR_READ32 @@ -1404,6 +1675,8 @@ */ #define MC_CMD_CSR_READ32 0xc +#define MC_CMD_0xc_PRIVILEGE_CTG SRIOV_CTG_ADMIN + /* MC_CMD_CSR_READ32_IN msgrequest */ #define MC_CMD_CSR_READ32_IN_LEN 12 /* Address */ @@ -1428,6 +1701,8 @@ */ #define MC_CMD_CSR_WRITE32 0xd +#define MC_CMD_0xd_PRIVILEGE_CTG SRIOV_CTG_ADMIN + /* MC_CMD_CSR_WRITE32_IN msgrequest */ #define MC_CMD_CSR_WRITE32_IN_LENMIN 12 #define MC_CMD_CSR_WRITE32_IN_LENMAX 252 @@ -1452,6 +1727,8 @@ */ #define MC_CMD_HP 0x54 +#define MC_CMD_0x54_PRIVILEGE_CTG SRIOV_CTG_ADMIN + /* MC_CMD_HP_IN msgrequest */ #define MC_CMD_HP_IN_LEN 16 /* HP OCSD sub-command. When address is not NULL, request activation of OCSD at @@ -1493,6 +1770,8 @@ */ #define MC_CMD_STACKINFO 0xf +#define MC_CMD_0xf_PRIVILEGE_CTG SRIOV_CTG_ADMIN + /* MC_CMD_STACKINFO_IN msgrequest */ #define MC_CMD_STACKINFO_IN_LEN 0 @@ -1513,6 +1792,8 @@ */ #define MC_CMD_MDIO_READ 0x10 +#define MC_CMD_0x10_PRIVILEGE_CTG SRIOV_CTG_GENERAL + /* MC_CMD_MDIO_READ_IN msgrequest */ #define MC_CMD_MDIO_READ_IN_LEN 16 /* Bus number; there are two MDIO buses: one for the internal PHY, and one for @@ -1552,6 +1833,8 @@ */ #define MC_CMD_MDIO_WRITE 0x11 +#define MC_CMD_0x11_PRIVILEGE_CTG SRIOV_CTG_ADMIN + /* MC_CMD_MDIO_WRITE_IN msgrequest */ #define MC_CMD_MDIO_WRITE_IN_LEN 20 /* Bus number; there are two MDIO buses: one for the internal PHY, and one for @@ -1591,6 +1874,8 @@ */ #define MC_CMD_DBI_WRITE 0x12 +#define MC_CMD_0x12_PRIVILEGE_CTG SRIOV_CTG_ADMIN + /* MC_CMD_DBI_WRITE_IN msgrequest */ #define MC_CMD_DBI_WRITE_IN_LENMIN 12 #define MC_CMD_DBI_WRITE_IN_LENMAX 252 @@ -1739,6 +2024,8 @@ */ #define MC_CMD_GET_BOARD_CFG 0x18 +#define MC_CMD_0x18_PRIVILEGE_CTG SRIOV_CTG_GENERAL + /* MC_CMD_GET_BOARD_CFG_IN msgrequest */ #define MC_CMD_GET_BOARD_CFG_IN_LEN 0 @@ -1778,6 +2065,8 @@ */ #define MC_CMD_DBI_READX 0x19 +#define MC_CMD_0x19_PRIVILEGE_CTG SRIOV_CTG_ADMIN + /* MC_CMD_DBI_READX_IN msgrequest */ #define MC_CMD_DBI_READX_IN_LENMIN 8 #define MC_CMD_DBI_READX_IN_LENMAX 248 @@ -1822,6 +2111,8 @@ */ #define MC_CMD_SET_RAND_SEED 0x1a +#define MC_CMD_0x1a_PRIVILEGE_CTG SRIOV_CTG_ADMIN + /* MC_CMD_SET_RAND_SEED_IN msgrequest */ #define MC_CMD_SET_RAND_SEED_IN_LEN 16 /* Seed value. 
*/ @@ -1863,6 +2154,8 @@ */ #define MC_CMD_DRV_ATTACH 0x1c +#define MC_CMD_0x1c_PRIVILEGE_CTG SRIOV_CTG_GENERAL + /* MC_CMD_DRV_ATTACH_IN msgrequest */ #define MC_CMD_DRV_ATTACH_IN_LEN 12 /* new state (0=detached, 1=attached) to set if UPDATE=1 */ @@ -1875,6 +2168,16 @@ #define MC_CMD_FW_FULL_FEATURED 0x0 /* enum: Prefer to use firmware with fewer features but lower latency */ #define MC_CMD_FW_LOW_LATENCY 0x1 +/* enum: Prefer to use firmware for SolarCapture packed stream mode */ +#define MC_CMD_FW_PACKED_STREAM 0x2 +/* enum: Prefer to use firmware with fewer features and simpler TX event + * batching but higher TX packet rate + */ +#define MC_CMD_FW_HIGH_TX_RATE 0x3 +/* enum: Reserved value */ +#define MC_CMD_FW_PACKED_STREAM_HASH_MODE_1 0x4 +/* enum: Only this option is allowed for non-admin functions */ +#define MC_CMD_FW_DONT_CARE 0xffffffff /* MC_CMD_DRV_ATTACH_OUT msgresponse */ #define MC_CMD_DRV_ATTACH_OUT_LEN 4 @@ -1920,6 +2223,8 @@ */ #define MC_CMD_PORT_RESET 0x20 +#define MC_CMD_0x20_PRIVILEGE_CTG SRIOV_CTG_GENERAL + /* MC_CMD_PORT_RESET_IN msgrequest */ #define MC_CMD_PORT_RESET_IN_LEN 0 @@ -1934,6 +2239,7 @@ * extended version of the deprecated MC_CMD_PORT_RESET with added fields. */ #define MC_CMD_ENTITY_RESET 0x20 +/* MC_CMD_0x20_PRIVILEGE_CTG SRIOV_CTG_GENERAL */ /* MC_CMD_ENTITY_RESET_IN msgrequest */ #define MC_CMD_ENTITY_RESET_IN_LEN 4 @@ -2023,6 +2329,8 @@ */ #define MC_CMD_PUTS 0x23 +#define MC_CMD_0x23_PRIVILEGE_CTG SRIOV_CTG_ADMIN + /* MC_CMD_PUTS_IN msgrequest */ #define MC_CMD_PUTS_IN_LENMIN 13 #define MC_CMD_PUTS_IN_LENMAX 252 @@ -2050,6 +2358,8 @@ */ #define MC_CMD_GET_PHY_CFG 0x24 +#define MC_CMD_0x24_PRIVILEGE_CTG SRIOV_CTG_GENERAL + /* MC_CMD_GET_PHY_CFG_IN msgrequest */ #define MC_CMD_GET_PHY_CFG_IN_LEN 0 @@ -2149,6 +2459,8 @@ */ #define MC_CMD_START_BIST 0x25 +#define MC_CMD_0x25_PRIVILEGE_CTG SRIOV_CTG_ADMIN + /* MC_CMD_START_BIST_IN msgrequest */ #define MC_CMD_START_BIST_IN_LEN 4 /* Type of test. */ @@ -2185,6 +2497,8 @@ */ #define MC_CMD_POLL_BIST 0x26 +#define MC_CMD_0x26_PRIVILEGE_CTG SRIOV_CTG_ADMIN + /* MC_CMD_POLL_BIST_IN msgrequest */ #define MC_CMD_POLL_BIST_IN_LEN 0 @@ -2344,6 +2658,8 @@ */ #define MC_CMD_GET_LOOPBACK_MODES 0x28 +#define MC_CMD_0x28_PRIVILEGE_CTG SRIOV_CTG_GENERAL + /* MC_CMD_GET_LOOPBACK_MODES_IN msgrequest */ #define MC_CMD_GET_LOOPBACK_MODES_IN_LEN 0 @@ -2426,6 +2742,12 @@ #define MC_CMD_LOOPBACK_SD_FES_WS 0x22 /* enum: Near side of AOE Siena side port */ #define MC_CMD_LOOPBACK_AOE_INT_NEAR 0x23 +/* enum: Medford Wireside datapath loopback */ +#define MC_CMD_LOOPBACK_DATA_WS 0x24 +/* enum: Force link up without setting up any physical loopback (snapper use + * only) + */ +#define MC_CMD_LOOPBACK_FORCE_EXT_LINK 0x25 /* Supported loopbacks. */ #define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_OFST 8 #define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LEN 8 @@ -2463,6 +2785,8 @@ */ #define MC_CMD_GET_LINK 0x29 +#define MC_CMD_0x29_PRIVILEGE_CTG SRIOV_CTG_GENERAL + /* MC_CMD_GET_LINK_IN msgrequest */ #define MC_CMD_GET_LINK_IN_LEN 0 @@ -2495,12 +2819,8 @@ #define MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_WIDTH 1 /* This returns the negotiated flow control value. */ #define MC_CMD_GET_LINK_OUT_FCNTL_OFST 20 -/* enum: Flow control is off. */ -#define MC_CMD_FCNTL_OFF 0x0 -/* enum: Respond to flow control. */ -#define MC_CMD_FCNTL_RESPOND 0x1 -/* enum: Respond to and Issue flow control. 
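/* Illustrative sketch (editorial, not part of the patch): picking a firmware
 * variant for MC_CMD_DRV_ATTACH from the enum above. Per the enum comments,
 * a non-admin function (e.g. a VF) may only request MC_CMD_FW_DONT_CARE,
 * while an admin function can prefer a specific datapath. The helper name
 * and parameters are hypothetical.
 */
static unsigned int example_drv_attach_fw_variant(bool is_admin,
                                                  bool want_low_latency)
{
        if (!is_admin)
                return MC_CMD_FW_DONT_CARE;
        return want_low_latency ? MC_CMD_FW_LOW_LATENCY
                                : MC_CMD_FW_FULL_FEATURED;
}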
*/ -#define MC_CMD_FCNTL_BIDIR 0x2 +/* Enum values, see field(s): */ +/* MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */ #define MC_CMD_GET_LINK_OUT_MAC_FAULT_OFST 24 #define MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0 #define MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1 @@ -2519,6 +2839,8 @@ */ #define MC_CMD_SET_LINK 0x2a +#define MC_CMD_0x2a_PRIVILEGE_CTG SRIOV_CTG_LINK + /* MC_CMD_SET_LINK_IN msgrequest */ #define MC_CMD_SET_LINK_IN_LEN 16 /* ??? */ @@ -2550,6 +2872,8 @@ */ #define MC_CMD_SET_ID_LED 0x2b +#define MC_CMD_0x2b_PRIVILEGE_CTG SRIOV_CTG_LINK + /* MC_CMD_SET_ID_LED_IN msgrequest */ #define MC_CMD_SET_ID_LED_IN_LEN 4 /* Set LED state. */ @@ -2568,8 +2892,10 @@ */ #define MC_CMD_SET_MAC 0x2c +#define MC_CMD_0x2c_PRIVILEGE_CTG SRIOV_CTG_LINK + /* MC_CMD_SET_MAC_IN msgrequest */ -#define MC_CMD_SET_MAC_IN_LEN 24 +#define MC_CMD_SET_MAC_IN_LEN 28 /* The MTU is the MTU programmed directly into the XMAC/GMAC (inclusive of * EtherII, VLAN, bug16011 padding). */ @@ -2586,13 +2912,20 @@ #define MC_CMD_SET_MAC_IN_REJECT_BRDCST_WIDTH 1 #define MC_CMD_SET_MAC_IN_FCNTL_OFST 20 /* enum: Flow control is off. */ -/* MC_CMD_FCNTL_OFF 0x0 */ +#define MC_CMD_FCNTL_OFF 0x0 /* enum: Respond to flow control. */ -/* MC_CMD_FCNTL_RESPOND 0x1 */ +#define MC_CMD_FCNTL_RESPOND 0x1 /* enum: Respond to and Issue flow control. */ -/* MC_CMD_FCNTL_BIDIR 0x2 */ +#define MC_CMD_FCNTL_BIDIR 0x2 /* enum: Auto neg flow control. */ #define MC_CMD_FCNTL_AUTO 0x3 +/* enum: Priority flow control (eftest builds only). */ +#define MC_CMD_FCNTL_QBB 0x4 +/* enum: Issue flow control. */ +#define MC_CMD_FCNTL_GENERATE 0x5 +#define MC_CMD_SET_MAC_IN_FLAGS_OFST 24 +#define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_LBN 0 +#define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_WIDTH 1 /* MC_CMD_SET_MAC_OUT msgresponse */ #define MC_CMD_SET_MAC_OUT_LEN 0 @@ -2609,6 +2942,8 @@ */ #define MC_CMD_PHY_STATS 0x2d +#define MC_CMD_0x2d_PRIVILEGE_CTG SRIOV_CTG_LINK + /* MC_CMD_PHY_STATS_IN msgrequest */ #define MC_CMD_PHY_STATS_IN_LEN 8 /* ??? */ @@ -2683,12 +3018,15 @@ * guarantee consistent results. If the DMA_ADDR is 0, then no DMA is * performed, and the statistics may be read from the message response. If * DMA_ADDR != 0, then the statistics are dmad to that (page-aligned location). - * Locks required: None. Returns: 0, ETIME + * Locks required: None. The PERIODIC_CLEAR option is not used and now has no + * effect. Returns: 0, ETIME */ #define MC_CMD_MAC_STATS 0x2e +#define MC_CMD_0x2e_PRIVILEGE_CTG SRIOV_CTG_GENERAL + /* MC_CMD_MAC_STATS_IN msgrequest */ -#define MC_CMD_MAC_STATS_IN_LEN 16 +#define MC_CMD_MAC_STATS_IN_LEN 20 /* ??? */ #define MC_CMD_MAC_STATS_IN_DMA_ADDR_OFST 0 #define MC_CMD_MAC_STATS_IN_DMA_ADDR_LEN 8 @@ -2710,6 +3048,8 @@ #define MC_CMD_MAC_STATS_IN_PERIOD_MS_LBN 16 #define MC_CMD_MAC_STATS_IN_PERIOD_MS_WIDTH 16 #define MC_CMD_MAC_STATS_IN_DMA_LEN_OFST 12 +/* port id so vadapter stats can be provided */ +#define MC_CMD_MAC_STATS_IN_PORT_ID_OFST 16 /* MC_CMD_MAC_STATS_OUT_DMA msgresponse */ #define MC_CMD_MAC_STATS_OUT_DMA_LEN 0 @@ -2722,6 +3062,7 @@ #define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_HI_OFST 4 #define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS #define MC_CMD_MAC_GENERATION_START 0x0 /* enum */ +#define MC_CMD_MAC_DMABUF_START 0x1 /* enum */ #define MC_CMD_MAC_TX_PKTS 0x1 /* enum */ #define MC_CMD_MAC_TX_PAUSE_PKTS 0x2 /* enum */ #define MC_CMD_MAC_TX_CONTROL_PKTS 0x3 /* enum */ @@ -2821,14 +3162,34 @@ * PM_AND_RXDP_COUNTERS capability only. 
*/ #define MC_CMD_MAC_RXDP_STREAMING_PKTS 0x46 -/* enum: RXDP counter: Number of times an emergency descriptor fetch was - * performed. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only. +/* enum: RXDP counter: Number of times an hlb descriptor fetch was performed. + * Valid for EF10 with PM_AND_RXDP_COUNTERS capability only. */ -#define MC_CMD_MAC_RXDP_EMERGENCY_FETCH_CONDITIONS 0x47 +#define MC_CMD_MAC_RXDP_HLB_FETCH_CONDITIONS 0x47 /* enum: RXDP counter: Number of times the DPCPU waited for an existing * descriptor fetch. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only. */ -#define MC_CMD_MAC_RXDP_EMERGENCY_WAIT_CONDITIONS 0x48 +#define MC_CMD_MAC_RXDP_HLB_WAIT_CONDITIONS 0x48 +#define MC_CMD_MAC_VADAPTER_RX_DMABUF_START 0x4c /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_UNICAST_PACKETS 0x4c /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_UNICAST_BYTES 0x4d /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_MULTICAST_PACKETS 0x4e /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_MULTICAST_BYTES 0x4f /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_BROADCAST_PACKETS 0x50 /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_BROADCAST_BYTES 0x51 /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_BAD_PACKETS 0x52 /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_BAD_BYTES 0x53 /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_OVERFLOW 0x54 /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_DMABUF_START 0x57 /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_UNICAST_PACKETS 0x57 /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_UNICAST_BYTES 0x58 /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_MULTICAST_PACKETS 0x59 /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_MULTICAST_BYTES 0x5a /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_BROADCAST_PACKETS 0x5b /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_BROADCAST_BYTES 0x5c /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_BAD_PACKETS 0x5d /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_BAD_BYTES 0x5e /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_OVERFLOW 0x5f /* enum */ /* enum: Start of GMAC stats buffer space, for Siena only. */ #define MC_CMD_GMAC_DMABUF_START 0x40 /* enum: End of GMAC stats buffer space, for Siena only. */ @@ -2926,6 +3287,8 @@ */ #define MC_CMD_WOL_FILTER_SET 0x32 +#define MC_CMD_0x32_PRIVILEGE_CTG SRIOV_CTG_LINK + /* MC_CMD_WOL_FILTER_SET_IN msgrequest */ #define MC_CMD_WOL_FILTER_SET_IN_LEN 192 #define MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 @@ -3020,6 +3383,8 @@ */ #define MC_CMD_WOL_FILTER_REMOVE 0x33 +#define MC_CMD_0x33_PRIVILEGE_CTG SRIOV_CTG_LINK + /* MC_CMD_WOL_FILTER_REMOVE_IN msgrequest */ #define MC_CMD_WOL_FILTER_REMOVE_IN_LEN 4 #define MC_CMD_WOL_FILTER_REMOVE_IN_FILTER_ID_OFST 0 @@ -3035,6 +3400,8 @@ */ #define MC_CMD_WOL_FILTER_RESET 0x34 +#define MC_CMD_0x34_PRIVILEGE_CTG SRIOV_CTG_LINK + /* MC_CMD_WOL_FILTER_RESET_IN msgrequest */ #define MC_CMD_WOL_FILTER_RESET_IN_LEN 4 #define MC_CMD_WOL_FILTER_RESET_IN_MASK_OFST 0 @@ -3069,6 +3436,8 @@ */ #define MC_CMD_NVRAM_TYPES 0x36 +#define MC_CMD_0x36_PRIVILEGE_CTG SRIOV_CTG_ADMIN + /* MC_CMD_NVRAM_TYPES_IN msgrequest */ #define MC_CMD_NVRAM_TYPES_IN_LEN 0 @@ -3116,6 +3485,8 @@ #define MC_CMD_NVRAM_TYPE_LICENSE 0x12 /* enum: FC Log. */ #define MC_CMD_NVRAM_TYPE_FC_LOG 0x13 +/* enum: Additional flash on FPGA. 
*/ +#define MC_CMD_NVRAM_TYPE_FC_EXTRA 0x14 /***********************************/ @@ -3125,6 +3496,8 @@ */ #define MC_CMD_NVRAM_INFO 0x37 +#define MC_CMD_0x37_PRIVILEGE_CTG SRIOV_CTG_ADMIN + /* MC_CMD_NVRAM_INFO_IN msgrequest */ #define MC_CMD_NVRAM_INFO_IN_LEN 4 #define MC_CMD_NVRAM_INFO_IN_TYPE_OFST 0 @@ -3157,6 +3530,8 @@ */ #define MC_CMD_NVRAM_UPDATE_START 0x38 +#define MC_CMD_0x38_PRIVILEGE_CTG SRIOV_CTG_ADMIN + /* MC_CMD_NVRAM_UPDATE_START_IN msgrequest */ #define MC_CMD_NVRAM_UPDATE_START_IN_LEN 4 #define MC_CMD_NVRAM_UPDATE_START_IN_TYPE_OFST 0 @@ -3175,6 +3550,8 @@ */ #define MC_CMD_NVRAM_READ 0x39 +#define MC_CMD_0x39_PRIVILEGE_CTG SRIOV_CTG_ADMIN + /* MC_CMD_NVRAM_READ_IN msgrequest */ #define MC_CMD_NVRAM_READ_IN_LEN 12 #define MC_CMD_NVRAM_READ_IN_TYPE_OFST 0 @@ -3202,6 +3579,8 @@ */ #define MC_CMD_NVRAM_WRITE 0x3a +#define MC_CMD_0x3a_PRIVILEGE_CTG SRIOV_CTG_ADMIN + /* MC_CMD_NVRAM_WRITE_IN msgrequest */ #define MC_CMD_NVRAM_WRITE_IN_LENMIN 13 #define MC_CMD_NVRAM_WRITE_IN_LENMAX 252 @@ -3228,6 +3607,8 @@ */ #define MC_CMD_NVRAM_ERASE 0x3b +#define MC_CMD_0x3b_PRIVILEGE_CTG SRIOV_CTG_ADMIN + /* MC_CMD_NVRAM_ERASE_IN msgrequest */ #define MC_CMD_NVRAM_ERASE_IN_LEN 12 #define MC_CMD_NVRAM_ERASE_IN_TYPE_OFST 0 @@ -3248,6 +3629,8 @@ */ #define MC_CMD_NVRAM_UPDATE_FINISH 0x3c +#define MC_CMD_0x3c_PRIVILEGE_CTG SRIOV_CTG_ADMIN + /* MC_CMD_NVRAM_UPDATE_FINISH_IN msgrequest */ #define MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN 8 #define MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_OFST 0 @@ -3279,6 +3662,8 @@ */ #define MC_CMD_REBOOT 0x3d +#define MC_CMD_0x3d_PRIVILEGE_CTG SRIOV_CTG_ADMIN + /* MC_CMD_REBOOT_IN msgrequest */ #define MC_CMD_REBOOT_IN_LEN 4 #define MC_CMD_REBOOT_IN_FLAGS_OFST 0 @@ -3296,6 +3681,8 @@ */ #define MC_CMD_SCHEDINFO 0x3e +#define MC_CMD_0x3e_PRIVILEGE_CTG SRIOV_CTG_ADMIN + /* MC_CMD_SCHEDINFO_IN msgrequest */ #define MC_CMD_SCHEDINFO_IN_LEN 0 @@ -3316,6 +3703,8 @@ */ #define MC_CMD_REBOOT_MODE 0x3f +#define MC_CMD_0x3f_PRIVILEGE_CTG SRIOV_CTG_ADMIN + /* MC_CMD_REBOOT_MODE_IN msgrequest */ #define MC_CMD_REBOOT_MODE_IN_LEN 4 #define MC_CMD_REBOOT_MODE_IN_VALUE_OFST 0 @@ -3368,6 +3757,8 @@ */ #define MC_CMD_SENSOR_INFO 0x41 +#define MC_CMD_0x41_PRIVILEGE_CTG SRIOV_CTG_ADMIN + /* MC_CMD_SENSOR_INFO_IN msgrequest */ #define MC_CMD_SENSOR_INFO_IN_LEN 0 @@ -3478,6 +3869,68 @@ #define MC_CMD_SENSOR_VDD08D_VSS08D_CSR_EXTADC 0x2c /* enum: Hotpoint temperature: degC */ #define MC_CMD_SENSOR_HOTPOINT_TEMP 0x2d +/* enum: Port 0 PHY power switch over-current: bool */ +#define MC_CMD_SENSOR_PHY_POWER_PORT0 0x2e +/* enum: Port 1 PHY power switch over-current: bool */ +#define MC_CMD_SENSOR_PHY_POWER_PORT1 0x2f +/* enum: Mop-up microcontroller reference voltage (millivolts) */ +#define MC_CMD_SENSOR_MUM_VCC 0x30 +/* enum: 0.9v power phase A voltage: mV */ +#define MC_CMD_SENSOR_IN_0V9_A 0x31 +/* enum: 0.9v power phase A current: mA */ +#define MC_CMD_SENSOR_IN_I0V9_A 0x32 +/* enum: 0.9V voltage regulator phase A temperature: degC */ +#define MC_CMD_SENSOR_VREG_0V9_A_TEMP 0x33 +/* enum: 0.9v power phase B voltage: mV */ +#define MC_CMD_SENSOR_IN_0V9_B 0x34 +/* enum: 0.9v power phase B current: mA */ +#define MC_CMD_SENSOR_IN_I0V9_B 0x35 +/* enum: 0.9V voltage regulator phase B temperature: degC */ +#define MC_CMD_SENSOR_VREG_0V9_B_TEMP 0x36 +/* enum: CCOM AVREG 1v2 supply (interval ADC): mV */ +#define MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY 0x37 +/* enum: CCOM AVREG 1v2 supply (external ADC): mV */ +#define MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY_EXTADC 0x38 +/* enum: CCOM AVREG 1v8 supply (interval 
ADC): mV */ +#define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY 0x39 +/* enum: CCOM AVREG 1v8 supply (external ADC): mV */ +#define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY_EXTADC 0x3a +/* enum: Not a sensor: reserved for the next page flag */ +#define MC_CMD_SENSOR_PAGE1_NEXT 0x3f +/* enum: controller internal temperature sensor voltage on master core + * (internal ADC): mV + */ +#define MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT 0x40 +/* enum: controller internal temperature on master core (internal ADC): degC */ +#define MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP 0x41 +/* enum: controller internal temperature sensor voltage on master core + * (external ADC): mV + */ +#define MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT_EXTADC 0x42 +/* enum: controller internal temperature on master core (external ADC): degC */ +#define MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP_EXTADC 0x43 +/* enum: controller internal temperature on slave core sensor voltage (internal + * ADC): mV + */ +#define MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT 0x44 +/* enum: controller internal temperature on slave core (internal ADC): degC */ +#define MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP 0x45 +/* enum: controller internal temperature on slave core sensor voltage (external + * ADC): mV + */ +#define MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT_EXTADC 0x46 +/* enum: controller internal temperature on slave core (external ADC): degC */ +#define MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP_EXTADC 0x47 +/* enum: Voltage supplied to the SODIMMs from their power supply: mV */ +#define MC_CMD_SENSOR_SODIMM_VOUT 0x49 +/* enum: Temperature of SODIMM 0 (if installed): degC */ +#define MC_CMD_SENSOR_SODIMM_0_TEMP 0x4a +/* enum: Temperature of SODIMM 1 (if installed): degC */ +#define MC_CMD_SENSOR_SODIMM_1_TEMP 0x4b +/* enum: Voltage supplied to the QSFP #0 from their power supply: mV */ +#define MC_CMD_SENSOR_PHY0_VCC 0x4c +/* enum: Voltage supplied to the QSFP #1 from their power supply: mV */ +#define MC_CMD_SENSOR_PHY1_VCC 0x4d /* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */ #define MC_CMD_SENSOR_ENTRY_OFST 4 #define MC_CMD_SENSOR_ENTRY_LEN 8 @@ -3542,6 +3995,8 @@ */ #define MC_CMD_READ_SENSORS 0x42 +#define MC_CMD_0x42_PRIVILEGE_CTG SRIOV_CTG_ADMIN + /* MC_CMD_READ_SENSORS_IN msgrequest */ #define MC_CMD_READ_SENSORS_IN_LEN 8 /* DMA address of host buffer for sensor readings (must be 4Kbyte aligned). */ @@ -3584,6 +4039,8 @@ #define MC_CMD_SENSOR_STATE_BROKEN 0x3 /* enum: Sensor is working but does not currently have a reading. */ #define MC_CMD_SENSOR_STATE_NO_READING 0x4 +/* enum: Sensor initialisation failed. 
*/ +#define MC_CMD_SENSOR_STATE_INIT_FAILED 0x5 #define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LBN 16 #define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_WIDTH 8 #define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_OFST 3 @@ -3602,6 +4059,8 @@ */ #define MC_CMD_GET_PHY_STATE 0x43 +#define MC_CMD_0x43_PRIVILEGE_CTG SRIOV_CTG_GENERAL + /* MC_CMD_GET_PHY_STATE_IN msgrequest */ #define MC_CMD_GET_PHY_STATE_IN_LEN 0 @@ -3636,6 +4095,8 @@ */ #define MC_CMD_WOL_FILTER_GET 0x45 +#define MC_CMD_0x45_PRIVILEGE_CTG SRIOV_CTG_LINK + /* MC_CMD_WOL_FILTER_GET_IN msgrequest */ #define MC_CMD_WOL_FILTER_GET_IN_LEN 0 @@ -3651,6 +4112,8 @@ */ #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD 0x46 +#define MC_CMD_0x46_PRIVILEGE_CTG SRIOV_CTG_LINK + /* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN msgrequest */ #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMIN 8 #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMAX 252 @@ -3692,6 +4155,8 @@ */ #define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD 0x47 +#define MC_CMD_0x47_PRIVILEGE_CTG SRIOV_CTG_LINK + /* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN msgrequest */ #define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_LEN 8 #define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 @@ -3722,6 +4187,8 @@ */ #define MC_CMD_TESTASSERT 0x49 +#define MC_CMD_0x49_PRIVILEGE_CTG SRIOV_CTG_ADMIN + /* MC_CMD_TESTASSERT_IN msgrequest */ #define MC_CMD_TESTASSERT_IN_LEN 0 @@ -3739,8 +4206,11 @@ */ #define MC_CMD_WORKAROUND 0x4a +#define MC_CMD_0x4a_PRIVILEGE_CTG SRIOV_CTG_ADMIN + /* MC_CMD_WORKAROUND_IN msgrequest */ #define MC_CMD_WORKAROUND_IN_LEN 8 +/* The enums here must correspond with those in MC_CMD_GET_WORKAROUND. */ #define MC_CMD_WORKAROUND_IN_TYPE_OFST 0 /* enum: Bug 17230 work around. */ #define MC_CMD_WORKAROUND_BUG17230 0x1 @@ -3748,11 +4218,38 @@ #define MC_CMD_WORKAROUND_BUG35388 0x2 /* enum: Bug35017 workaround (A64 tables must be identity map) */ #define MC_CMD_WORKAROUND_BUG35017 0x3 +/* enum: Bug 41750 present (MC_CMD_TRIGGER_INTERRUPT won't work) */ +#define MC_CMD_WORKAROUND_BUG41750 0x4 +/* enum: Bug 42008 present (Interrupts can overtake associated events). Caution + * - before adding code that queries this workaround, remember that there's + * released Monza firmware that doesn't understand MC_CMD_WORKAROUND_BUG42008, + * and will hence (incorrectly) report that the bug doesn't exist. + */ +#define MC_CMD_WORKAROUND_BUG42008 0x5 +/* enum: Bug 26807 features present in firmware (multicast filter chaining) + * This feature cannot be turned on/off while there are any filters already + * present. The behaviour in such case depends on the acting client's privilege + * level. If the client has the admin privilege, then all functions that have + * filters installed will be FLRed and the FLR_DONE flag will be set. Otherwise + * the command will fail with MC_CMD_ERR_FILTERS_PRESENT. 
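/* Illustrative sketch (editorial, not part of the patch): enabling the bug
 * 26807 workaround (multicast filter chaining) and checking the FLR_DONE
 * flag from the MC_CMD_WORKAROUND_EXT_OUT response defined just below.
 * Assumes the driver's usual MCDI helpers (MCDI_DECLARE_BUF, MCDI_SET_DWORD,
 * MCDI_DWORD, efx_mcdi_rpc) and struct efx_nic; the function name is
 * hypothetical and error handling is minimal.
 */
static int example_enable_bug26807(struct efx_nic *efx, bool *flr_done)
{
        MCDI_DECLARE_BUF(inbuf, MC_CMD_WORKAROUND_IN_LEN);
        MCDI_DECLARE_BUF(outbuf, MC_CMD_WORKAROUND_EXT_OUT_LEN);
        size_t outlen;
        int rc;

        MCDI_SET_DWORD(inbuf, WORKAROUND_IN_TYPE, MC_CMD_WORKAROUND_BUG26807);
        MCDI_SET_DWORD(inbuf, WORKAROUND_IN_ENABLED, 1);
        rc = efx_mcdi_rpc(efx, MC_CMD_WORKAROUND, inbuf, sizeof(inbuf),
                          outbuf, sizeof(outbuf), &outlen);
        if (rc)
                return rc;

        *flr_done = outlen >= MC_CMD_WORKAROUND_EXT_OUT_LEN &&
                    (MCDI_DWORD(outbuf, WORKAROUND_EXT_OUT_FLAGS) &
                     (1 << MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN));
        return 0;
}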
+ */ +#define MC_CMD_WORKAROUND_BUG26807 0x6 +/* 0 = disable the workaround indicated by TYPE; any non-zero value = enable + * the workaround + */ #define MC_CMD_WORKAROUND_IN_ENABLED_OFST 4 /* MC_CMD_WORKAROUND_OUT msgresponse */ #define MC_CMD_WORKAROUND_OUT_LEN 0 +/* MC_CMD_WORKAROUND_EXT_OUT msgresponse: This response format will be used + * when (TYPE == MC_CMD_WORKAROUND_BUG26807) + */ +#define MC_CMD_WORKAROUND_EXT_OUT_LEN 4 +#define MC_CMD_WORKAROUND_EXT_OUT_FLAGS_OFST 0 +#define MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN 0 +#define MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_WIDTH 1 + /***********************************/ /* MC_CMD_GET_PHY_MEDIA_INFO @@ -3765,6 +4262,8 @@ */ #define MC_CMD_GET_PHY_MEDIA_INFO 0x4b +#define MC_CMD_0x4b_PRIVILEGE_CTG SRIOV_CTG_ADMIN + /* MC_CMD_GET_PHY_MEDIA_INFO_IN msgrequest */ #define MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN 4 #define MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_OFST 0 @@ -3788,6 +4287,8 @@ */ #define MC_CMD_NVRAM_TEST 0x4c +#define MC_CMD_0x4c_PRIVILEGE_CTG SRIOV_CTG_ADMIN + /* MC_CMD_NVRAM_TEST_IN msgrequest */ #define MC_CMD_NVRAM_TEST_IN_LEN 4 #define MC_CMD_NVRAM_TEST_IN_TYPE_OFST 0 @@ -3849,6 +4350,8 @@ */ #define MC_CMD_SENSOR_SET_LIMS 0x4e +#define MC_CMD_0x4e_PRIVILEGE_CTG SRIOV_CTG_ADMIN + /* MC_CMD_SENSOR_SET_LIMS_IN msgrequest */ #define MC_CMD_SENSOR_SET_LIMS_IN_LEN 20 #define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_OFST 0 @@ -3890,6 +4393,8 @@ */ #define MC_CMD_NVRAM_PARTITIONS 0x51 +#define MC_CMD_0x51_PRIVILEGE_CTG SRIOV_CTG_ADMIN + /* MC_CMD_NVRAM_PARTITIONS_IN msgrequest */ #define MC_CMD_NVRAM_PARTITIONS_IN_LEN 0 @@ -3913,6 +4418,8 @@ */ #define MC_CMD_NVRAM_METADATA 0x52 +#define MC_CMD_0x52_PRIVILEGE_CTG SRIOV_CTG_ADMIN + /* MC_CMD_NVRAM_METADATA_IN msgrequest */ #define MC_CMD_NVRAM_METADATA_IN_LEN 4 /* Partition type ID code */ @@ -3954,10 +4461,12 @@ /***********************************/ /* MC_CMD_GET_MAC_ADDRESSES - * Returns the base MAC, count and stride for the requestiong function + * Returns the base MAC, count and stride for the requesting function */ #define MC_CMD_GET_MAC_ADDRESSES 0x55 +#define MC_CMD_0x55_PRIVILEGE_CTG SRIOV_CTG_GENERAL + /* MC_CMD_GET_MAC_ADDRESSES_IN msgrequest */ #define MC_CMD_GET_MAC_ADDRESSES_IN_LEN 0 @@ -3974,6 +4483,527 @@ /* Spacing of allocated MAC addresses */ #define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_OFST 12 + +/***********************************/ +/* MC_CMD_CLP + * Perform a CLP related operation + */ +#define MC_CMD_CLP 0x56 + +#define MC_CMD_0x56_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_CLP_IN msgrequest */ +#define MC_CMD_CLP_IN_LEN 4 +/* Sub operation */ +#define MC_CMD_CLP_IN_OP_OFST 0 +/* enum: Return to factory default settings */ +#define MC_CMD_CLP_OP_DEFAULT 0x1 +/* enum: Set MAC address */ +#define MC_CMD_CLP_OP_SET_MAC 0x2 +/* enum: Get MAC address */ +#define MC_CMD_CLP_OP_GET_MAC 0x3 +/* enum: Set UEFI/GPXE boot mode */ +#define MC_CMD_CLP_OP_SET_BOOT 0x4 +/* enum: Get UEFI/GPXE boot mode */ +#define MC_CMD_CLP_OP_GET_BOOT 0x5 + +/* MC_CMD_CLP_OUT msgresponse */ +#define MC_CMD_CLP_OUT_LEN 0 + +/* MC_CMD_CLP_IN_DEFAULT msgrequest */ +#define MC_CMD_CLP_IN_DEFAULT_LEN 4 +/* MC_CMD_CLP_IN_OP_OFST 0 */ + +/* MC_CMD_CLP_OUT_DEFAULT msgresponse */ +#define MC_CMD_CLP_OUT_DEFAULT_LEN 0 + +/* MC_CMD_CLP_IN_SET_MAC msgrequest */ +#define MC_CMD_CLP_IN_SET_MAC_LEN 12 +/* MC_CMD_CLP_IN_OP_OFST 0 */ +/* MAC address assigned to port */ +#define MC_CMD_CLP_IN_SET_MAC_ADDR_OFST 4 +#define MC_CMD_CLP_IN_SET_MAC_ADDR_LEN 6 +/* Padding */ +#define MC_CMD_CLP_IN_SET_MAC_RESERVED_OFST 10 +#define 
MC_CMD_CLP_IN_SET_MAC_RESERVED_LEN 2 + +/* MC_CMD_CLP_OUT_SET_MAC msgresponse */ +#define MC_CMD_CLP_OUT_SET_MAC_LEN 0 + +/* MC_CMD_CLP_IN_GET_MAC msgrequest */ +#define MC_CMD_CLP_IN_GET_MAC_LEN 4 +/* MC_CMD_CLP_IN_OP_OFST 0 */ + +/* MC_CMD_CLP_OUT_GET_MAC msgresponse */ +#define MC_CMD_CLP_OUT_GET_MAC_LEN 8 +/* MAC address assigned to port */ +#define MC_CMD_CLP_OUT_GET_MAC_ADDR_OFST 0 +#define MC_CMD_CLP_OUT_GET_MAC_ADDR_LEN 6 +/* Padding */ +#define MC_CMD_CLP_OUT_GET_MAC_RESERVED_OFST 6 +#define MC_CMD_CLP_OUT_GET_MAC_RESERVED_LEN 2 + +/* MC_CMD_CLP_IN_SET_BOOT msgrequest */ +#define MC_CMD_CLP_IN_SET_BOOT_LEN 5 +/* MC_CMD_CLP_IN_OP_OFST 0 */ +/* Boot flag */ +#define MC_CMD_CLP_IN_SET_BOOT_FLAG_OFST 4 +#define MC_CMD_CLP_IN_SET_BOOT_FLAG_LEN 1 + +/* MC_CMD_CLP_OUT_SET_BOOT msgresponse */ +#define MC_CMD_CLP_OUT_SET_BOOT_LEN 0 + +/* MC_CMD_CLP_IN_GET_BOOT msgrequest */ +#define MC_CMD_CLP_IN_GET_BOOT_LEN 4 +/* MC_CMD_CLP_IN_OP_OFST 0 */ + +/* MC_CMD_CLP_OUT_GET_BOOT msgresponse */ +#define MC_CMD_CLP_OUT_GET_BOOT_LEN 4 +/* Boot flag */ +#define MC_CMD_CLP_OUT_GET_BOOT_FLAG_OFST 0 +#define MC_CMD_CLP_OUT_GET_BOOT_FLAG_LEN 1 +/* Padding */ +#define MC_CMD_CLP_OUT_GET_BOOT_RESERVED_OFST 1 +#define MC_CMD_CLP_OUT_GET_BOOT_RESERVED_LEN 3 + + +/***********************************/ +/* MC_CMD_MUM + * Perform a MUM operation + */ +#define MC_CMD_MUM 0x57 + +#define MC_CMD_0x57_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_MUM_IN msgrequest */ +#define MC_CMD_MUM_IN_LEN 4 +#define MC_CMD_MUM_IN_OP_HDR_OFST 0 +#define MC_CMD_MUM_IN_OP_LBN 0 +#define MC_CMD_MUM_IN_OP_WIDTH 8 +/* enum: NULL MCDI command to MUM */ +#define MC_CMD_MUM_OP_NULL 0x1 +/* enum: Get MUM version */ +#define MC_CMD_MUM_OP_GET_VERSION 0x2 +/* enum: Issue raw I2C command to MUM */ +#define MC_CMD_MUM_OP_RAW_CMD 0x3 +/* enum: Read from registers on devices connected to MUM. */ +#define MC_CMD_MUM_OP_READ 0x4 +/* enum: Write to registers on devices connected to MUM. */ +#define MC_CMD_MUM_OP_WRITE 0x5 +/* enum: Control UART logging. */ +#define MC_CMD_MUM_OP_LOG 0x6 +/* enum: Operations on MUM GPIO lines */ +#define MC_CMD_MUM_OP_GPIO 0x7 +/* enum: Get sensor readings from MUM */ +#define MC_CMD_MUM_OP_READ_SENSORS 0x8 +/* enum: Initiate clock programming on the MUM */ +#define MC_CMD_MUM_OP_PROGRAM_CLOCKS 0x9 +/* enum: Initiate FPGA load from flash on the MUM */ +#define MC_CMD_MUM_OP_FPGA_LOAD 0xa +/* enum: Request sensor reading from MUM ADC resulting from earlier request via + * MUM ATB + */ +#define MC_CMD_MUM_OP_READ_ATB_SENSOR 0xb +/* enum: Send commands relating to the QSFP ports via the MUM for PHY + * operations + */ +#define MC_CMD_MUM_OP_QSFP 0xc + +/* MC_CMD_MUM_IN_NULL msgrequest */ +#define MC_CMD_MUM_IN_NULL_LEN 4 +/* MUM cmd header */ +#define MC_CMD_MUM_IN_CMD_OFST 0 + +/* MC_CMD_MUM_IN_GET_VERSION msgrequest */ +#define MC_CMD_MUM_IN_GET_VERSION_LEN 4 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ + +/* MC_CMD_MUM_IN_READ msgrequest */ +#define MC_CMD_MUM_IN_READ_LEN 16 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* ID of (device connected to MUM) to read from registers of */ +#define MC_CMD_MUM_IN_READ_DEVICE_OFST 4 +/* enum: Hittite HMC1035 clock generator on Sorrento board */ +#define MC_CMD_MUM_DEV_HITTITE 0x1 +/* enum: Hittite HMC1035 clock generator for NIC-side on Sorrento board */ +#define MC_CMD_MUM_DEV_HITTITE_NIC 0x2 +/* 32-bit address to read from */ +#define MC_CMD_MUM_IN_READ_ADDR_OFST 8 +/* Number of words to read. 
*/ +#define MC_CMD_MUM_IN_READ_NUMWORDS_OFST 12 + +/* MC_CMD_MUM_IN_WRITE msgrequest */ +#define MC_CMD_MUM_IN_WRITE_LENMIN 16 +#define MC_CMD_MUM_IN_WRITE_LENMAX 252 +#define MC_CMD_MUM_IN_WRITE_LEN(num) (12+4*(num)) +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* ID of (device connected to MUM) to write to registers of */ +#define MC_CMD_MUM_IN_WRITE_DEVICE_OFST 4 +/* enum: Hittite HMC1035 clock generator on Sorrento board */ +/* MC_CMD_MUM_DEV_HITTITE 0x1 */ +/* 32-bit address to write to */ +#define MC_CMD_MUM_IN_WRITE_ADDR_OFST 8 +/* Words to write */ +#define MC_CMD_MUM_IN_WRITE_BUFFER_OFST 12 +#define MC_CMD_MUM_IN_WRITE_BUFFER_LEN 4 +#define MC_CMD_MUM_IN_WRITE_BUFFER_MINNUM 1 +#define MC_CMD_MUM_IN_WRITE_BUFFER_MAXNUM 60 + +/* MC_CMD_MUM_IN_RAW_CMD msgrequest */ +#define MC_CMD_MUM_IN_RAW_CMD_LENMIN 17 +#define MC_CMD_MUM_IN_RAW_CMD_LENMAX 252 +#define MC_CMD_MUM_IN_RAW_CMD_LEN(num) (16+1*(num)) +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MUM I2C cmd code */ +#define MC_CMD_MUM_IN_RAW_CMD_CMD_CODE_OFST 4 +/* Number of bytes to write */ +#define MC_CMD_MUM_IN_RAW_CMD_NUM_WRITE_OFST 8 +/* Number of bytes to read */ +#define MC_CMD_MUM_IN_RAW_CMD_NUM_READ_OFST 12 +/* Bytes to write */ +#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_OFST 16 +#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_LEN 1 +#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MINNUM 1 +#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MAXNUM 236 + +/* MC_CMD_MUM_IN_LOG msgrequest */ +#define MC_CMD_MUM_IN_LOG_LEN 8 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_LOG_OP_OFST 4 +#define MC_CMD_MUM_IN_LOG_OP_UART 0x1 /* enum */ + +/* MC_CMD_MUM_IN_LOG_OP_UART msgrequest */ +#define MC_CMD_MUM_IN_LOG_OP_UART_LEN 12 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_LOG_OP_OFST 4 */ +/* Enable/disable debug output to UART */ +#define MC_CMD_MUM_IN_LOG_OP_UART_ENABLE_OFST 8 + +/* MC_CMD_MUM_IN_GPIO msgrequest */ +#define MC_CMD_MUM_IN_GPIO_LEN 8 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_GPIO_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OPCODE_LBN 0 +#define MC_CMD_MUM_IN_GPIO_OPCODE_WIDTH 8 +#define MC_CMD_MUM_IN_GPIO_IN_READ 0x0 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OUT_WRITE 0x1 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OUT_READ 0x2 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE 0x3 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ 0x4 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OP 0x5 /* enum */ + +/* MC_CMD_MUM_IN_GPIO_IN_READ msgrequest */ +#define MC_CMD_MUM_IN_GPIO_IN_READ_LEN 8 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_GPIO_IN_READ_HDR_OFST 4 + +/* MC_CMD_MUM_IN_GPIO_OUT_WRITE msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_LEN 16 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_HDR_OFST 4 +/* The first 32-bit word to be written to the GPIO OUT register. */ +#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK1_OFST 8 +/* The second 32-bit word to be written to the GPIO OUT register. */ +#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK2_OFST 12 + +/* MC_CMD_MUM_IN_GPIO_OUT_READ msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OUT_READ_LEN 8 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_GPIO_OUT_READ_HDR_OFST 4 + +/* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_LEN 16 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_HDR_OFST 4 +/* The first 32-bit word to be written to the GPIO OUT ENABLE register. 
*/ +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK1_OFST 8 +/* The second 32-bit word to be written to the GPIO OUT ENABLE register. */ +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK2_OFST 12 + +/* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_LEN 8 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_HDR_OFST 4 + +/* MC_CMD_MUM_IN_GPIO_OP msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OP_LEN 8 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_GPIO_OP_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_LBN 8 +#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_WIDTH 8 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ 0x0 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE 0x1 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG 0x2 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE 0x3 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_LBN 16 +#define MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_WIDTH 8 + +/* MC_CMD_MUM_IN_GPIO_OP_OUT_READ msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_LEN 8 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_HDR_OFST 4 + +/* MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_LEN 8 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_LBN 24 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_WIDTH 8 + +/* MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_LEN 8 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_LBN 24 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_WIDTH 8 + +/* MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_LEN 8 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_LBN 24 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_WIDTH 8 + +/* MC_CMD_MUM_IN_READ_SENSORS msgrequest */ +#define MC_CMD_MUM_IN_READ_SENSORS_LEN 8 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_READ_SENSORS_PARAMS_OFST 4 +#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_LBN 0 +#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_WIDTH 8 +#define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_LBN 8 +#define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_WIDTH 8 + +/* MC_CMD_MUM_IN_PROGRAM_CLOCKS msgrequest */ +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_LEN 12 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* Bit-mask of clocks to be programmed */ +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_MASK_OFST 4 +#define MC_CMD_MUM_CLOCK_ID_FPGA 0x0 /* enum */ +#define MC_CMD_MUM_CLOCK_ID_DDR 0x1 /* enum */ +#define MC_CMD_MUM_CLOCK_ID_NIC 0x2 /* enum */ +/* Control flags for clock programming */ +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_OFST 8 +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_LBN 0 +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_WIDTH 1 + +/* MC_CMD_MUM_IN_FPGA_LOAD msgrequest */ +#define MC_CMD_MUM_IN_FPGA_LOAD_LEN 8 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* Enable/Disable FPGA config from flash */ +#define MC_CMD_MUM_IN_FPGA_LOAD_ENABLE_OFST 4 + +/* MC_CMD_MUM_IN_READ_ATB_SENSOR msgrequest */ +#define MC_CMD_MUM_IN_READ_ATB_SENSOR_LEN 4 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ + +/* MC_CMD_MUM_IN_QSFP msgrequest */ +#define MC_CMD_MUM_IN_QSFP_LEN 12 +/* MUM cmd header */ +/* 
MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_QSFP_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_OPCODE_LBN 0 +#define MC_CMD_MUM_IN_QSFP_OPCODE_WIDTH 4 +#define MC_CMD_MUM_IN_QSFP_INIT 0x0 /* enum */ +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE 0x1 /* enum */ +#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP 0x2 /* enum */ +#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO 0x3 /* enum */ +#define MC_CMD_MUM_IN_QSFP_FILL_STATS 0x4 /* enum */ +#define MC_CMD_MUM_IN_QSFP_POLL_BIST 0x5 /* enum */ +#define MC_CMD_MUM_IN_QSFP_IDX_OFST 8 + +/* MC_CMD_MUM_IN_QSFP_INIT msgrequest */ +#define MC_CMD_MUM_IN_QSFP_INIT_LEN 16 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_QSFP_INIT_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_INIT_IDX_OFST 8 +#define MC_CMD_MUM_IN_QSFP_INIT_CAGE_OFST 12 + +/* MC_CMD_MUM_IN_QSFP_RECONFIGURE msgrequest */ +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_LEN 24 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_IDX_OFST 8 +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_TX_DISABLE_OFST 12 +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LANES_OFST 16 +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LINK_SPEED_OFST 20 + +/* MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP msgrequest */ +#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_LEN 12 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_IDX_OFST 8 + +/* MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO msgrequest */ +#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_LEN 16 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_IDX_OFST 8 +#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_PAGE_OFST 12 + +/* MC_CMD_MUM_IN_QSFP_FILL_STATS msgrequest */ +#define MC_CMD_MUM_IN_QSFP_FILL_STATS_LEN 12 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_QSFP_FILL_STATS_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_FILL_STATS_IDX_OFST 8 + +/* MC_CMD_MUM_IN_QSFP_POLL_BIST msgrequest */ +#define MC_CMD_MUM_IN_QSFP_POLL_BIST_LEN 12 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_OFST 8 + +/* MC_CMD_MUM_OUT msgresponse */ +#define MC_CMD_MUM_OUT_LEN 0 + +/* MC_CMD_MUM_OUT_NULL msgresponse */ +#define MC_CMD_MUM_OUT_NULL_LEN 0 + +/* MC_CMD_MUM_OUT_GET_VERSION msgresponse */ +#define MC_CMD_MUM_OUT_GET_VERSION_LEN 12 +#define MC_CMD_MUM_OUT_GET_VERSION_FIRMWARE_OFST 0 +#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_OFST 4 +#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LEN 8 +#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_OFST 4 +#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_HI_OFST 8 + +/* MC_CMD_MUM_OUT_RAW_CMD msgresponse */ +#define MC_CMD_MUM_OUT_RAW_CMD_LENMIN 1 +#define MC_CMD_MUM_OUT_RAW_CMD_LENMAX 252 +#define MC_CMD_MUM_OUT_RAW_CMD_LEN(num) (0+1*(num)) +/* returned data */ +#define MC_CMD_MUM_OUT_RAW_CMD_DATA_OFST 0 +#define MC_CMD_MUM_OUT_RAW_CMD_DATA_LEN 1 +#define MC_CMD_MUM_OUT_RAW_CMD_DATA_MINNUM 1 +#define MC_CMD_MUM_OUT_RAW_CMD_DATA_MAXNUM 252 + +/* MC_CMD_MUM_OUT_READ msgresponse */ +#define MC_CMD_MUM_OUT_READ_LENMIN 4 +#define MC_CMD_MUM_OUT_READ_LENMAX 252 +#define MC_CMD_MUM_OUT_READ_LEN(num) (0+4*(num)) +#define MC_CMD_MUM_OUT_READ_BUFFER_OFST 0 +#define MC_CMD_MUM_OUT_READ_BUFFER_LEN 4 +#define MC_CMD_MUM_OUT_READ_BUFFER_MINNUM 1 +#define MC_CMD_MUM_OUT_READ_BUFFER_MAXNUM 63 + +/* MC_CMD_MUM_OUT_WRITE msgresponse */ +#define MC_CMD_MUM_OUT_WRITE_LEN 0 + +/* MC_CMD_MUM_OUT_LOG msgresponse */ +#define 
MC_CMD_MUM_OUT_LOG_LEN 0 + +/* MC_CMD_MUM_OUT_LOG_OP_UART msgresponse */ +#define MC_CMD_MUM_OUT_LOG_OP_UART_LEN 0 + +/* MC_CMD_MUM_OUT_GPIO_IN_READ msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_IN_READ_LEN 8 +/* The first 32-bit word read from the GPIO IN register. */ +#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK1_OFST 0 +/* The second 32-bit word read from the GPIO IN register. */ +#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK2_OFST 4 + +/* MC_CMD_MUM_OUT_GPIO_OUT_WRITE msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_OUT_WRITE_LEN 0 + +/* MC_CMD_MUM_OUT_GPIO_OUT_READ msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_OUT_READ_LEN 8 +/* The first 32-bit word read from the GPIO OUT register. */ +#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK1_OFST 0 +/* The second 32-bit word read from the GPIO OUT register. */ +#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK2_OFST 4 + +/* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE_LEN 0 + +/* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_LEN 8 +#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK1_OFST 0 +#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK2_OFST 4 + +/* MC_CMD_MUM_OUT_GPIO_OP_OUT_READ msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_LEN 4 +#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_BIT_READ_OFST 0 + +/* MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE_LEN 0 + +/* MC_CMD_MUM_OUT_GPIO_OP_OUT_CONFIG msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_OP_OUT_CONFIG_LEN 0 + +/* MC_CMD_MUM_OUT_GPIO_OP_OUT_ENABLE msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_OP_OUT_ENABLE_LEN 0 + +/* MC_CMD_MUM_OUT_READ_SENSORS msgresponse */ +#define MC_CMD_MUM_OUT_READ_SENSORS_LENMIN 4 +#define MC_CMD_MUM_OUT_READ_SENSORS_LENMAX 252 +#define MC_CMD_MUM_OUT_READ_SENSORS_LEN(num) (0+4*(num)) +#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_OFST 0 +#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_LEN 4 +#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_MINNUM 1 +#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_MAXNUM 63 +#define MC_CMD_MUM_OUT_READ_SENSORS_READING_LBN 0 +#define MC_CMD_MUM_OUT_READ_SENSORS_READING_WIDTH 16 +#define MC_CMD_MUM_OUT_READ_SENSORS_STATE_LBN 16 +#define MC_CMD_MUM_OUT_READ_SENSORS_STATE_WIDTH 8 +#define MC_CMD_MUM_OUT_READ_SENSORS_TYPE_LBN 24 +#define MC_CMD_MUM_OUT_READ_SENSORS_TYPE_WIDTH 8 + +/* MC_CMD_MUM_OUT_PROGRAM_CLOCKS msgresponse */ +#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_LEN 4 +#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_OK_MASK_OFST 0 + +/* MC_CMD_MUM_OUT_FPGA_LOAD msgresponse */ +#define MC_CMD_MUM_OUT_FPGA_LOAD_LEN 0 + +/* MC_CMD_MUM_OUT_READ_ATB_SENSOR msgresponse */ +#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_LEN 4 +#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_RESULT_OFST 0 + +/* MC_CMD_MUM_OUT_QSFP_INIT msgresponse */ +#define MC_CMD_MUM_OUT_QSFP_INIT_LEN 0 + +/* MC_CMD_MUM_OUT_QSFP_RECONFIGURE msgresponse */ +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_LEN 8 +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_OFST 0 +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_OFST 4 +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_LBN 0 +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_WIDTH 1 +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_LBN 1 +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_WIDTH 1 + +/* MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP msgresponse */ +#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_LEN 4 +#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_PORT_PHY_LP_CAP_OFST 0 + +/* 
MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO msgresponse */ +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMIN 5 +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMAX 252 +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LEN(num) (4+1*(num)) +/* in bytes */ +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_OFST 0 +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_OFST 4 +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_LEN 1 +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MINNUM 1 +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MAXNUM 248 + +/* MC_CMD_MUM_OUT_QSFP_FILL_STATS msgresponse */ +#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_LEN 8 +#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PMA_PMD_LINK_UP_OFST 0 +#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PCS_LINK_UP_OFST 4 + +/* MC_CMD_MUM_OUT_QSFP_POLL_BIST msgresponse */ +#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_LEN 4 +#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_OFST 0 + /* MC_CMD_RESOURCE_SPECIFIER enum */ /* enum: Any */ #define MC_CMD_RESOURCE_INSTANCE_ANY 0xffffffff @@ -4062,6 +5092,30 @@ #define NVRAM_PARTITION_TYPE_PHY_MIN 0xa00 /* enum: End of range used for PHY partitions (low 8 bits are the PHY ID) */ #define NVRAM_PARTITION_TYPE_PHY_MAX 0xaff +/* enum: Primary FPGA partition */ +#define NVRAM_PARTITION_TYPE_FPGA 0xb00 +/* enum: Secondary FPGA partition */ +#define NVRAM_PARTITION_TYPE_FPGA_BACKUP 0xb01 +/* enum: FC firmware partition */ +#define NVRAM_PARTITION_TYPE_FC_FIRMWARE 0xb02 +/* enum: FC License partition */ +#define NVRAM_PARTITION_TYPE_FC_LICENSE 0xb03 +/* enum: Non-volatile log output partition for FC */ +#define NVRAM_PARTITION_TYPE_FC_LOG 0xb04 +/* enum: MUM firmware partition */ +#define NVRAM_PARTITION_TYPE_MUM_FIRMWARE 0xc00 +/* enum: MUM Non-volatile log output partition. */ +#define NVRAM_PARTITION_TYPE_MUM_LOG 0xc01 +/* enum: MUM Application table partition. */ +#define NVRAM_PARTITION_TYPE_MUM_APPTABLE 0xc02 +/* enum: MUM boot rom partition. */ +#define NVRAM_PARTITION_TYPE_MUM_BOOT_ROM 0xc03 +/* enum: MUM production signatures & calibration rom partition. */ +#define NVRAM_PARTITION_TYPE_MUM_PROD_ROM 0xc04 +/* enum: MUM user signatures & calibration rom partition. */ +#define NVRAM_PARTITION_TYPE_MUM_USER_ROM 0xc05 +/* enum: MUM fuses and lockbits partition. 
*/ +#define NVRAM_PARTITION_TYPE_MUM_FUSELOCK 0xc06 /* enum: Start of reserved value range (firmware may use for any purpose) */ #define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN 0xff00 /* enum: End of reserved value range (firmware may use for any purpose) */ @@ -4077,14 +5131,70 @@ #define LICENSED_APP_ID_LEN 4 #define LICENSED_APP_ID_ID_OFST 0 /* enum: OpenOnload */ -#define LICENSED_APP_ID_ONLOAD 0x1 +#define LICENSED_APP_ID_ONLOAD 0x1 /* enum: PTP timestamping */ -#define LICENSED_APP_ID_PTP 0x2 +#define LICENSED_APP_ID_PTP 0x2 /* enum: SolarCapture Pro */ -#define LICENSED_APP_ID_SOLARCAPTURE_PRO 0x4 +#define LICENSED_APP_ID_SOLARCAPTURE_PRO 0x4 +/* enum: SolarSecure filter engine */ +#define LICENSED_APP_ID_SOLARSECURE 0x8 +/* enum: Performance monitor */ +#define LICENSED_APP_ID_PERF_MONITOR 0x10 +/* enum: SolarCapture Live */ +#define LICENSED_APP_ID_SOLARCAPTURE_LIVE 0x20 +/* enum: Capture SolarSystem */ +#define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM 0x40 +/* enum: Network Access Control */ +#define LICENSED_APP_ID_NETWORK_ACCESS_CONTROL 0x80 #define LICENSED_APP_ID_ID_LBN 0 #define LICENSED_APP_ID_ID_WIDTH 32 +/* TX_TIMESTAMP_EVENT structuredef */ +#define TX_TIMESTAMP_EVENT_LEN 6 +/* lower 16 bits of timestamp data */ +#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_OFST 0 +#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_LEN 2 +#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_LBN 0 +#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_WIDTH 16 +/* Type of TX event, ordinary TX completion, low or high part of TX timestamp + */ +#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_OFST 3 +#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_LEN 1 +/* enum: This is a TX completion event, not a timestamp */ +#define TX_TIMESTAMP_EVENT_TX_EV_COMPLETION 0x0 +/* enum: This is the low part of a TX timestamp event */ +#define TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO 0x51 +/* enum: This is the high part of a TX timestamp event */ +#define TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_HI 0x52 +#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_LBN 24 +#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_WIDTH 8 +/* upper 16 bits of timestamp data */ +#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_OFST 4 +#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_LEN 2 +#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_LBN 32 +#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_WIDTH 16 + +/* RSS_MODE structuredef */ +#define RSS_MODE_LEN 1 +/* The RSS mode for a particular packet type is a value from 0 - 15 which can + * be considered as 4 bits selecting which fields are included in the hash. (A + * value 0 effectively disables RSS spreading for the packet type.) The YAML + * generation tools require this structure to be a whole number of bytes wide, + * but only 4 bits are relevant. 
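/* Illustrative sketch (editorial, not part of the patch): composing the
 * 4-bit RSS_MODE hash selector described above from the RSS_MODE_HASH_*
 * bit positions defined just below. A result of 0 disables RSS spreading
 * for that packet type. The helper name is hypothetical.
 */
static inline unsigned int rss_mode_selector(bool hash_src_addr,
                                             bool hash_dst_addr,
                                             bool hash_src_port,
                                             bool hash_dst_port)
{
        return ((hash_src_addr ? 1U : 0) << RSS_MODE_HASH_SRC_ADDR_LBN) |
               ((hash_dst_addr ? 1U : 0) << RSS_MODE_HASH_DST_ADDR_LBN) |
               ((hash_src_port ? 1U : 0) << RSS_MODE_HASH_SRC_PORT_LBN) |
               ((hash_dst_port ? 1U : 0) << RSS_MODE_HASH_DST_PORT_LBN);
}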
+ */ +#define RSS_MODE_HASH_SELECTOR_OFST 0 +#define RSS_MODE_HASH_SELECTOR_LEN 1 +#define RSS_MODE_HASH_SRC_ADDR_LBN 0 +#define RSS_MODE_HASH_SRC_ADDR_WIDTH 1 +#define RSS_MODE_HASH_DST_ADDR_LBN 1 +#define RSS_MODE_HASH_DST_ADDR_WIDTH 1 +#define RSS_MODE_HASH_SRC_PORT_LBN 2 +#define RSS_MODE_HASH_SRC_PORT_WIDTH 1 +#define RSS_MODE_HASH_DST_PORT_LBN 3 +#define RSS_MODE_HASH_DST_PORT_WIDTH 1 +#define RSS_MODE_HASH_SELECTOR_LBN 0 +#define RSS_MODE_HASH_SELECTOR_WIDTH 8 + /***********************************/ /* MC_CMD_READ_REGS @@ -4092,6 +5202,8 @@ */ #define MC_CMD_READ_REGS 0x50 +#define MC_CMD_0x50_PRIVILEGE_CTG SRIOV_CTG_ADMIN + /* MC_CMD_READ_REGS_IN msgrequest */ #define MC_CMD_READ_REGS_IN_LEN 0 @@ -4115,6 +5227,8 @@ */ #define MC_CMD_INIT_EVQ 0x80 +#define MC_CMD_0x80_PRIVILEGE_CTG SRIOV_CTG_GENERAL + /* MC_CMD_INIT_EVQ_IN msgrequest */ #define MC_CMD_INIT_EVQ_IN_LENMIN 44 #define MC_CMD_INIT_EVQ_IN_LENMAX 548 @@ -4213,7 +5327,11 @@ */ #define MC_CMD_INIT_RXQ 0x81 -/* MC_CMD_INIT_RXQ_IN msgrequest */ +#define MC_CMD_0x81_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_INIT_RXQ_IN msgrequest: Legacy RXQ_INIT request. Use extended version + * in new code. + */ #define MC_CMD_INIT_RXQ_IN_LENMIN 36 #define MC_CMD_INIT_RXQ_IN_LENMAX 252 #define MC_CMD_INIT_RXQ_IN_LEN(num) (28+8*(num)) @@ -4256,16 +5374,84 @@ #define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MINNUM 1 #define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MAXNUM 28 +/* MC_CMD_INIT_RXQ_EXT_IN msgrequest: Extended RXQ_INIT with additional mode + * flags + */ +#define MC_CMD_INIT_RXQ_EXT_IN_LEN 544 +/* Size, in entries */ +#define MC_CMD_INIT_RXQ_EXT_IN_SIZE_OFST 0 +/* The EVQ to send events to. This is an index originally specified to INIT_EVQ + */ +#define MC_CMD_INIT_RXQ_EXT_IN_TARGET_EVQ_OFST 4 +/* The value to put in the event data. Check hardware spec. for valid range. */ +#define MC_CMD_INIT_RXQ_EXT_IN_LABEL_OFST 8 +/* Desired instance. Must be set to a specific instance, which is a function + * local queue index. + */ +#define MC_CMD_INIT_RXQ_EXT_IN_INSTANCE_OFST 12 +/* There will be more flags here. 
*/ +#define MC_CMD_INIT_RXQ_EXT_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_LBN 1 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_TIMESTAMP_LBN 2 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_TIMESTAMP_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_CRC_MODE_LBN 3 +#define MC_CMD_INIT_RXQ_EXT_IN_CRC_MODE_WIDTH 4 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_CHAIN_LBN 7 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_CHAIN_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_PREFIX_LBN 8 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_PREFIX_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER_LBN 9 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_LBN 10 +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_WIDTH 4 +/* enum: One packet per descriptor (for normal networking) */ +#define MC_CMD_INIT_RXQ_EXT_IN_SINGLE_PACKET 0x0 +/* enum: Pack multiple packets into large descriptors (for SolarCapture) */ +#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM 0x1 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SNAPSHOT_MODE_LBN 14 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SNAPSHOT_MODE_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_LBN 15 +#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_WIDTH 3 +#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_1M 0x0 /* enum */ +#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_512K 0x1 /* enum */ +#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_256K 0x2 /* enum */ +#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_128K 0x3 /* enum */ +#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_64K 0x4 /* enum */ +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_LBN 18 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1 +/* Owner ID to use if in buffer mode (zero if physical) */ +#define MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_OFST 20 +/* The port ID associated with the v-adaptor which should contain this DMAQ. */ +#define MC_CMD_INIT_RXQ_EXT_IN_PORT_ID_OFST 24 +/* 64-bit address of 4k of 4k-aligned host memory buffer */ +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_OFST 28 +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LEN 8 +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LO_OFST 28 +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_HI_OFST 32 +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_NUM 64 +/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */ +#define MC_CMD_INIT_RXQ_EXT_IN_SNAPSHOT_LENGTH_OFST 540 + /* MC_CMD_INIT_RXQ_OUT msgresponse */ #define MC_CMD_INIT_RXQ_OUT_LEN 0 +/* MC_CMD_INIT_RXQ_EXT_OUT msgresponse */ +#define MC_CMD_INIT_RXQ_EXT_OUT_LEN 0 + /***********************************/ /* MC_CMD_INIT_TXQ */ #define MC_CMD_INIT_TXQ 0x82 -/* MC_CMD_INIT_TXQ_IN msgrequest */ +#define MC_CMD_0x82_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_INIT_TXQ_IN msgrequest: Legacy INIT_TXQ request. Use extended version + * in new code. 
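/* Illustrative sketch (editorial, not part of the patch): mapping a packed
 * stream buffer size in bytes to the MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_* enum
 * carried in the PACKED_STREAM_BUFF_SIZE field defined above. Returns a
 * negative value for sizes the field cannot express. The helper name is
 * hypothetical.
 */
static int example_ps_buff_size_to_enum(unsigned long bytes)
{
        switch (bytes) {
        case 1024 * 1024:
                return MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_1M;
        case 512 * 1024:
                return MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_512K;
        case 256 * 1024:
                return MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_256K;
        case 128 * 1024:
                return MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_128K;
        case 64 * 1024:
                return MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_64K;
        default:
                return -1;
        }
}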
+ */ #define MC_CMD_INIT_TXQ_IN_LENMIN 36 #define MC_CMD_INIT_TXQ_IN_LENMAX 252 #define MC_CMD_INIT_TXQ_IN_LEN(num) (28+8*(num)) @@ -4297,6 +5483,10 @@ #define MC_CMD_INIT_TXQ_IN_FLAG_TIMESTAMP_WIDTH 1 #define MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_LBN 9 #define MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_WIDTH 1 +#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_IP_CSUM_EN_LBN 10 +#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_IP_CSUM_EN_WIDTH 1 +#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_LBN 11 +#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1 /* Owner ID to use if in buffer mode (zero if physical) */ #define MC_CMD_INIT_TXQ_IN_OWNER_ID_OFST 20 /* The port ID associated with the v-adaptor which should contain this DMAQ. */ @@ -4309,6 +5499,60 @@ #define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MINNUM 1 #define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MAXNUM 28 +/* MC_CMD_INIT_TXQ_EXT_IN msgrequest: Extended INIT_TXQ with additional mode + * flags + */ +#define MC_CMD_INIT_TXQ_EXT_IN_LEN 544 +/* Size, in entries */ +#define MC_CMD_INIT_TXQ_EXT_IN_SIZE_OFST 0 +/* The EVQ to send events to. This is an index originally specified to + * INIT_EVQ. + */ +#define MC_CMD_INIT_TXQ_EXT_IN_TARGET_EVQ_OFST 4 +/* The value to put in the event data. Check hardware spec. for valid range. */ +#define MC_CMD_INIT_TXQ_EXT_IN_LABEL_OFST 8 +/* Desired instance. Must be set to a specific instance, which is a function + * local queue index. + */ +#define MC_CMD_INIT_TXQ_EXT_IN_INSTANCE_OFST 12 +/* There will be more flags here. */ +#define MC_CMD_INIT_TXQ_EXT_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_LBN 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_CSUM_DIS_LBN 2 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_CSUM_DIS_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_UDP_ONLY_LBN 3 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_UDP_ONLY_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_CRC_MODE_LBN 4 +#define MC_CMD_INIT_TXQ_EXT_IN_CRC_MODE_WIDTH 4 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TIMESTAMP_LBN 8 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TIMESTAMP_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_PACER_BYPASS_LBN 9 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_PACER_BYPASS_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN_LBN 10 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_LBN 11 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1 +/* Owner ID to use if in buffer mode (zero if physical) */ +#define MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_OFST 20 +/* The port ID associated with the v-adaptor which should contain this DMAQ. */ +#define MC_CMD_INIT_TXQ_EXT_IN_PORT_ID_OFST 24 +/* 64-bit address of 4k of 4k-aligned host memory buffer */ +#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_OFST 28 +#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LEN 8 +#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LO_OFST 28 +#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_HI_OFST 32 +#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MINNUM 1 +#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MAXNUM 64 +/* Flags related to Qbb flow control mode. 
*/ +#define MC_CMD_INIT_TXQ_EXT_IN_QBB_FLAGS_OFST 540 +#define MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_LBN 0 +#define MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_LBN 1 +#define MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_WIDTH 3 + /* MC_CMD_INIT_TXQ_OUT msgresponse */ #define MC_CMD_INIT_TXQ_OUT_LEN 0 @@ -4322,6 +5566,8 @@ */ #define MC_CMD_FINI_EVQ 0x83 +#define MC_CMD_0x83_PRIVILEGE_CTG SRIOV_CTG_GENERAL + /* MC_CMD_FINI_EVQ_IN msgrequest */ #define MC_CMD_FINI_EVQ_IN_LEN 4 /* Instance of EVQ to destroy. Should be the same instance as that previously @@ -4339,6 +5585,8 @@ */ #define MC_CMD_FINI_RXQ 0x84 +#define MC_CMD_0x84_PRIVILEGE_CTG SRIOV_CTG_GENERAL + /* MC_CMD_FINI_RXQ_IN msgrequest */ #define MC_CMD_FINI_RXQ_IN_LEN 4 /* Instance of RXQ to destroy */ @@ -4354,6 +5602,8 @@ */ #define MC_CMD_FINI_TXQ 0x85 +#define MC_CMD_0x85_PRIVILEGE_CTG SRIOV_CTG_GENERAL + /* MC_CMD_FINI_TXQ_IN msgrequest */ #define MC_CMD_FINI_TXQ_IN_LEN 4 /* Instance of TXQ to destroy */ @@ -4369,6 +5619,8 @@ */ #define MC_CMD_DRIVER_EVENT 0x86 +#define MC_CMD_0x86_PRIVILEGE_CTG SRIOV_CTG_GENERAL + /* MC_CMD_DRIVER_EVENT_IN msgrequest */ #define MC_CMD_DRIVER_EVENT_IN_LEN 12 /* Handle of target EVQ */ @@ -4392,6 +5644,8 @@ */ #define MC_CMD_PROXY_CMD 0x5b +#define MC_CMD_0x5b_PRIVILEGE_CTG SRIOV_CTG_ADMIN + /* MC_CMD_PROXY_CMD_IN msgrequest */ #define MC_CMD_PROXY_CMD_IN_LEN 4 /* The handle of the target function. */ @@ -4405,6 +5659,132 @@ /* MC_CMD_PROXY_CMD_OUT msgresponse */ #define MC_CMD_PROXY_CMD_OUT_LEN 0 +/* MC_PROXY_STATUS_BUFFER structuredef: Host memory status buffer used to + * manage proxied requests + */ +#define MC_PROXY_STATUS_BUFFER_LEN 16 +/* Handle allocated by the firmware for this proxy transaction */ +#define MC_PROXY_STATUS_BUFFER_HANDLE_OFST 0 +/* enum: An invalid handle. */ +#define MC_PROXY_STATUS_BUFFER_HANDLE_INVALID 0x0 +#define MC_PROXY_STATUS_BUFFER_HANDLE_LBN 0 +#define MC_PROXY_STATUS_BUFFER_HANDLE_WIDTH 32 +/* The requesting physical function number */ +#define MC_PROXY_STATUS_BUFFER_PF_OFST 4 +#define MC_PROXY_STATUS_BUFFER_PF_LEN 2 +#define MC_PROXY_STATUS_BUFFER_PF_LBN 32 +#define MC_PROXY_STATUS_BUFFER_PF_WIDTH 16 +/* The requesting virtual function number. Set to VF_NULL if the target is a + * PF. + */ +#define MC_PROXY_STATUS_BUFFER_VF_OFST 6 +#define MC_PROXY_STATUS_BUFFER_VF_LEN 2 +#define MC_PROXY_STATUS_BUFFER_VF_LBN 48 +#define MC_PROXY_STATUS_BUFFER_VF_WIDTH 16 +/* The target function RID. */ +#define MC_PROXY_STATUS_BUFFER_RID_OFST 8 +#define MC_PROXY_STATUS_BUFFER_RID_LEN 2 +#define MC_PROXY_STATUS_BUFFER_RID_LBN 64 +#define MC_PROXY_STATUS_BUFFER_RID_WIDTH 16 +/* The status of the proxy as described in MC_CMD_PROXY_COMPLETE. */ +#define MC_PROXY_STATUS_BUFFER_STATUS_OFST 10 +#define MC_PROXY_STATUS_BUFFER_STATUS_LEN 2 +#define MC_PROXY_STATUS_BUFFER_STATUS_LBN 80 +#define MC_PROXY_STATUS_BUFFER_STATUS_WIDTH 16 +/* If a request is authorized rather than carried out by the host, this is the + * elevated privilege mask granted to the requesting function. 
+ */ +#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_OFST 12 +#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_LBN 96 +#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_WIDTH 32 + + +/***********************************/ +/* MC_CMD_PROXY_CONFIGURE + * Enable/disable authorization of MCDI requests from unprivileged functions by + * a designated admin function + */ +#define MC_CMD_PROXY_CONFIGURE 0x58 + +#define MC_CMD_0x58_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_PROXY_CONFIGURE_IN msgrequest */ +#define MC_CMD_PROXY_CONFIGURE_IN_LEN 108 +#define MC_CMD_PROXY_CONFIGURE_IN_FLAGS_OFST 0 +#define MC_CMD_PROXY_CONFIGURE_IN_ENABLE_LBN 0 +#define MC_CMD_PROXY_CONFIGURE_IN_ENABLE_WIDTH 1 +/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS + * of blocks, each of the size REQUEST_BLOCK_SIZE. + */ +#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_OFST 4 +#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LEN 8 +#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LO_OFST 4 +#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_OFST 8 +/* Must be a power of 2 */ +#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_OFST 12 +/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS + * of blocks, each of the size REPLY_BLOCK_SIZE. + */ +#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_OFST 16 +#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LEN 8 +#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LO_OFST 16 +#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_OFST 20 +/* Must be a power of 2 */ +#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BLOCK_SIZE_OFST 24 +/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS + * of blocks, each of the size STATUS_BLOCK_SIZE. This buffer is only needed if + * host intends to complete proxied operations by using MC_CMD_PROXY_CMD. + */ +#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_OFST 28 +#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LEN 8 +#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LO_OFST 28 +#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_OFST 32 +/* Must be a power of 2, or zero if this buffer is not provided */ +#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BLOCK_SIZE_OFST 36 +/* Applies to all three buffers */ +#define MC_CMD_PROXY_CONFIGURE_IN_NUM_BLOCKS_OFST 40 +/* A bit mask defining which MCDI operations may be proxied */ +#define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_OFST 44 +#define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_LEN 64 + +/* MC_CMD_PROXY_CONFIGURE_OUT msgresponse */ +#define MC_CMD_PROXY_CONFIGURE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_PROXY_COMPLETE + * Tells FW that a requested proxy operation has either been completed (by + * using MC_CMD_PROXY_CMD) or authorized/declined. May only be sent by the + * function that enabled proxying/authorization (by using + * MC_CMD_PROXY_CONFIGURE). + */ +#define MC_CMD_PROXY_COMPLETE 0x5f + +#define MC_CMD_0x5f_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_PROXY_COMPLETE_IN msgrequest */ +#define MC_CMD_PROXY_COMPLETE_IN_LEN 12 +#define MC_CMD_PROXY_COMPLETE_IN_BLOCK_INDEX_OFST 0 +#define MC_CMD_PROXY_COMPLETE_IN_STATUS_OFST 4 +/* enum: The operation has been completed by using MC_CMD_PROXY_CMD, the reply + * is stored in the REPLY_BUFF. + */ +#define MC_CMD_PROXY_COMPLETE_IN_COMPLETE 0x0 +/* enum: The operation has been authorized. The originating function may now + * try again. + */ +#define MC_CMD_PROXY_COMPLETE_IN_AUTHORIZED 0x1 +/* enum: The operation has been declined. 
*/ +#define MC_CMD_PROXY_COMPLETE_IN_DECLINED 0x2 +/* enum: The authorization failed because the relevant application did not + * respond in time. + */ +#define MC_CMD_PROXY_COMPLETE_IN_TIMEDOUT 0x3 +#define MC_CMD_PROXY_COMPLETE_IN_HANDLE_OFST 8 + +/* MC_CMD_PROXY_COMPLETE_OUT msgresponse */ +#define MC_CMD_PROXY_COMPLETE_OUT_LEN 0 + /***********************************/ /* MC_CMD_ALLOC_BUFTBL_CHUNK @@ -4414,6 +5794,8 @@ */ #define MC_CMD_ALLOC_BUFTBL_CHUNK 0x87 +#define MC_CMD_0x87_PRIVILEGE_CTG SRIOV_CTG_ONLOAD + /* MC_CMD_ALLOC_BUFTBL_CHUNK_IN msgrequest */ #define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_LEN 8 /* Owner ID to use */ @@ -4437,6 +5819,8 @@ */ #define MC_CMD_PROGRAM_BUFTBL_ENTRIES 0x88 +#define MC_CMD_0x88_PRIVILEGE_CTG SRIOV_CTG_ONLOAD + /* MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN msgrequest */ #define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMIN 20 #define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX 268 @@ -4463,6 +5847,8 @@ */ #define MC_CMD_FREE_BUFTBL_CHUNK 0x89 +#define MC_CMD_0x89_PRIVILEGE_CTG SRIOV_CTG_ONLOAD + /* MC_CMD_FREE_BUFTBL_CHUNK_IN msgrequest */ #define MC_CMD_FREE_BUFTBL_CHUNK_IN_LEN 4 #define MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_OFST 0 @@ -4470,6 +5856,44 @@ /* MC_CMD_FREE_BUFTBL_CHUNK_OUT msgresponse */ #define MC_CMD_FREE_BUFTBL_CHUNK_OUT_LEN 0 +/* PORT_CONFIG_ENTRY structuredef */ +#define PORT_CONFIG_ENTRY_LEN 16 +/* External port number (label) */ +#define PORT_CONFIG_ENTRY_EXT_NUMBER_OFST 0 +#define PORT_CONFIG_ENTRY_EXT_NUMBER_LEN 1 +#define PORT_CONFIG_ENTRY_EXT_NUMBER_LBN 0 +#define PORT_CONFIG_ENTRY_EXT_NUMBER_WIDTH 8 +/* Port core location */ +#define PORT_CONFIG_ENTRY_CORE_OFST 1 +#define PORT_CONFIG_ENTRY_CORE_LEN 1 +#define PORT_CONFIG_ENTRY_STANDALONE 0x0 /* enum */ +#define PORT_CONFIG_ENTRY_MASTER 0x1 /* enum */ +#define PORT_CONFIG_ENTRY_SLAVE 0x2 /* enum */ +#define PORT_CONFIG_ENTRY_CORE_LBN 8 +#define PORT_CONFIG_ENTRY_CORE_WIDTH 8 +/* Internal number (HW resource) relative to the core */ +#define PORT_CONFIG_ENTRY_INT_NUMBER_OFST 2 +#define PORT_CONFIG_ENTRY_INT_NUMBER_LEN 1 +#define PORT_CONFIG_ENTRY_INT_NUMBER_LBN 16 +#define PORT_CONFIG_ENTRY_INT_NUMBER_WIDTH 8 +/* Reserved */ +#define PORT_CONFIG_ENTRY_RSVD_OFST 3 +#define PORT_CONFIG_ENTRY_RSVD_LEN 1 +#define PORT_CONFIG_ENTRY_RSVD_LBN 24 +#define PORT_CONFIG_ENTRY_RSVD_WIDTH 8 +/* Bitmask of KR lanes used by the port */ +#define PORT_CONFIG_ENTRY_LANES_OFST 4 +#define PORT_CONFIG_ENTRY_LANES_LBN 32 +#define PORT_CONFIG_ENTRY_LANES_WIDTH 32 +/* Port capabilities (MC_CMD_PHY_CAP_*) */ +#define PORT_CONFIG_ENTRY_SUPPORTED_CAPS_OFST 8 +#define PORT_CONFIG_ENTRY_SUPPORTED_CAPS_LBN 64 +#define PORT_CONFIG_ENTRY_SUPPORTED_CAPS_WIDTH 32 +/* Reserved (align to 16 bytes) */ +#define PORT_CONFIG_ENTRY_RSVD2_OFST 12 +#define PORT_CONFIG_ENTRY_RSVD2_LBN 96 +#define PORT_CONFIG_ENTRY_RSVD2_WIDTH 32 + /***********************************/ /* MC_CMD_FILTER_OP @@ -4477,6 +5901,8 @@ */ #define MC_CMD_FILTER_OP 0x8a +#define MC_CMD_0x8a_PRIVILEGE_CTG SRIOV_CTG_GENERAL + /* MC_CMD_FILTER_OP_IN msgrequest */ #define MC_CMD_FILTER_OP_IN_LEN 108 /* identifies the type of operation requested */ @@ -4539,9 +5965,9 @@ #define MC_CMD_FILTER_OP_IN_RX_DEST_HOST 0x1 /* enum: receive to MC */ #define MC_CMD_FILTER_OP_IN_RX_DEST_MC 0x2 -/* enum: loop back to port 0 TX MAC */ +/* enum: loop back to TXDP 0 */ #define MC_CMD_FILTER_OP_IN_RX_DEST_TX0 0x3 -/* enum: loop back to port 1 TX MAC */ +/* enum: loop back to TXDP 1 */ #define MC_CMD_FILTER_OP_IN_RX_DEST_TX1 0x4 /* receive queue handle (for multiple queue modes, this is the base 
queue) */ #define MC_CMD_FILTER_OP_IN_RX_QUEUE_OFST 24 @@ -4558,9 +5984,7 @@ #define MC_CMD_FILTER_OP_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000 /* RSS context (for RX_MODE_RSS) or .1p mapping handle (for * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or - * MC_CMD_DOT1P_MAPPING_ALLOC. Note that these handles should be considered - * opaque to the host, although a value of 0xFFFFFFFF is guaranteed never to be - * a valid handle. + * MC_CMD_DOT1P_MAPPING_ALLOC. */ #define MC_CMD_FILTER_OP_IN_RX_CONTEXT_OFST 32 /* transmit domain (reserved; set to 0) */ @@ -4615,6 +6039,235 @@ #define MC_CMD_FILTER_OP_IN_DST_IP_OFST 92 #define MC_CMD_FILTER_OP_IN_DST_IP_LEN 16 +/* MC_CMD_FILTER_OP_EXT_IN msgrequest: Extension to MC_CMD_FILTER_OP_IN to + * include handling of VXLAN/NVGRE encapsulated frame filtering (which is + * supported on Medford only). + */ +#define MC_CMD_FILTER_OP_EXT_IN_LEN 172 +/* identifies the type of operation requested */ +#define MC_CMD_FILTER_OP_EXT_IN_OP_OFST 0 +/* Enum values, see field(s): */ +/* MC_CMD_FILTER_OP_IN/OP */ +/* filter handle (for remove / unsubscribe operations) */ +#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_OFST 4 +#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_LEN 8 +#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_LO_OFST 4 +#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_HI_OFST 8 +/* The port ID associated with the v-adaptor which should contain this filter. + */ +#define MC_CMD_FILTER_OP_EXT_IN_PORT_ID_OFST 12 +/* fields to include in match criteria */ +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FIELDS_OFST 16 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_LBN 0 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_LBN 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_MAC_LBN 2 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_PORT_LBN 3 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_MAC_LBN 4 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_PORT_LBN 5 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN 6 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_INNER_VLAN_LBN 7 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_INNER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN_LBN 8 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN 9 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FWDEF0_LBN 10 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FWDEF0_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID_LBN 11 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_IP_LBN 12 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_IP_LBN 13 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_MAC_LBN 14 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_PORT_LBN 15 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_MAC_LBN 16 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_PORT_LBN 17 +#define 
MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ETHER_TYPE_LBN 18 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ETHER_TYPE_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_INNER_VLAN_LBN 19 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_INNER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_OUTER_VLAN_LBN 20 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_OUTER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_IP_PROTO_LBN 21 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_IP_PROTO_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF0_LBN 22 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF0_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF1_LBN 23 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF1_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN 24 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN 25 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN 31 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1 +/* receive destination */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_OFST 20 +/* enum: drop packets */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_DROP 0x0 +/* enum: receive to host */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_HOST 0x1 +/* enum: receive to MC */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_MC 0x2 +/* enum: loop back to TXDP 0 */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX0 0x3 +/* enum: loop back to TXDP 1 */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX1 0x4 +/* receive queue handle (for multiple queue modes, this is the base queue) */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_QUEUE_OFST 24 +/* receive mode */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_OFST 28 +/* enum: receive to just the specified queue */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_SIMPLE 0x0 +/* enum: receive to multiple queues using RSS context */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_RSS 0x1 +/* enum: receive to multiple queues using .1p mapping */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_DOT1P_MAPPING 0x2 +/* enum: install a filter entry that will never match; for test purposes only + */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000 +/* RSS context (for RX_MODE_RSS) or .1p mapping handle (for + * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or + * MC_CMD_DOT1P_MAPPING_ALLOC. 
+ */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_CONTEXT_OFST 32 +/* transmit domain (reserved; set to 0) */ +#define MC_CMD_FILTER_OP_EXT_IN_TX_DOMAIN_OFST 36 +/* transmit destination (either set the MAC and/or PM bits for explicit + * control, or set this field to TX_DEST_DEFAULT for sensible default + * behaviour) + */ +#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_OFST 40 +/* enum: request default behaviour (based on filter type) */ +#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_DEFAULT 0xffffffff +#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_LBN 0 +#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_PM_LBN 1 +#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_PM_WIDTH 1 +/* source MAC address to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_EXT_IN_SRC_MAC_OFST 44 +#define MC_CMD_FILTER_OP_EXT_IN_SRC_MAC_LEN 6 +/* source port to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_EXT_IN_SRC_PORT_OFST 50 +#define MC_CMD_FILTER_OP_EXT_IN_SRC_PORT_LEN 2 +/* destination MAC address to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_EXT_IN_DST_MAC_OFST 52 +#define MC_CMD_FILTER_OP_EXT_IN_DST_MAC_LEN 6 +/* destination port to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_EXT_IN_DST_PORT_OFST 58 +#define MC_CMD_FILTER_OP_EXT_IN_DST_PORT_LEN 2 +/* Ethernet type to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_EXT_IN_ETHER_TYPE_OFST 60 +#define MC_CMD_FILTER_OP_EXT_IN_ETHER_TYPE_LEN 2 +/* Inner VLAN tag to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_EXT_IN_INNER_VLAN_OFST 62 +#define MC_CMD_FILTER_OP_EXT_IN_INNER_VLAN_LEN 2 +/* Outer VLAN tag to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_EXT_IN_OUTER_VLAN_OFST 64 +#define MC_CMD_FILTER_OP_EXT_IN_OUTER_VLAN_LEN 2 +/* IP protocol to match (in low byte; set high byte to 0) */ +#define MC_CMD_FILTER_OP_EXT_IN_IP_PROTO_OFST 66 +#define MC_CMD_FILTER_OP_EXT_IN_IP_PROTO_LEN 2 +/* Firmware defined register 0 to match (reserved; set to 0) */ +#define MC_CMD_FILTER_OP_EXT_IN_FWDEF0_OFST 68 +/* VNI (for VXLAN/Geneve, when IP protocol is UDP) or VSID (for NVGRE, when IP + * protocol is GRE) to match (as bytes in network order; set last byte to 0 for + * VXLAN/NVGRE, or 1 for Geneve) + */ +#define MC_CMD_FILTER_OP_EXT_IN_VNI_OR_VSID_OFST 72 +#define MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_LBN 0 +#define MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_WIDTH 24 +#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_LBN 24 +#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_WIDTH 8 +/* enum: Match VXLAN traffic with this VNI */ +#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN 0x0 +/* enum: Match Geneve traffic with this VNI */ +#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE 0x1 +/* enum: Reserved for experimental development use */ +#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_EXPERIMENTAL 0xfe +#define MC_CMD_FILTER_OP_EXT_IN_VSID_VALUE_LBN 0 +#define MC_CMD_FILTER_OP_EXT_IN_VSID_VALUE_WIDTH 24 +#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_LBN 24 +#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_WIDTH 8 +/* enum: Match NVGRE traffic with this VSID */ +#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_NVGRE 0x0 +/* source IP address to match (as bytes in network order; set last 12 bytes to + * 0 for IPv4 address) + */ +#define MC_CMD_FILTER_OP_EXT_IN_SRC_IP_OFST 76 +#define MC_CMD_FILTER_OP_EXT_IN_SRC_IP_LEN 16 +/* destination IP address to match (as bytes in network order; set last 12 + * bytes to 0 for IPv4 address) + */ +#define MC_CMD_FILTER_OP_EXT_IN_DST_IP_OFST 92 +#define 
MC_CMD_FILTER_OP_EXT_IN_DST_IP_LEN 16 +/* VXLAN/NVGRE inner frame source MAC address to match (as bytes in network + * order) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_MAC_OFST 108 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_MAC_LEN 6 +/* VXLAN/NVGRE inner frame source port to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_PORT_OFST 114 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_PORT_LEN 2 +/* VXLAN/NVGRE inner frame destination MAC address to match (as bytes in + * network order) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_MAC_OFST 116 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_MAC_LEN 6 +/* VXLAN/NVGRE inner frame destination port to match (as bytes in network + * order) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_PORT_OFST 122 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_PORT_LEN 2 +/* VXLAN/NVGRE inner frame Ethernet type to match (as bytes in network order) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_ETHER_TYPE_OFST 124 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_ETHER_TYPE_LEN 2 +/* VXLAN/NVGRE inner frame Inner VLAN tag to match (as bytes in network order) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_INNER_VLAN_OFST 126 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_INNER_VLAN_LEN 2 +/* VXLAN/NVGRE inner frame Outer VLAN tag to match (as bytes in network order) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_OUTER_VLAN_OFST 128 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_OUTER_VLAN_LEN 2 +/* VXLAN/NVGRE inner frame IP protocol to match (in low byte; set high byte to + * 0) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_IP_PROTO_OFST 130 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_IP_PROTO_LEN 2 +/* VXLAN/NVGRE inner frame Firmware defined register 0 to match (reserved; set + * to 0) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF0_OFST 132 +/* VXLAN/NVGRE inner frame Firmware defined register 1 to match (reserved; set + * to 0) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF1_OFST 136 +/* VXLAN/NVGRE inner frame source IP address to match (as bytes in network + * order; set last 12 bytes to 0 for IPv4 address) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_IP_OFST 140 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_IP_LEN 16 +/* VXLAN/NVGRE inner frame destination IP address to match (as bytes in network + * order; set last 12 bytes to 0 for IPv4 address) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_IP_OFST 156 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_IP_LEN 16 + /* MC_CMD_FILTER_OP_OUT msgresponse */ #define MC_CMD_FILTER_OP_OUT_LEN 12 /* identifies the type of operation requested */ @@ -4629,6 +6282,27 @@ #define MC_CMD_FILTER_OP_OUT_HANDLE_LEN 8 #define MC_CMD_FILTER_OP_OUT_HANDLE_LO_OFST 4 #define MC_CMD_FILTER_OP_OUT_HANDLE_HI_OFST 8 +/* enum: guaranteed invalid filter handle (low 32 bits) */ +#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_INVALID 0xffffffff +/* enum: guaranteed invalid filter handle (high 32 bits) */ +#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_INVALID 0xffffffff + +/* MC_CMD_FILTER_OP_EXT_OUT msgresponse */ +#define MC_CMD_FILTER_OP_EXT_OUT_LEN 12 +/* identifies the type of operation requested */ +#define MC_CMD_FILTER_OP_EXT_OUT_OP_OFST 0 +/* Enum values, see field(s): */ +/* MC_CMD_FILTER_OP_EXT_IN/OP */ +/* Returned filter handle (for insert / subscribe operations). Note that these + * handles should be considered opaque to the host, although a value of + * 0xFFFFFFFF_FFFFFFFF is guaranteed never to be a valid handle. 
+ */ +#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_OFST 4 +#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LEN 8 +#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LO_OFST 4 +#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_HI_OFST 8 +/* Enum values, see field(s): */ +/* MC_CMD_FILTER_OP_OUT/HANDLE */ /***********************************/ @@ -4637,12 +6311,18 @@ */ #define MC_CMD_GET_PARSER_DISP_INFO 0xe4 +#define MC_CMD_0xe4_PRIVILEGE_CTG SRIOV_CTG_GENERAL + /* MC_CMD_GET_PARSER_DISP_INFO_IN msgrequest */ #define MC_CMD_GET_PARSER_DISP_INFO_IN_LEN 4 /* identifies the type of operation requested */ #define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_OFST 0 /* enum: read the list of supported RX filter matches */ #define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES 0x1 +/* enum: read flags indicating restrictions on filter insertion for the calling + * client + */ +#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_RESTRICTIONS 0x2 /* MC_CMD_GET_PARSER_DISP_INFO_OUT msgresponse */ #define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMIN 8 @@ -4662,6 +6342,17 @@ #define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MINNUM 0 #define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM 61 +/* MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT msgresponse */ +#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_LEN 8 +/* identifies the type of operation requested */ +#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_OP_OFST 0 +/* Enum values, see field(s): */ +/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */ +/* bitfield of filter insertion restrictions */ +#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_RESTRICTION_FLAGS_OFST 4 +#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_LBN 0 +#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_WIDTH 1 + /***********************************/ /* MC_CMD_PARSER_DISP_RW @@ -4669,6 +6360,8 @@ */ #define MC_CMD_PARSER_DISP_RW 0xe5 +#define MC_CMD_0xe5_PRIVILEGE_CTG SRIOV_CTG_ADMIN + /* MC_CMD_PARSER_DISP_RW_IN msgrequest */ #define MC_CMD_PARSER_DISP_RW_IN_LEN 32 /* identifies the target of the operation */ @@ -4677,8 +6370,10 @@ #define MC_CMD_PARSER_DISP_RW_IN_RX_DICPU 0x0 /* enum: TX dispatcher CPU */ #define MC_CMD_PARSER_DISP_RW_IN_TX_DICPU 0x1 -/* enum: Lookup engine */ +/* enum: Lookup engine (with original metadata format) */ #define MC_CMD_PARSER_DISP_RW_IN_LUE 0x2 +/* enum: Lookup engine (with requested metadata format) */ +#define MC_CMD_PARSER_DISP_RW_IN_LUE_VERSIONED_METADATA 0x3 /* identifies the type of operation requested */ #define MC_CMD_PARSER_DISP_RW_IN_OP_OFST 4 /* enum: read a word of DICPU DMEM or a LUE entry */ @@ -4695,6 +6390,8 @@ #define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_OFST 12 /* AND mask (for DMEM read-modify-writes: new = (old & mask) ^ value) */ #define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_OFST 16 +/* metadata format (for LUE reads using LUE_VERSIONED_METADATA) */ +#define MC_CMD_PARSER_DISP_RW_IN_LUE_READ_METADATA_VERSION_OFST 12 /* value to write (for LUE writes) */ #define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_OFST 12 #define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_LEN 20 @@ -4719,6 +6416,8 @@ */ #define MC_CMD_GET_PF_COUNT 0xb6 +#define MC_CMD_0xb6_PRIVILEGE_CTG SRIOV_CTG_GENERAL + /* MC_CMD_GET_PF_COUNT_IN msgrequest */ #define MC_CMD_GET_PF_COUNT_IN_LEN 0 @@ -4750,6 +6449,8 @@ */ #define MC_CMD_GET_PORT_ASSIGNMENT 0xb8 +#define MC_CMD_0xb8_PRIVILEGE_CTG SRIOV_CTG_GENERAL + /* MC_CMD_GET_PORT_ASSIGNMENT_IN msgrequest */ #define MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN 0 @@ -4765,6 +6466,8 @@ */ #define MC_CMD_SET_PORT_ASSIGNMENT 0xb9 
+#define MC_CMD_0xb9_PRIVILEGE_CTG SRIOV_CTG_ADMIN + /* MC_CMD_SET_PORT_ASSIGNMENT_IN msgrequest */ #define MC_CMD_SET_PORT_ASSIGNMENT_IN_LEN 4 /* Identifies the port assignment for this function. */ @@ -4780,6 +6483,8 @@ */ #define MC_CMD_ALLOC_VIS 0x8b +#define MC_CMD_0x8b_PRIVILEGE_CTG SRIOV_CTG_GENERAL + /* MC_CMD_ALLOC_VIS_IN msgrequest */ #define MC_CMD_ALLOC_VIS_IN_LEN 8 /* The minimum number of VIs that is acceptable */ @@ -4787,7 +6492,9 @@ /* The maximum number of VIs that would be useful */ #define MC_CMD_ALLOC_VIS_IN_MAX_VI_COUNT_OFST 4 -/* MC_CMD_ALLOC_VIS_OUT msgresponse */ +/* MC_CMD_ALLOC_VIS_OUT msgresponse: Huntington-compatible VI_ALLOC request. + * Use extended version in new code. + */ #define MC_CMD_ALLOC_VIS_OUT_LEN 8 /* The number of VIs allocated on this function */ #define MC_CMD_ALLOC_VIS_OUT_VI_COUNT_OFST 0 @@ -4796,6 +6503,17 @@ */ #define MC_CMD_ALLOC_VIS_OUT_VI_BASE_OFST 4 +/* MC_CMD_ALLOC_VIS_EXT_OUT msgresponse */ +#define MC_CMD_ALLOC_VIS_EXT_OUT_LEN 12 +/* The number of VIs allocated on this function */ +#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_COUNT_OFST 0 +/* The base absolute VI number allocated to this function. Required to + * correctly interpret wakeup events. + */ +#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_BASE_OFST 4 +/* Function's port vi_shift value (always 0 on Huntington) */ +#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_SHIFT_OFST 8 + /***********************************/ /* MC_CMD_FREE_VIS @@ -4804,6 +6522,8 @@ */ #define MC_CMD_FREE_VIS 0x8c +#define MC_CMD_0x8c_PRIVILEGE_CTG SRIOV_CTG_GENERAL + /* MC_CMD_FREE_VIS_IN msgrequest */ #define MC_CMD_FREE_VIS_IN_LEN 0 @@ -4817,6 +6537,8 @@ */ #define MC_CMD_GET_SRIOV_CFG 0xba +#define MC_CMD_0xba_PRIVILEGE_CTG SRIOV_CTG_GENERAL + /* MC_CMD_GET_SRIOV_CFG_IN msgrequest */ #define MC_CMD_GET_SRIOV_CFG_IN_LEN 0 @@ -4841,6 +6563,8 @@ */ #define MC_CMD_SET_SRIOV_CFG 0xbb +#define MC_CMD_0xbb_PRIVILEGE_CTG SRIOV_CTG_ADMIN + /* MC_CMD_SET_SRIOV_CFG_IN msgrequest */ #define MC_CMD_SET_SRIOV_CFG_IN_LEN 20 /* Number of VFs currently enabled. */ @@ -4870,17 +6594,21 @@ */ #define MC_CMD_GET_VI_ALLOC_INFO 0x8d +#define MC_CMD_0x8d_PRIVILEGE_CTG SRIOV_CTG_GENERAL + /* MC_CMD_GET_VI_ALLOC_INFO_IN msgrequest */ #define MC_CMD_GET_VI_ALLOC_INFO_IN_LEN 0 /* MC_CMD_GET_VI_ALLOC_INFO_OUT msgresponse */ -#define MC_CMD_GET_VI_ALLOC_INFO_OUT_LEN 8 +#define MC_CMD_GET_VI_ALLOC_INFO_OUT_LEN 12 /* The number of VIs allocated on this function */ #define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_OFST 0 /* The base absolute VI number allocated to this function. Required to * correctly interpret wakeup events. */ #define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_OFST 4 +/* Function's port vi_shift value (always 0 on Huntington) */ +#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_SHIFT_OFST 8 /***********************************/ @@ -4889,6 +6617,8 @@ */ #define MC_CMD_DUMP_VI_STATE 0x8e +#define MC_CMD_0x8e_PRIVILEGE_CTG SRIOV_CTG_GENERAL + /* MC_CMD_DUMP_VI_STATE_IN msgrequest */ #define MC_CMD_DUMP_VI_STATE_IN_LEN 4 /* The VI number to query. */ @@ -4998,6 +6728,8 @@ */ #define MC_CMD_ALLOC_PIOBUF 0x8f +#define MC_CMD_0x8f_PRIVILEGE_CTG SRIOV_CTG_ONLOAD + /* MC_CMD_ALLOC_PIOBUF_IN msgrequest */ #define MC_CMD_ALLOC_PIOBUF_IN_LEN 0 @@ -5013,6 +6745,8 @@ */ #define MC_CMD_FREE_PIOBUF 0x90 +#define MC_CMD_0x90_PRIVILEGE_CTG SRIOV_CTG_ONLOAD + /* MC_CMD_FREE_PIOBUF_IN msgrequest */ #define MC_CMD_FREE_PIOBUF_IN_LEN 4 /* Handle for allocated push I/O buffer. 
*/ @@ -5028,6 +6762,8 @@ */ #define MC_CMD_GET_VI_TLP_PROCESSING 0xb0 +#define MC_CMD_0xb0_PRIVILEGE_CTG SRIOV_CTG_GENERAL + /* MC_CMD_GET_VI_TLP_PROCESSING_IN msgrequest */ #define MC_CMD_GET_VI_TLP_PROCESSING_IN_LEN 4 /* VI number to get information for. */ @@ -5062,6 +6798,8 @@ */ #define MC_CMD_SET_VI_TLP_PROCESSING 0xb1 +#define MC_CMD_0xb1_PRIVILEGE_CTG SRIOV_CTG_GENERAL + /* MC_CMD_SET_VI_TLP_PROCESSING_IN msgrequest */ #define MC_CMD_SET_VI_TLP_PROCESSING_IN_LEN 8 /* VI number to set information for. */ @@ -5096,6 +6834,8 @@ */ #define MC_CMD_GET_TLP_PROCESSING_GLOBALS 0xbc +#define MC_CMD_0xbc_PRIVILEGE_CTG SRIOV_CTG_ADMIN + /* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN msgrequest */ #define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_LEN 4 #define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0 @@ -5157,6 +6897,8 @@ */ #define MC_CMD_SET_TLP_PROCESSING_GLOBALS 0xbd +#define MC_CMD_0xbd_PRIVILEGE_CTG SRIOV_CTG_ADMIN + /* MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN msgrequest */ #define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_LEN 8 #define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0 @@ -5203,6 +6945,8 @@ */ #define MC_CMD_SATELLITE_DOWNLOAD 0x91 +#define MC_CMD_0x91_PRIVILEGE_CTG SRIOV_CTG_ADMIN + /* MC_CMD_SATELLITE_DOWNLOAD_IN msgrequest: The reset requirements for the CPUs * are subtle, and so downloads must proceed in a number of phases. * @@ -5318,6 +7062,8 @@ */ #define MC_CMD_GET_CAPABILITIES 0xbe +#define MC_CMD_0xbe_PRIVILEGE_CTG SRIOV_CTG_GENERAL + /* MC_CMD_GET_CAPABILITIES_IN msgrequest */ #define MC_CMD_GET_CAPABILITIES_IN_LEN 0 @@ -5325,6 +7071,20 @@ #define MC_CMD_GET_CAPABILITIES_OUT_LEN 20 /* First word of flags. */ #define MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_LBN 12 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_LBN 13 +#define MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_QBB_LBN 14 +#define MC_CMD_GET_CAPABILITIES_OUT_QBB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_LBN 16 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_LBN 17 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_LBN 18 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_WIDTH 1 #define MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_LBN 19 #define MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_WIDTH 1 #define MC_CMD_GET_CAPABILITIES_OUT_RX_VLAN_STRIPPING_LBN 20 @@ -5343,6 +7103,14 @@ #define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_WIDTH 1 #define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN 27 #define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_DISABLE_SCATTER_LBN 28 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN 30 +#define MC_CMD_GET_CAPABILITIES_OUT_EVB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN 31 +#define MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_WIDTH 1 /* RxDPCPU firmware id. 
*/ #define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_OFST 4 #define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_LEN 2 @@ -5350,6 +7118,10 @@ #define MC_CMD_GET_CAPABILITIES_OUT_RXDP 0x0 /* enum: Low latency RXDP firmware */ #define MC_CMD_GET_CAPABILITIES_OUT_RXDP_LOW_LATENCY 0x1 +/* enum: Packed stream RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_PACKED_STREAM 0x2 +/* enum: BIST RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_BIST 0x10a /* enum: RXDP Test firmware image 1 */ #define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 /* enum: RXDP Test firmware image 2 */ @@ -5373,6 +7145,10 @@ #define MC_CMD_GET_CAPABILITIES_OUT_TXDP 0x0 /* enum: Low latency TXDP firmware */ #define MC_CMD_GET_CAPABILITIES_OUT_TXDP_LOW_LATENCY 0x1 +/* enum: High packet rate TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_HIGH_PACKET_RATE 0x3 +/* enum: BIST TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_BIST 0x12d /* enum: TXDP Test firmware image 1 */ #define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 /* enum: TXDP Test firmware image 2 */ @@ -5383,22 +7159,69 @@ #define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_WIDTH 12 #define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_LBN 12 #define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4 -#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 /* enum */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 /* enum */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_VSWITCH 0x3 /* enum */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 /* enum */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial RX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: RX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Virtual switching (full feature) RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant RX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +/* enum: Low latency RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 +/* enum: Packed stream RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 +/* enum: RX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* enum: RX PD firmware parsing but not filtering network overlay tunnel + * encapsulations (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf #define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_OFST 10 #define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_LEN 2 #define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_LBN 0 #define 
MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_WIDTH 12 #define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_LBN 12 #define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4 -#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 /* enum */ -#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 /* enum */ -#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_VSWITCH 0x3 /* enum */ -#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 /* enum */ +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial TX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: TX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Virtual switching (full feature) TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant TX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 #define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ +/* enum: TX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe /* Hardware capabilities of NIC */ #define MC_CMD_GET_CAPABILITIES_OUT_HW_CAPABILITIES_OFST 12 /* Licensed capabilities */ @@ -5433,6 +7256,8 @@ */ #define MC_CMD_TCM_BUCKET_ALLOC 0xb2 +#define MC_CMD_0xb2_PRIVILEGE_CTG SRIOV_CTG_GENERAL + /* MC_CMD_TCM_BUCKET_ALLOC_IN msgrequest */ #define MC_CMD_TCM_BUCKET_ALLOC_IN_LEN 0 @@ -5448,6 +7273,8 @@ */ #define MC_CMD_TCM_BUCKET_FREE 0xb3 +#define MC_CMD_0xb3_PRIVILEGE_CTG SRIOV_CTG_GENERAL + /* MC_CMD_TCM_BUCKET_FREE_IN msgrequest */ #define MC_CMD_TCM_BUCKET_FREE_IN_LEN 4 /* the bucket id */ @@ -5463,6 +7290,8 @@ */ #define MC_CMD_TCM_BUCKET_INIT 0xb4 +#define MC_CMD_0xb4_PRIVILEGE_CTG SRIOV_CTG_GENERAL + /* MC_CMD_TCM_BUCKET_INIT_IN msgrequest */ #define MC_CMD_TCM_BUCKET_INIT_IN_LEN 8 /* the bucket id */ @@ -5470,6 +7299,15 @@ /* the rate in mbps */ #define MC_CMD_TCM_BUCKET_INIT_IN_RATE_OFST 4 +/* MC_CMD_TCM_BUCKET_INIT_EXT_IN msgrequest */ +#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_LEN 12 +/* the bucket id */ +#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_BUCKET_OFST 0 +/* the rate in mbps */ +#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_RATE_OFST 4 +/* the desired maximum fill level */ +#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_MAX_FILL_OFST 8 + /* MC_CMD_TCM_BUCKET_INIT_OUT msgresponse */ #define MC_CMD_TCM_BUCKET_INIT_OUT_LEN 0 @@ -5480,14 +7318,22 @@ */ #define MC_CMD_TCM_TXQ_INIT 0xb5 +#define MC_CMD_0xb5_PRIVILEGE_CTG SRIOV_CTG_GENERAL + /* MC_CMD_TCM_TXQ_INIT_IN msgrequest */ #define MC_CMD_TCM_TXQ_INIT_IN_LEN 28 /* the txq id */ #define MC_CMD_TCM_TXQ_INIT_IN_QID_OFST 0 /* the static priority associated with the txq */ #define MC_CMD_TCM_TXQ_INIT_IN_LABEL_OFST 4 -/* bitmask of the priority queues this txq is inserted into */ +/* bitmask of the priority queues this txq is inserted into when inserted. 
*/ #define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_OFST 8 +#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_LBN 0 +#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_WIDTH 1 +#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_LBN 1 +#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_WIDTH 1 +#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_LBN 2 +#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_WIDTH 1 /* the reaction point (RP) bucket */ #define MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_OFST 12 /* an already reserved bucket (typically set to bucket associated with outer @@ -5501,6 +7347,35 @@ /* the min bucket (typically for ETS/minimum bandwidth) */ #define MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_OFST 24 +/* MC_CMD_TCM_TXQ_INIT_EXT_IN msgrequest */ +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LEN 32 +/* the txq id */ +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_QID_OFST 0 +/* the static priority associated with the txq */ +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_NORMAL_OFST 4 +/* bitmask of the priority queues this txq is inserted into when inserted. */ +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAGS_OFST 8 +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_LBN 0 +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_WIDTH 1 +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_LBN 1 +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_WIDTH 1 +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_LBN 2 +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_WIDTH 1 +/* the reaction point (RP) bucket */ +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_RP_BKT_OFST 12 +/* an already reserved bucket (typically set to bucket associated with outer + * vswitch) + */ +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT1_OFST 16 +/* an already reserved bucket (typically set to bucket associated with inner + * vswitch) + */ +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT2_OFST 20 +/* the min bucket (typically for ETS/minimum bandwidth) */ +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MIN_BKT_OFST 24 +/* the static priority associated with the txq */ +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_GUARANTEED_OFST 28 + /* MC_CMD_TCM_TXQ_INIT_OUT msgresponse */ #define MC_CMD_TCM_TXQ_INIT_OUT_LEN 0 @@ -5511,6 +7386,8 @@ */ #define MC_CMD_LINK_PIOBUF 0x92 +#define MC_CMD_0x92_PRIVILEGE_CTG SRIOV_CTG_ONLOAD + /* MC_CMD_LINK_PIOBUF_IN msgrequest */ #define MC_CMD_LINK_PIOBUF_IN_LEN 8 /* Handle for allocated push I/O buffer. */ @@ -5528,6 +7405,8 @@ */ #define MC_CMD_UNLINK_PIOBUF 0x93 +#define MC_CMD_0x93_PRIVILEGE_CTG SRIOV_CTG_ONLOAD + /* MC_CMD_UNLINK_PIOBUF_IN msgrequest */ #define MC_CMD_UNLINK_PIOBUF_IN_LEN 4 /* Function Local Instance (VI) number. */ @@ -5543,6 +7422,8 @@ */ #define MC_CMD_VSWITCH_ALLOC 0x94 +#define MC_CMD_0x94_PRIVILEGE_CTG SRIOV_CTG_GENERAL + /* MC_CMD_VSWITCH_ALLOC_IN msgrequest */ #define MC_CMD_VSWITCH_ALLOC_IN_LEN 16 /* The port to connect to the v-switch's upstream port. */ @@ -5553,13 +7434,23 @@ #define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VLAN 0x1 /* enum: VEB */ #define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEB 0x2 -/* enum: VEPA */ +/* enum: VEPA (obsolete) */ #define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEPA 0x3 +/* enum: MUX */ +#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_MUX 0x4 +/* enum: Snapper specific; semantics TBD */ +#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_TEST 0x5 /* Flags controlling v-port creation */ #define MC_CMD_VSWITCH_ALLOC_IN_FLAGS_OFST 8 #define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_LBN 0 #define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1 -/* The number of VLAN tags to support. */ +/* The number of VLAN tags to allow for attached v-ports. 
For VLAN aggregators, + * this must be one or greated, and the attached v-ports must have exactly this + * number of tags. For other v-switch types, this must be zero of greater, and + * is an upper limit on the number of VLAN tags for attached v-ports. An error + * will be returned if existing configuration means we can't support attached + * v-ports with this number of tags. + */ #define MC_CMD_VSWITCH_ALLOC_IN_NUM_VLAN_TAGS_OFST 12 /* MC_CMD_VSWITCH_ALLOC_OUT msgresponse */ @@ -5572,6 +7463,8 @@ */ #define MC_CMD_VSWITCH_FREE 0x95 +#define MC_CMD_0x95_PRIVILEGE_CTG SRIOV_CTG_GENERAL + /* MC_CMD_VSWITCH_FREE_IN msgrequest */ #define MC_CMD_VSWITCH_FREE_IN_LEN 4 /* The port to which the v-switch is connected. */ @@ -5587,6 +7480,8 @@ */ #define MC_CMD_VPORT_ALLOC 0x96 +#define MC_CMD_0x96_PRIVILEGE_CTG SRIOV_CTG_GENERAL + /* MC_CMD_VPORT_ALLOC_IN msgrequest */ #define MC_CMD_VPORT_ALLOC_IN_LEN 20 /* The port to which the v-switch is connected. */ @@ -5615,7 +7510,10 @@ #define MC_CMD_VPORT_ALLOC_IN_FLAGS_OFST 8 #define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_LBN 0 #define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1 -/* The number of VLAN tags to insert/remove. */ +/* The number of VLAN tags to insert/remove. An error will be returned if + * incompatible with the number of VLAN tags specified for the upstream + * v-switch. + */ #define MC_CMD_VPORT_ALLOC_IN_NUM_VLAN_TAGS_OFST 12 /* The actual VLAN tags to insert/remove */ #define MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_OFST 16 @@ -5636,6 +7534,8 @@ */ #define MC_CMD_VPORT_FREE 0x97 +#define MC_CMD_0x97_PRIVILEGE_CTG SRIOV_CTG_GENERAL + /* MC_CMD_VPORT_FREE_IN msgrequest */ #define MC_CMD_VPORT_FREE_IN_LEN 4 /* The handle of the v-port */ @@ -5651,8 +7551,10 @@ */ #define MC_CMD_VADAPTOR_ALLOC 0x98 +#define MC_CMD_0x98_PRIVILEGE_CTG SRIOV_CTG_GENERAL + /* MC_CMD_VADAPTOR_ALLOC_IN msgrequest */ -#define MC_CMD_VADAPTOR_ALLOC_IN_LEN 16 +#define MC_CMD_VADAPTOR_ALLOC_IN_LEN 30 /* The port to connect to the v-adaptor's port. */ #define MC_CMD_VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0 /* Flags controlling v-adaptor creation */ @@ -5661,6 +7563,19 @@ #define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_WIDTH 1 /* The number of VLAN tags to strip on receive */ #define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLANS_OFST 12 +/* The number of VLAN tags to transparently insert/remove. */ +#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLAN_TAGS_OFST 16 +/* The actual VLAN tags to insert/remove */ +#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAGS_OFST 20 +#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_LBN 0 +#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_WIDTH 16 +#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_1_LBN 16 +#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_1_WIDTH 16 +/* The MAC address to assign to this v-adaptor */ +#define MC_CMD_VADAPTOR_ALLOC_IN_MACADDR_OFST 24 +#define MC_CMD_VADAPTOR_ALLOC_IN_MACADDR_LEN 6 +/* enum: Derive the MAC address from the upstream port */ +#define MC_CMD_VADAPTOR_ALLOC_IN_AUTO_MAC 0x0 /* MC_CMD_VADAPTOR_ALLOC_OUT msgresponse */ #define MC_CMD_VADAPTOR_ALLOC_OUT_LEN 0 @@ -5672,6 +7587,8 @@ */ #define MC_CMD_VADAPTOR_FREE 0x99 +#define MC_CMD_0x99_PRIVILEGE_CTG SRIOV_CTG_GENERAL + /* MC_CMD_VADAPTOR_FREE_IN msgrequest */ #define MC_CMD_VADAPTOR_FREE_IN_LEN 4 /* The port to which the v-adaptor is connected. */ @@ -5682,11 +7599,53 @@ /***********************************/ +/* MC_CMD_VADAPTOR_SET_MAC + * assign a new MAC address to a v-adaptor. 
+ */ +#define MC_CMD_VADAPTOR_SET_MAC 0x5d + +#define MC_CMD_0x5d_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_VADAPTOR_SET_MAC_IN msgrequest */ +#define MC_CMD_VADAPTOR_SET_MAC_IN_LEN 10 +/* The port to which the v-adaptor is connected. */ +#define MC_CMD_VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID_OFST 0 +/* The new MAC address to assign to this v-adaptor */ +#define MC_CMD_VADAPTOR_SET_MAC_IN_MACADDR_OFST 4 +#define MC_CMD_VADAPTOR_SET_MAC_IN_MACADDR_LEN 6 + +/* MC_CMD_VADAPTOR_SET_MAC_OUT msgresponse */ +#define MC_CMD_VADAPTOR_SET_MAC_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_VADAPTOR_GET_MAC + * read the MAC address assigned to a v-adaptor. + */ +#define MC_CMD_VADAPTOR_GET_MAC 0x5e + +#define MC_CMD_0x5e_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_VADAPTOR_GET_MAC_IN msgrequest */ +#define MC_CMD_VADAPTOR_GET_MAC_IN_LEN 4 +/* The port to which the v-adaptor is connected. */ +#define MC_CMD_VADAPTOR_GET_MAC_IN_UPSTREAM_PORT_ID_OFST 0 + +/* MC_CMD_VADAPTOR_GET_MAC_OUT msgresponse */ +#define MC_CMD_VADAPTOR_GET_MAC_OUT_LEN 6 +/* The MAC address assigned to this v-adaptor */ +#define MC_CMD_VADAPTOR_GET_MAC_OUT_MACADDR_OFST 0 +#define MC_CMD_VADAPTOR_GET_MAC_OUT_MACADDR_LEN 6 + + +/***********************************/ /* MC_CMD_EVB_PORT_ASSIGN * assign a port to a PCI function. */ #define MC_CMD_EVB_PORT_ASSIGN 0x9a +#define MC_CMD_0x9a_PRIVILEGE_CTG SRIOV_CTG_GENERAL + /* MC_CMD_EVB_PORT_ASSIGN_IN msgrequest */ #define MC_CMD_EVB_PORT_ASSIGN_IN_LEN 8 /* The port to assign. */ @@ -5708,6 +7667,8 @@ */ #define MC_CMD_RDWR_A64_REGIONS 0x9b +#define MC_CMD_0x9b_PRIVILEGE_CTG SRIOV_CTG_ADMIN + /* MC_CMD_RDWR_A64_REGIONS_IN msgrequest */ #define MC_CMD_RDWR_A64_REGIONS_IN_LEN 17 #define MC_CMD_RDWR_A64_REGIONS_IN_REGION0_OFST 0 @@ -5736,6 +7697,8 @@ */ #define MC_CMD_ONLOAD_STACK_ALLOC 0x9c +#define MC_CMD_0x9c_PRIVILEGE_CTG SRIOV_CTG_ONLOAD + /* MC_CMD_ONLOAD_STACK_ALLOC_IN msgrequest */ #define MC_CMD_ONLOAD_STACK_ALLOC_IN_LEN 4 /* The handle of the owning upstream port */ @@ -5753,6 +7716,8 @@ */ #define MC_CMD_ONLOAD_STACK_FREE 0x9d +#define MC_CMD_0x9d_PRIVILEGE_CTG SRIOV_CTG_ONLOAD + /* MC_CMD_ONLOAD_STACK_FREE_IN msgrequest */ #define MC_CMD_ONLOAD_STACK_FREE_IN_LEN 4 /* The handle of the Onload stack */ @@ -5768,6 +7733,8 @@ */ #define MC_CMD_RSS_CONTEXT_ALLOC 0x9e +#define MC_CMD_0x9e_PRIVILEGE_CTG SRIOV_CTG_GENERAL + /* MC_CMD_RSS_CONTEXT_ALLOC_IN msgrequest */ #define MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN 12 /* The handle of the owning upstream port */ @@ -5790,8 +7757,13 @@ /* MC_CMD_RSS_CONTEXT_ALLOC_OUT msgresponse */ #define MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN 4 -/* The handle of the new RSS context */ +/* The handle of the new RSS context. This should be considered opaque to the + * host, although a value of 0xFFFFFFFF is guaranteed never to be a valid + * handle. 
+ */ #define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_OFST 0 +/* enum: guaranteed invalid RSS context handle value */ +#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_INVALID 0xffffffff /***********************************/ @@ -5800,6 +7772,8 @@ */ #define MC_CMD_RSS_CONTEXT_FREE 0x9f +#define MC_CMD_0x9f_PRIVILEGE_CTG SRIOV_CTG_GENERAL + /* MC_CMD_RSS_CONTEXT_FREE_IN msgrequest */ #define MC_CMD_RSS_CONTEXT_FREE_IN_LEN 4 /* The handle of the RSS context */ @@ -5815,6 +7789,8 @@ */ #define MC_CMD_RSS_CONTEXT_SET_KEY 0xa0 +#define MC_CMD_0xa0_PRIVILEGE_CTG SRIOV_CTG_GENERAL + /* MC_CMD_RSS_CONTEXT_SET_KEY_IN msgrequest */ #define MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN 44 /* The handle of the RSS context */ @@ -5833,6 +7809,8 @@ */ #define MC_CMD_RSS_CONTEXT_GET_KEY 0xa1 +#define MC_CMD_0xa1_PRIVILEGE_CTG SRIOV_CTG_GENERAL + /* MC_CMD_RSS_CONTEXT_GET_KEY_IN msgrequest */ #define MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN 4 /* The handle of the RSS context */ @@ -5851,6 +7829,8 @@ */ #define MC_CMD_RSS_CONTEXT_SET_TABLE 0xa2 +#define MC_CMD_0xa2_PRIVILEGE_CTG SRIOV_CTG_GENERAL + /* MC_CMD_RSS_CONTEXT_SET_TABLE_IN msgrequest */ #define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN 132 /* The handle of the RSS context */ @@ -5869,6 +7849,8 @@ */ #define MC_CMD_RSS_CONTEXT_GET_TABLE 0xa3 +#define MC_CMD_0xa3_PRIVILEGE_CTG SRIOV_CTG_GENERAL + /* MC_CMD_RSS_CONTEXT_GET_TABLE_IN msgrequest */ #define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN 4 /* The handle of the RSS context */ @@ -5887,11 +7869,17 @@ */ #define MC_CMD_RSS_CONTEXT_SET_FLAGS 0xe1 +#define MC_CMD_0xe1_PRIVILEGE_CTG SRIOV_CTG_GENERAL + /* MC_CMD_RSS_CONTEXT_SET_FLAGS_IN msgrequest */ #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN 8 /* The handle of the RSS context */ #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0 -/* Hash control flags */ +/* Hash control flags. The _EN bits are always supported. The _MODE bits only + * work when the firmware reports ADDITIONAL_RSS_MODES in + * MC_CMD_GET_CAPABILITIES and override the _EN bits if any of them are not 0. + * See the RSS_MODE structure for the meaning of the mode bits. 
+ */ #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_OFST 4 #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_LBN 0 #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_WIDTH 1 @@ -5901,6 +7889,20 @@ #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_WIDTH 1 #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_LBN 3 #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_WIDTH 1 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RESERVED_LBN 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RESERVED_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_LBN 8 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE_LBN 12 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_LBN 16 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_LBN 20 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV6_RSS_MODE_LBN 24 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV6_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_LBN 28 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_WIDTH 4 /* MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT msgresponse */ #define MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN 0 @@ -5912,6 +7914,8 @@ */ #define MC_CMD_RSS_CONTEXT_GET_FLAGS 0xe2 +#define MC_CMD_0xe2_PRIVILEGE_CTG SRIOV_CTG_GENERAL + /* MC_CMD_RSS_CONTEXT_GET_FLAGS_IN msgrequest */ #define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_LEN 4 /* The handle of the RSS context */ @@ -5919,7 +7923,12 @@ /* MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT msgresponse */ #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN 8 -/* Hash control flags */ +/* Hash control flags. If any _MODE bits are non-zero (which will only be true + * when the firmware reports ADDITIONAL_RSS_MODES) then the _EN bits should be + * disregarded (but are guaranteed to be consistent with the _MODE bits if + * RSS_CONTEXT_SET_FLAGS has never been called for this context since it was + * allocated). 
+ */ #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST 4 #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN 0 #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_WIDTH 1 @@ -5929,6 +7938,20 @@ #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_WIDTH 1 #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_LBN 3 #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_WIDTH 1 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_RESERVED_LBN 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_RESERVED_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_LBN 8 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN 12 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_LBN 16 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_LBN 20 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN 24 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_LBN 28 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_WIDTH 4 /***********************************/ @@ -5937,6 +7960,8 @@ */ #define MC_CMD_DOT1P_MAPPING_ALLOC 0xa4 +#define MC_CMD_0xa4_PRIVILEGE_CTG SRIOV_CTG_ADMIN + /* MC_CMD_DOT1P_MAPPING_ALLOC_IN msgrequest */ #define MC_CMD_DOT1P_MAPPING_ALLOC_IN_LEN 8 /* The handle of the owning upstream port */ @@ -5949,8 +7974,13 @@ /* MC_CMD_DOT1P_MAPPING_ALLOC_OUT msgresponse */ #define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_LEN 4 -/* The handle of the new .1p mapping */ +/* The handle of the new .1p mapping. This should be considered opaque to the + * host, although a value of 0xFFFFFFFF is guaranteed never to be a valid + * handle. 
+ */ #define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_OFST 0 +/* enum: guaranteed invalid .1p mapping handle value */ +#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_INVALID 0xffffffff /***********************************/ @@ -5959,6 +7989,8 @@ */ #define MC_CMD_DOT1P_MAPPING_FREE 0xa5 +#define MC_CMD_0xa5_PRIVILEGE_CTG SRIOV_CTG_ADMIN + /* MC_CMD_DOT1P_MAPPING_FREE_IN msgrequest */ #define MC_CMD_DOT1P_MAPPING_FREE_IN_LEN 4 /* The handle of the .1p mapping */ @@ -5974,6 +8006,8 @@ */ #define MC_CMD_DOT1P_MAPPING_SET_TABLE 0xa6 +#define MC_CMD_0xa6_PRIVILEGE_CTG SRIOV_CTG_ADMIN + /* MC_CMD_DOT1P_MAPPING_SET_TABLE_IN msgrequest */ #define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_LEN 36 /* The handle of the .1p mapping */ @@ -5994,6 +8028,8 @@ */ #define MC_CMD_DOT1P_MAPPING_GET_TABLE 0xa7 +#define MC_CMD_0xa7_PRIVILEGE_CTG SRIOV_CTG_ADMIN + /* MC_CMD_DOT1P_MAPPING_GET_TABLE_IN msgrequest */ #define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_LEN 4 /* The handle of the .1p mapping */ @@ -6014,6 +8050,8 @@ */ #define MC_CMD_GET_VECTOR_CFG 0xbf +#define MC_CMD_0xbf_PRIVILEGE_CTG SRIOV_CTG_GENERAL + /* MC_CMD_GET_VECTOR_CFG_IN msgrequest */ #define MC_CMD_GET_VECTOR_CFG_IN_LEN 0 @@ -6033,6 +8071,8 @@ */ #define MC_CMD_SET_VECTOR_CFG 0xc0 +#define MC_CMD_0xc0_PRIVILEGE_CTG SRIOV_CTG_GENERAL + /* MC_CMD_SET_VECTOR_CFG_IN msgrequest */ #define MC_CMD_SET_VECTOR_CFG_IN_LEN 12 /* Base absolute interrupt vector number, or MC_CMD_RESOURCE_INSTANCE_ANY to @@ -6049,380 +8089,13 @@ /***********************************/ -/* MC_CMD_RMON_RX_CLASS_STATS - * Retrieve rmon rx class statistics - */ -#define MC_CMD_RMON_RX_CLASS_STATS 0xc3 - -/* MC_CMD_RMON_RX_CLASS_STATS_IN msgrequest */ -#define MC_CMD_RMON_RX_CLASS_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_RX_CLASS_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_RX_CLASS_STATS_IN_CLASS_LBN 0 -#define MC_CMD_RMON_RX_CLASS_STATS_IN_CLASS_WIDTH 8 -#define MC_CMD_RMON_RX_CLASS_STATS_IN_RST_LBN 8 -#define MC_CMD_RMON_RX_CLASS_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_RX_CLASS_STATS_OUT msgresponse */ -#define MC_CMD_RMON_RX_CLASS_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_RX_CLASS_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_RX_CLASS_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_TX_CLASS_STATS - * Retrieve rmon tx class statistics - */ -#define MC_CMD_RMON_TX_CLASS_STATS 0xc4 - -/* MC_CMD_RMON_TX_CLASS_STATS_IN msgrequest */ -#define MC_CMD_RMON_TX_CLASS_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_TX_CLASS_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_TX_CLASS_STATS_IN_CLASS_LBN 0 -#define MC_CMD_RMON_TX_CLASS_STATS_IN_CLASS_WIDTH 8 -#define MC_CMD_RMON_TX_CLASS_STATS_IN_RST_LBN 8 -#define MC_CMD_RMON_TX_CLASS_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_TX_CLASS_STATS_OUT msgresponse */ -#define MC_CMD_RMON_TX_CLASS_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_TX_CLASS_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_TX_CLASS_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_RX_SUPER_CLASS_STATS - * Retrieve rmon rx super_class 
statistics - */ -#define MC_CMD_RMON_RX_SUPER_CLASS_STATS 0xc5 - -/* MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN msgrequest */ -#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_SUPER_CLASS_LBN 0 -#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_SUPER_CLASS_WIDTH 4 -#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_RST_LBN 4 -#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT msgresponse */ -#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_TX_SUPER_CLASS_STATS - * Retrieve rmon tx super_class statistics - */ -#define MC_CMD_RMON_TX_SUPER_CLASS_STATS 0xc6 - -/* MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN msgrequest */ -#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_SUPER_CLASS_LBN 0 -#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_SUPER_CLASS_WIDTH 4 -#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_RST_LBN 4 -#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT msgresponse */ -#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_RX_ADD_QID_TO_CLASS - * Add qid to class for statistics collection - */ -#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS 0xc7 - -/* MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN msgrequest */ -#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_LEN 12 -/* class */ -#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_CLASS_OFST 0 -/* qid */ -#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_QID_OFST 4 -/* flags */ -#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_FLAGS_OFST 8 -#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_LBN 0 -#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_WIDTH 4 -#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_PE_DELTA_LBN 4 -#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_PE_DELTA_WIDTH 4 -#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_MTU_LBN 8 -#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_MTU_WIDTH 14 - -/* MC_CMD_RMON_RX_ADD_QID_TO_CLASS_OUT msgresponse */ -#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_OUT_LEN 0 - - -/***********************************/ -/* MC_CMD_RMON_TX_ADD_QID_TO_CLASS - * Add qid to class for statistics collection - */ -#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS 0xc8 - -/* MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN msgrequest */ -#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_LEN 12 -/* class */ -#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_CLASS_OFST 0 -/* qid */ -#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_QID_OFST 4 -/* flags */ -#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_FLAGS_OFST 8 -#define 
MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_LBN 0 -#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_WIDTH 4 -#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_PE_DELTA_LBN 4 -#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_PE_DELTA_WIDTH 4 -#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_MTU_LBN 8 -#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_MTU_WIDTH 14 - -/* MC_CMD_RMON_TX_ADD_QID_TO_CLASS_OUT msgresponse */ -#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_OUT_LEN 0 - - -/***********************************/ -/* MC_CMD_RMON_MC_ADD_QID_TO_CLASS - * Add qid to class for statistics collection - */ -#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS 0xc9 - -/* MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN msgrequest */ -#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_LEN 12 -/* class */ -#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_CLASS_OFST 0 -/* qid */ -#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_QID_OFST 4 -/* flags */ -#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_FLAGS_OFST 8 -#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_SUPER_CLASS_LBN 0 -#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_SUPER_CLASS_WIDTH 4 -#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_PE_DELTA_LBN 4 -#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_PE_DELTA_WIDTH 4 -#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_MTU_LBN 8 -#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_MTU_WIDTH 14 - -/* MC_CMD_RMON_MC_ADD_QID_TO_CLASS_OUT msgresponse */ -#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_OUT_LEN 0 - - -/***********************************/ -/* MC_CMD_RMON_ALLOC_CLASS - * Allocate an rmon class - */ -#define MC_CMD_RMON_ALLOC_CLASS 0xca - -/* MC_CMD_RMON_ALLOC_CLASS_IN msgrequest */ -#define MC_CMD_RMON_ALLOC_CLASS_IN_LEN 0 - -/* MC_CMD_RMON_ALLOC_CLASS_OUT msgresponse */ -#define MC_CMD_RMON_ALLOC_CLASS_OUT_LEN 4 -/* class */ -#define MC_CMD_RMON_ALLOC_CLASS_OUT_CLASS_OFST 0 - - -/***********************************/ -/* MC_CMD_RMON_DEALLOC_CLASS - * Deallocate an rmon class - */ -#define MC_CMD_RMON_DEALLOC_CLASS 0xcb - -/* MC_CMD_RMON_DEALLOC_CLASS_IN msgrequest */ -#define MC_CMD_RMON_DEALLOC_CLASS_IN_LEN 4 -/* class */ -#define MC_CMD_RMON_DEALLOC_CLASS_IN_CLASS_OFST 0 - -/* MC_CMD_RMON_DEALLOC_CLASS_OUT msgresponse */ -#define MC_CMD_RMON_DEALLOC_CLASS_OUT_LEN 0 - - -/***********************************/ -/* MC_CMD_RMON_ALLOC_SUPER_CLASS - * Allocate an rmon super_class - */ -#define MC_CMD_RMON_ALLOC_SUPER_CLASS 0xcc - -/* MC_CMD_RMON_ALLOC_SUPER_CLASS_IN msgrequest */ -#define MC_CMD_RMON_ALLOC_SUPER_CLASS_IN_LEN 0 - -/* MC_CMD_RMON_ALLOC_SUPER_CLASS_OUT msgresponse */ -#define MC_CMD_RMON_ALLOC_SUPER_CLASS_OUT_LEN 4 -/* super_class */ -#define MC_CMD_RMON_ALLOC_SUPER_CLASS_OUT_SUPER_CLASS_OFST 0 - - -/***********************************/ -/* MC_CMD_RMON_DEALLOC_SUPER_CLASS - * Deallocate an rmon tx super_class - */ -#define MC_CMD_RMON_DEALLOC_SUPER_CLASS 0xcd - -/* MC_CMD_RMON_DEALLOC_SUPER_CLASS_IN msgrequest */ -#define MC_CMD_RMON_DEALLOC_SUPER_CLASS_IN_LEN 4 -/* super_class */ -#define MC_CMD_RMON_DEALLOC_SUPER_CLASS_IN_SUPER_CLASS_OFST 0 - -/* MC_CMD_RMON_DEALLOC_SUPER_CLASS_OUT msgresponse */ -#define MC_CMD_RMON_DEALLOC_SUPER_CLASS_OUT_LEN 0 - - -/***********************************/ -/* MC_CMD_RMON_RX_UP_CONV_STATS - * Retrieve up converter statistics - */ -#define MC_CMD_RMON_RX_UP_CONV_STATS 0xce - -/* MC_CMD_RMON_RX_UP_CONV_STATS_IN msgrequest */ -#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_PORT_LBN 0 -#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_PORT_WIDTH 2 -#define 
MC_CMD_RMON_RX_UP_CONV_STATS_IN_RST_LBN 2 -#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_RX_UP_CONV_STATS_OUT msgresponse */ -#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_RX_IPI_STATS - * Retrieve rx ipi stats - */ -#define MC_CMD_RMON_RX_IPI_STATS 0xcf - -/* MC_CMD_RMON_RX_IPI_STATS_IN msgrequest */ -#define MC_CMD_RMON_RX_IPI_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_RX_IPI_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_RX_IPI_STATS_IN_VFIFO_LBN 0 -#define MC_CMD_RMON_RX_IPI_STATS_IN_VFIFO_WIDTH 5 -#define MC_CMD_RMON_RX_IPI_STATS_IN_RST_LBN 5 -#define MC_CMD_RMON_RX_IPI_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_RX_IPI_STATS_OUT msgresponse */ -#define MC_CMD_RMON_RX_IPI_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_RX_IPI_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_RX_IPI_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS - * Retrieve rx ipsec cntxt_ptr indexed stats - */ -#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS 0xd0 - -/* MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN msgrequest */ -#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_LBN 0 -#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_WIDTH 9 -#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_RST_LBN 9 -#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT msgresponse */ -#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_RX_IPSEC_PORT_STATS - * Retrieve rx ipsec port indexed stats - */ -#define MC_CMD_RMON_RX_IPSEC_PORT_STATS 0xd1 - -/* MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN msgrequest */ -#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_PORT_LBN 0 -#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_PORT_WIDTH 2 -#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_RST_LBN 2 -#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT msgresponse */ -#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_OFST 0 -#define 
MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_RX_IPSEC_OFLOW_STATS - * Retrieve tx ipsec overflow - */ -#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS 0xd2 - -/* MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN msgrequest */ -#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_PORT_LBN 0 -#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_PORT_WIDTH 2 -#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_RST_LBN 2 -#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT msgresponse */ -#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ /* MC_CMD_VPORT_ADD_MAC_ADDRESS * Add a MAC address to a v-port */ #define MC_CMD_VPORT_ADD_MAC_ADDRESS 0xa8 +#define MC_CMD_0xa8_PRIVILEGE_CTG SRIOV_CTG_GENERAL + /* MC_CMD_VPORT_ADD_MAC_ADDRESS_IN msgrequest */ #define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN 10 /* The handle of the v-port */ @@ -6441,6 +8114,8 @@ */ #define MC_CMD_VPORT_DEL_MAC_ADDRESS 0xa9 +#define MC_CMD_0xa9_PRIVILEGE_CTG SRIOV_CTG_GENERAL + /* MC_CMD_VPORT_DEL_MAC_ADDRESS_IN msgrequest */ #define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN 10 /* The handle of the v-port */ @@ -6459,6 +8134,8 @@ */ #define MC_CMD_VPORT_GET_MAC_ADDRESSES 0xaa +#define MC_CMD_0xaa_PRIVILEGE_CTG SRIOV_CTG_GENERAL + /* MC_CMD_VPORT_GET_MAC_ADDRESSES_IN msgrequest */ #define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN 4 /* The handle of the v-port */ @@ -6486,6 +8163,8 @@ */ #define MC_CMD_DUMP_BUFTBL_ENTRIES 0xab +#define MC_CMD_0xab_PRIVILEGE_CTG SRIOV_CTG_ADMIN + /* MC_CMD_DUMP_BUFTBL_ENTRIES_IN msgrequest */ #define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_LEN 8 /* Index of the first buffer table entry. */ @@ -6497,7 +8176,7 @@ #define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMIN 12 #define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMAX 252 #define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LEN(num) (0+12*(num)) -/* Raw buffer table entries, laid out as BUFTBL_ENTRY. */ +/* Raw buffer table entries, layed out as BUFTBL_ENTRY. 
*/ #define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_OFST 0 #define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_LEN 12 #define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MINNUM 1 @@ -6510,6 +8189,8 @@ */ #define MC_CMD_SET_RXDP_CONFIG 0xc1 +#define MC_CMD_0xc1_PRIVILEGE_CTG SRIOV_CTG_ADMIN + /* MC_CMD_SET_RXDP_CONFIG_IN msgrequest */ #define MC_CMD_SET_RXDP_CONFIG_IN_LEN 4 #define MC_CMD_SET_RXDP_CONFIG_IN_DATA_OFST 0 @@ -6526,6 +8207,8 @@ */ #define MC_CMD_GET_RXDP_CONFIG 0xc2 +#define MC_CMD_0xc2_PRIVILEGE_CTG SRIOV_CTG_ADMIN + /* MC_CMD_GET_RXDP_CONFIG_IN msgrequest */ #define MC_CMD_GET_RXDP_CONFIG_IN_LEN 0 @@ -6537,359 +8220,13 @@ /***********************************/ -/* MC_CMD_RMON_RX_CLASS_DROPS_STATS - * Retrieve rx class drop stats - */ -#define MC_CMD_RMON_RX_CLASS_DROPS_STATS 0xd3 - -/* MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN msgrequest */ -#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_CLASS_LBN 0 -#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_CLASS_WIDTH 8 -#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_RST_LBN 8 -#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT msgresponse */ -#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS - * Retrieve rx super class drop stats - */ -#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS 0xd4 - -/* MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN msgrequest */ -#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_SUPER_CLASS_LBN 0 -#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_SUPER_CLASS_WIDTH 4 -#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_RST_LBN 4 -#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT msgresponse */ -#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_RX_ERRORS_STATS - * Retrieve rxdp errors - */ -#define MC_CMD_RMON_RX_ERRORS_STATS 0xd5 - -/* MC_CMD_RMON_RX_ERRORS_STATS_IN msgrequest */ -#define MC_CMD_RMON_RX_ERRORS_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_RX_ERRORS_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_RX_ERRORS_STATS_IN_QID_LBN 0 -#define MC_CMD_RMON_RX_ERRORS_STATS_IN_QID_WIDTH 11 -#define MC_CMD_RMON_RX_ERRORS_STATS_IN_RST_LBN 11 -#define MC_CMD_RMON_RX_ERRORS_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_RX_ERRORS_STATS_OUT msgresponse */ -#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_LENMIN 4 -#define 
MC_CMD_RMON_RX_ERRORS_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_RX_OVERFLOW_STATS - * Retrieve rxdp overflow - */ -#define MC_CMD_RMON_RX_OVERFLOW_STATS 0xd6 - -/* MC_CMD_RMON_RX_OVERFLOW_STATS_IN msgrequest */ -#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_CLASS_LBN 0 -#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_CLASS_WIDTH 8 -#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_RST_LBN 8 -#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_RX_OVERFLOW_STATS_OUT msgresponse */ -#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_TX_IPI_STATS - * Retrieve tx ipi stats - */ -#define MC_CMD_RMON_TX_IPI_STATS 0xd7 - -/* MC_CMD_RMON_TX_IPI_STATS_IN msgrequest */ -#define MC_CMD_RMON_TX_IPI_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_TX_IPI_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_TX_IPI_STATS_IN_VFIFO_LBN 0 -#define MC_CMD_RMON_TX_IPI_STATS_IN_VFIFO_WIDTH 5 -#define MC_CMD_RMON_TX_IPI_STATS_IN_RST_LBN 5 -#define MC_CMD_RMON_TX_IPI_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_TX_IPI_STATS_OUT msgresponse */ -#define MC_CMD_RMON_TX_IPI_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_TX_IPI_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_TX_IPI_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS - * Retrieve tx ipsec counters by cntxt_ptr - */ -#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS 0xd8 - -/* MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN msgrequest */ -#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_LBN 0 -#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_WIDTH 9 -#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_RST_LBN 9 -#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT msgresponse */ -#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_TX_IPSEC_PORT_STATS - * Retrieve tx 
ipsec counters by port - */ -#define MC_CMD_RMON_TX_IPSEC_PORT_STATS 0xd9 - -/* MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN msgrequest */ -#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_PORT_LBN 0 -#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_PORT_WIDTH 2 -#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_RST_LBN 2 -#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT msgresponse */ -#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_TX_IPSEC_OFLOW_STATS - * Retrieve tx ipsec overflow - */ -#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS 0xda - -/* MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN msgrequest */ -#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_PORT_LBN 0 -#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_PORT_WIDTH 2 -#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_RST_LBN 2 -#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT msgresponse */ -#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_TX_NOWHERE_STATS - * Retrieve tx nowhere stats - */ -#define MC_CMD_RMON_TX_NOWHERE_STATS 0xdb - -/* MC_CMD_RMON_TX_NOWHERE_STATS_IN msgrequest */ -#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_CLASS_LBN 0 -#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_CLASS_WIDTH 8 -#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_RST_LBN 8 -#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_TX_NOWHERE_STATS_OUT msgresponse */ -#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_TX_NOWHERE_QBB_STATS - * Retrieve tx nowhere qbb stats - */ -#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS 0xdc - -/* MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN msgrequest */ -#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_PRIORITY_LBN 0 -#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_PRIORITY_WIDTH 3 -#define 
MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_RST_LBN 3 -#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT msgresponse */ -#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_TX_ERRORS_STATS - * Retrieve rxdp errors - */ -#define MC_CMD_RMON_TX_ERRORS_STATS 0xdd - -/* MC_CMD_RMON_TX_ERRORS_STATS_IN msgrequest */ -#define MC_CMD_RMON_TX_ERRORS_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_TX_ERRORS_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_TX_ERRORS_STATS_IN_QID_LBN 0 -#define MC_CMD_RMON_TX_ERRORS_STATS_IN_QID_WIDTH 11 -#define MC_CMD_RMON_TX_ERRORS_STATS_IN_RST_LBN 11 -#define MC_CMD_RMON_TX_ERRORS_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_TX_ERRORS_STATS_OUT msgresponse */ -#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_TX_OVERFLOW_STATS - * Retrieve rxdp overflow - */ -#define MC_CMD_RMON_TX_OVERFLOW_STATS 0xde - -/* MC_CMD_RMON_TX_OVERFLOW_STATS_IN msgrequest */ -#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_CLASS_LBN 0 -#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_CLASS_WIDTH 8 -#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_RST_LBN 8 -#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_TX_OVERFLOW_STATS_OUT msgresponse */ -#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_COLLECT_CLASS_STATS - * Explicitly collect class stats at the specified evb port - */ -#define MC_CMD_RMON_COLLECT_CLASS_STATS 0xdf - -/* MC_CMD_RMON_COLLECT_CLASS_STATS_IN msgrequest */ -#define MC_CMD_RMON_COLLECT_CLASS_STATS_IN_LEN 4 -/* The port id associated with the vport/pport at which to collect class stats - */ -#define MC_CMD_RMON_COLLECT_CLASS_STATS_IN_PORT_ID_OFST 0 - -/* MC_CMD_RMON_COLLECT_CLASS_STATS_OUT msgresponse */ -#define MC_CMD_RMON_COLLECT_CLASS_STATS_OUT_LEN 4 -/* class */ -#define MC_CMD_RMON_COLLECT_CLASS_STATS_OUT_CLASS_OFST 0 - - -/***********************************/ -/* MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS - * Explicitly collect class stats at the specified evb port - */ -#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS 0xe0 - -/* MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_IN msgrequest */ -#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_IN_LEN 4 -/* The port 
id associated with the vport/pport at which to collect class stats - */ -#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_IN_PORT_ID_OFST 0 - -/* MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_OUT msgresponse */ -#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_OUT_LEN 4 -/* super_class */ -#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_OUT_SUPER_CLASS_OFST 0 - - -/***********************************/ /* MC_CMD_GET_CLOCK * Return the system and PDCPU clock frequencies. */ #define MC_CMD_GET_CLOCK 0xac +#define MC_CMD_0xac_PRIVILEGE_CTG SRIOV_CTG_GENERAL + /* MC_CMD_GET_CLOCK_IN msgrequest */ #define MC_CMD_GET_CLOCK_IN_LEN 0 @@ -6907,23 +8244,69 @@ */ #define MC_CMD_SET_CLOCK 0xad +#define MC_CMD_0xad_PRIVILEGE_CTG SRIOV_CTG_ADMIN + /* MC_CMD_SET_CLOCK_IN msgrequest */ -#define MC_CMD_SET_CLOCK_IN_LEN 12 -/* Requested system frequency in MHz; 0 leaves unchanged. */ +#define MC_CMD_SET_CLOCK_IN_LEN 28 +/* Requested frequency in MHz for system clock domain */ #define MC_CMD_SET_CLOCK_IN_SYS_FREQ_OFST 0 -/* Requested inter-core frequency in MHz; 0 leaves unchanged. */ +/* enum: Leave the system clock domain frequency unchanged */ +#define MC_CMD_SET_CLOCK_IN_SYS_DOMAIN_DONT_CHANGE 0x0 +/* Requested frequency in MHz for inter-core clock domain */ #define MC_CMD_SET_CLOCK_IN_ICORE_FREQ_OFST 4 -/* Request DPCPU frequency in MHz; 0 leaves unchanged. */ +/* enum: Leave the inter-core clock domain frequency unchanged */ +#define MC_CMD_SET_CLOCK_IN_ICORE_DOMAIN_DONT_CHANGE 0x0 +/* Requested frequency in MHz for DPCPU clock domain */ #define MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_OFST 8 +/* enum: Leave the DPCPU clock domain frequency unchanged */ +#define MC_CMD_SET_CLOCK_IN_DPCPU_DOMAIN_DONT_CHANGE 0x0 +/* Requested frequency in MHz for PCS clock domain */ +#define MC_CMD_SET_CLOCK_IN_PCS_FREQ_OFST 12 +/* enum: Leave the PCS clock domain frequency unchanged */ +#define MC_CMD_SET_CLOCK_IN_PCS_DOMAIN_DONT_CHANGE 0x0 +/* Requested frequency in MHz for MC clock domain */ +#define MC_CMD_SET_CLOCK_IN_MC_FREQ_OFST 16 +/* enum: Leave the MC clock domain frequency unchanged */ +#define MC_CMD_SET_CLOCK_IN_MC_DOMAIN_DONT_CHANGE 0x0 +/* Requested frequency in MHz for rmon clock domain */ +#define MC_CMD_SET_CLOCK_IN_RMON_FREQ_OFST 20 +/* enum: Leave the rmon clock domain frequency unchanged */ +#define MC_CMD_SET_CLOCK_IN_RMON_DOMAIN_DONT_CHANGE 0x0 +/* Requested frequency in MHz for vswitch clock domain */ +#define MC_CMD_SET_CLOCK_IN_VSWITCH_FREQ_OFST 24 +/* enum: Leave the vswitch clock domain frequency unchanged */ +#define MC_CMD_SET_CLOCK_IN_VSWITCH_DOMAIN_DONT_CHANGE 0x0 /* MC_CMD_SET_CLOCK_OUT msgresponse */ -#define MC_CMD_SET_CLOCK_OUT_LEN 12 +#define MC_CMD_SET_CLOCK_OUT_LEN 28 /* Resulting system frequency in MHz */ #define MC_CMD_SET_CLOCK_OUT_SYS_FREQ_OFST 0 +/* enum: The system clock domain doesn't exist */ +#define MC_CMD_SET_CLOCK_OUT_SYS_DOMAIN_UNSUPPORTED 0x0 /* Resulting inter-core frequency in MHz */ #define MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_OFST 4 +/* enum: The inter-core clock domain doesn't exist / isn't used */ +#define MC_CMD_SET_CLOCK_OUT_ICORE_DOMAIN_UNSUPPORTED 0x0 /* Resulting DPCPU frequency in MHz */ #define MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_OFST 8 +/* enum: The dpcpu clock domain doesn't exist */ +#define MC_CMD_SET_CLOCK_OUT_DPCPU_DOMAIN_UNSUPPORTED 0x0 +/* Resulting PCS frequency in MHz */ +#define MC_CMD_SET_CLOCK_OUT_PCS_FREQ_OFST 12 +/* enum: The PCS clock domain doesn't exist / isn't controlled */ +#define MC_CMD_SET_CLOCK_OUT_PCS_DOMAIN_UNSUPPORTED 0x0 +/* Resulting MC frequency in MHz */ 
+#define MC_CMD_SET_CLOCK_OUT_MC_FREQ_OFST 16 +/* enum: The MC clock domain doesn't exist / isn't controlled */ +#define MC_CMD_SET_CLOCK_OUT_MC_DOMAIN_UNSUPPORTED 0x0 +/* Resulting rmon frequency in MHz */ +#define MC_CMD_SET_CLOCK_OUT_RMON_FREQ_OFST 20 +/* enum: The rmon clock domain doesn't exist / isn't controlled */ +#define MC_CMD_SET_CLOCK_OUT_RMON_DOMAIN_UNSUPPORTED 0x0 +/* Resulting vswitch frequency in MHz */ +#define MC_CMD_SET_CLOCK_OUT_VSWITCH_FREQ_OFST 24 +/* enum: The vswitch clock domain doesn't exist / isn't controlled */ +#define MC_CMD_SET_CLOCK_OUT_VSWITCH_DOMAIN_UNSUPPORTED 0x0 /***********************************/ @@ -6932,15 +8315,27 @@ */ #define MC_CMD_DPCPU_RPC 0xae +#define MC_CMD_0xae_PRIVILEGE_CTG SRIOV_CTG_ADMIN + /* MC_CMD_DPCPU_RPC_IN msgrequest */ #define MC_CMD_DPCPU_RPC_IN_LEN 36 #define MC_CMD_DPCPU_RPC_IN_CPU_OFST 0 -/* enum: RxDPCPU */ -#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX 0x0 +/* enum: RxDPCPU0 */ +#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX0 0x0 /* enum: TxDPCPU0 */ #define MC_CMD_DPCPU_RPC_IN_DPCPU_TX0 0x1 /* enum: TxDPCPU1 */ #define MC_CMD_DPCPU_RPC_IN_DPCPU_TX1 0x2 +/* enum: RxDPCPU1 (Medford only) */ +#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX1 0x3 +/* enum: RxDPCPU (will be for the calling function; for now, just an alias of + * DPCPU_RX0) + */ +#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX 0x80 +/* enum: TxDPCPU (will be for the calling function; for now, just an alias of + * DPCPU_TX0) + */ +#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX 0x81 /* First 8 bits [39:32] of DATA are consumed by MC-DPCPU protocol and must be * initialised to zero */ @@ -7016,6 +8411,8 @@ */ #define MC_CMD_TRIGGER_INTERRUPT 0xe3 +#define MC_CMD_0xe3_PRIVILEGE_CTG SRIOV_CTG_GENERAL + /* MC_CMD_TRIGGER_INTERRUPT_IN msgrequest */ #define MC_CMD_TRIGGER_INTERRUPT_IN_LEN 4 /* Interrupt level relative to base for function. */ @@ -7026,11 +8423,32 @@ /***********************************/ +/* MC_CMD_SHMBOOT_OP + * Special operations to support (for now) shmboot. + */ +#define MC_CMD_SHMBOOT_OP 0xe6 + +#define MC_CMD_0xe6_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_SHMBOOT_OP_IN msgrequest */ +#define MC_CMD_SHMBOOT_OP_IN_LEN 4 +/* Identifies the operation to perform */ +#define MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_OFST 0 +/* enum: Copy slave_data section to the slave core. 
(Greenport only) */ +#define MC_CMD_SHMBOOT_OP_IN_PUSH_SLAVE_DATA 0x0 + +/* MC_CMD_SHMBOOT_OP_OUT msgresponse */ +#define MC_CMD_SHMBOOT_OP_OUT_LEN 0 + + +/***********************************/ /* MC_CMD_CAP_BLK_READ * Read multiple 64bit words from capture block memory */ #define MC_CMD_CAP_BLK_READ 0xe7 +#define MC_CMD_0xe7_PRIVILEGE_CTG SRIOV_CTG_ADMIN + /* MC_CMD_CAP_BLK_READ_IN msgrequest */ #define MC_CMD_CAP_BLK_READ_IN_LEN 12 #define MC_CMD_CAP_BLK_READ_IN_CAP_REG_OFST 0 @@ -7055,6 +8473,8 @@ */ #define MC_CMD_DUMP_DO 0xe8 +#define MC_CMD_0xe8_PRIVILEGE_CTG SRIOV_CTG_ADMIN + /* MC_CMD_DUMP_DO_IN msgrequest */ #define MC_CMD_DUMP_DO_IN_LEN 52 #define MC_CMD_DUMP_DO_IN_PADDING_OFST 0 @@ -7108,6 +8528,8 @@ */ #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED 0xe9 +#define MC_CMD_0xe9_PRIVILEGE_CTG SRIOV_CTG_ADMIN + /* MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN msgrequest */ #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_LEN 52 #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_ENABLE_OFST 0 @@ -7151,6 +8573,8 @@ */ #define MC_CMD_SET_PSU 0xea +#define MC_CMD_0xea_PRIVILEGE_CTG SRIOV_CTG_ADMIN + /* MC_CMD_SET_PSU_IN msgrequest */ #define MC_CMD_SET_PSU_IN_LEN 12 #define MC_CMD_SET_PSU_IN_PARAM_OFST 0 @@ -7171,6 +8595,8 @@ */ #define MC_CMD_GET_FUNCTION_INFO 0xec +#define MC_CMD_0xec_PRIVILEGE_CTG SRIOV_CTG_GENERAL + /* MC_CMD_GET_FUNCTION_INFO_IN msgrequest */ #define MC_CMD_GET_FUNCTION_INFO_IN_LEN 0 @@ -7188,6 +8614,8 @@ */ #define MC_CMD_ENABLE_OFFLINE_BIST 0xed +#define MC_CMD_0xed_PRIVILEGE_CTG SRIOV_CTG_ADMIN + /* MC_CMD_ENABLE_OFFLINE_BIST_IN msgrequest */ #define MC_CMD_ENABLE_OFFLINE_BIST_IN_LEN 0 @@ -7203,6 +8631,8 @@ */ #define MC_CMD_UART_SEND_DATA 0xee +#define MC_CMD_0xee_PRIVILEGE_CTG SRIOV_CTG_GENERAL + /* MC_CMD_UART_SEND_DATA_OUT msgrequest */ #define MC_CMD_UART_SEND_DATA_OUT_LENMIN 16 #define MC_CMD_UART_SEND_DATA_OUT_LENMAX 252 @@ -7231,6 +8661,8 @@ */ #define MC_CMD_UART_RECV_DATA 0xef +#define MC_CMD_0xef_PRIVILEGE_CTG SRIOV_CTG_GENERAL + /* MC_CMD_UART_RECV_DATA_OUT msgrequest */ #define MC_CMD_UART_RECV_DATA_OUT_LEN 16 /* CRC32 over OFFSET, LENGTH, RESERVED */ @@ -7266,6 +8698,8 @@ */ #define MC_CMD_READ_FUSES 0xf0 +#define MC_CMD_0xf0_PRIVILEGE_CTG SRIOV_CTG_ADMIN + /* MC_CMD_READ_FUSES_IN msgrequest */ #define MC_CMD_READ_FUSES_IN_LEN 8 /* Offset in OTP to read */ @@ -7292,6 +8726,8 @@ */ #define MC_CMD_KR_TUNE 0xf1 +#define MC_CMD_0xf1_PRIVILEGE_CTG SRIOV_CTG_ADMIN + /* MC_CMD_KR_TUNE_IN msgrequest */ #define MC_CMD_KR_TUNE_IN_LENMIN 4 #define MC_CMD_KR_TUNE_IN_LENMAX 252 @@ -7318,6 +8754,8 @@ * more data is returned. */ #define MC_CMD_KR_TUNE_IN_POLL_EYE_PLOT 0x6 +/* enum: Read Figure Of Merit (eye quality, higher is better). 
*/ +#define MC_CMD_KR_TUNE_IN_READ_FOM 0x7 /* Align the arguments to 32 bits */ #define MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_OFST 1 #define MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_LEN 3 @@ -7350,20 +8788,32 @@ #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM 63 #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0 #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8 -/* enum: Attenuation (0-15) */ +/* enum: Attenuation (0-15, TBD for Medford) */ #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_ATT 0x0 -/* enum: CTLE Boost (0-15) */ +/* enum: CTLE Boost (0-15, TBD for Medford) */ #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_BOOST 0x1 -/* enum: Edge DFE Tap1 (0 - max negative, 64 - zero, 127 - max positive) */ +/* enum: Edge DFE Tap1 (0 - max negative, 64 - zero, 127 - max positive, TBD + * for Medford) + */ #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP1 0x2 -/* enum: Edge DFE Tap2 (0 - max negative, 32 - zero, 63 - max positive) */ +/* enum: Edge DFE Tap2 (0 - max negative, 32 - zero, 63 - max positive, TBD for + * Medford) + */ #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP2 0x3 -/* enum: Edge DFE Tap3 (0 - max negative, 32 - zero, 63 - max positive) */ +/* enum: Edge DFE Tap3 (0 - max negative, 32 - zero, 63 - max positive, TBD for + * Medford) + */ #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP3 0x4 -/* enum: Edge DFE Tap4 (0 - max negative, 32 - zero, 63 - max positive) */ +/* enum: Edge DFE Tap4 (0 - max negative, 32 - zero, 63 - max positive, TBD for + * Medford) + */ #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP4 0x5 -/* enum: Edge DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive) */ +/* enum: Edge DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive, TBD for + * Medford) + */ #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP5 0x6 +/* enum: Edge DFE DLEV (TBD for Medford) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_DLEV 0x7 #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8 #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 3 #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */ @@ -7453,6 +8903,8 @@ #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_PREDRV_DLY 0x7 /* enum: TX Slew Rate Fine control */ #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_SR_SET 0x8 +/* enum: TX Termination Impedance control */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_RT_SET 0x9 #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8 #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 3 #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_0 0x0 /* enum */ @@ -7543,6 +8995,20 @@ #define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MINNUM 0 #define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM 126 +/* MC_CMD_KR_TUNE_READ_FOM_IN msgrequest */ +#define MC_CMD_KR_TUNE_READ_FOM_IN_LEN 8 +/* Requested operation */ +#define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_OP_OFST 0 +#define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_OP_LEN 1 +/* Align the arguments to 32 bits */ +#define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_RSVD_OFST 1 +#define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_RSVD_LEN 3 +#define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_OFST 4 + +/* MC_CMD_KR_TUNE_READ_FOM_OUT msgresponse */ +#define MC_CMD_KR_TUNE_READ_FOM_OUT_LEN 4 +#define MC_CMD_KR_TUNE_READ_FOM_OUT_FOM_OFST 0 + /***********************************/ /* MC_CMD_PCIE_TUNE @@ -7550,6 +9016,8 @@ */ #define MC_CMD_PCIE_TUNE 0xf2 +#define MC_CMD_0xf2_PRIVILEGE_CTG SRIOV_CTG_ADMIN + /* MC_CMD_PCIE_TUNE_IN msgrequest */ #define MC_CMD_PCIE_TUNE_IN_LENMIN 4 #define MC_CMD_PCIE_TUNE_IN_LENMAX 252 @@ -7711,6 +9179,8 @@ */ #define MC_CMD_LICENSING 0xf3 +#define MC_CMD_0xf3_PRIVILEGE_CTG SRIOV_CTG_GENERAL + /* MC_CMD_LICENSING_IN msgrequest */ #define 
MC_CMD_LICENSING_IN_LEN 4 /* identifies the type of operation requested */ @@ -7756,6 +9226,8 @@ */ #define MC_CMD_MC2MC_PROXY 0xf4 +#define MC_CMD_0xf4_PRIVILEGE_CTG SRIOV_CTG_GENERAL + /* MC_CMD_MC2MC_PROXY_IN msgrequest */ #define MC_CMD_MC2MC_PROXY_IN_LEN 0 @@ -7771,6 +9243,8 @@ */ #define MC_CMD_GET_LICENSED_APP_STATE 0xf5 +#define MC_CMD_0xf5_PRIVILEGE_CTG SRIOV_CTG_GENERAL + /* MC_CMD_GET_LICENSED_APP_STATE_IN msgrequest */ #define MC_CMD_GET_LICENSED_APP_STATE_IN_LEN 4 /* application ID to query (LICENSED_APP_ID_xxx) */ @@ -7792,6 +9266,8 @@ */ #define MC_CMD_LICENSED_APP_OP 0xf6 +#define MC_CMD_0xf6_PRIVILEGE_CTG SRIOV_CTG_GENERAL + /* MC_CMD_LICENSED_APP_OP_IN msgrequest */ #define MC_CMD_LICENSED_APP_OP_IN_LENMIN 8 #define MC_CMD_LICENSED_APP_OP_IN_LENMAX 252 @@ -7802,6 +9278,8 @@ #define MC_CMD_LICENSED_APP_OP_IN_OP_OFST 4 /* enum: validate application */ #define MC_CMD_LICENSED_APP_OP_IN_OP_VALIDATE 0x0 +/* enum: mask application */ +#define MC_CMD_LICENSED_APP_OP_IN_OP_MASK 0x1 /* arguments specific to this particular operation */ #define MC_CMD_LICENSED_APP_OP_IN_ARGS_OFST 8 #define MC_CMD_LICENSED_APP_OP_IN_ARGS_LEN 4 @@ -7836,10 +9314,22 @@ #define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_OFST 4 #define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_LEN 64 +/* MC_CMD_LICENSED_APP_OP_MASK_IN msgrequest */ +#define MC_CMD_LICENSED_APP_OP_MASK_IN_LEN 12 +/* application ID */ +#define MC_CMD_LICENSED_APP_OP_MASK_IN_APP_ID_OFST 0 +/* the type of operation requested */ +#define MC_CMD_LICENSED_APP_OP_MASK_IN_OP_OFST 4 +/* flag */ +#define MC_CMD_LICENSED_APP_OP_MASK_IN_FLAG_OFST 8 + +/* MC_CMD_LICENSED_APP_OP_MASK_OUT msgresponse */ +#define MC_CMD_LICENSED_APP_OP_MASK_OUT_LEN 0 + /***********************************/ /* MC_CMD_SET_PORT_SNIFF_CONFIG - * Configure port sniffing for the physical port associated with the calling + * Configure RX port sniffing for the physical port associated with the calling * function. Only a privileged function may change the port sniffing * configuration. A copy of all traffic delivered to the host (non-promiscuous * mode) or all traffic arriving at the port (promiscuous mode) may be @@ -7847,6 +9337,8 @@ */ #define MC_CMD_SET_PORT_SNIFF_CONFIG 0xf7 +#define MC_CMD_0xf7_PRIVILEGE_CTG SRIOV_CTG_ADMIN + /* MC_CMD_SET_PORT_SNIFF_CONFIG_IN msgrequest */ #define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_LEN 16 /* configuration flags */ @@ -7875,12 +9367,14 @@ /***********************************/ /* MC_CMD_GET_PORT_SNIFF_CONFIG - * Obtain the current port sniffing configuration for the physical port + * Obtain the current RX port sniffing configuration for the physical port * associated with the calling function. Only a privileged function may read * the configuration. */ #define MC_CMD_GET_PORT_SNIFF_CONFIG 0xf8 +#define MC_CMD_0xf8_PRIVILEGE_CTG SRIOV_CTG_ADMIN + /* MC_CMD_GET_PORT_SNIFF_CONFIG_IN msgrequest */ #define MC_CMD_GET_PORT_SNIFF_CONFIG_IN_LEN 0 @@ -7904,4 +9398,673 @@ #define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12 +/***********************************/ +/* MC_CMD_SET_PARSER_DISP_CONFIG + * Change configuration related to the parser-dispatcher subsystem. 
+ */ +#define MC_CMD_SET_PARSER_DISP_CONFIG 0xf9 + +#define MC_CMD_0xf9_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_SET_PARSER_DISP_CONFIG_IN msgrequest */ +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LENMIN 12 +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LENMAX 252 +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LEN(num) (8+4*(num)) +/* the type of configuration setting to change */ +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0 +/* enum: Per-TXQ enable for multicast UDP destination lookup for possible + * internal loopback. (ENTITY is a queue handle, VALUE is a single boolean.) + */ +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TXQ_MCAST_UDP_DST_LOOKUP_EN 0x0 +/* enum: Per-v-adaptor enable for suppression of self-transmissions on the + * internal loopback path. (ENTITY is an EVB_PORT_ID, VALUE is a single + * boolean.) + */ +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VADAPTOR_SUPPRESS_SELF_TX 0x1 +/* handle for the entity to update: queue handle, EVB port ID, etc. depending + * on the type of configuration setting being changed + */ +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4 +/* new value: the details depend on the type of configuration setting being + * changed + */ +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_OFST 8 +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_LEN 4 +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_MINNUM 1 +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_MAXNUM 61 + +/* MC_CMD_SET_PARSER_DISP_CONFIG_OUT msgresponse */ +#define MC_CMD_SET_PARSER_DISP_CONFIG_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_PARSER_DISP_CONFIG + * Read configuration related to the parser-dispatcher subsystem. + */ +#define MC_CMD_GET_PARSER_DISP_CONFIG 0xfa + +#define MC_CMD_0xfa_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_PARSER_DISP_CONFIG_IN msgrequest */ +#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_LEN 8 +/* the type of configuration setting to read */ +#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0 +/* Enum values, see field(s): */ +/* MC_CMD_SET_PARSER_DISP_CONFIG/MC_CMD_SET_PARSER_DISP_CONFIG_IN/TYPE */ +/* handle for the entity to query: queue handle, EVB port ID, etc. depending on + * the type of configuration setting being read + */ +#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4 + +/* MC_CMD_GET_PARSER_DISP_CONFIG_OUT msgresponse */ +#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMIN 4 +#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMAX 252 +#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LEN(num) (0+4*(num)) +/* current value: the details depend on the type of configuration setting being + * read + */ +#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_OFST 0 +#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_LEN 4 +#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MINNUM 1 +#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MAXNUM 63 + + +/***********************************/ +/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG + * Configure TX port sniffing for the physical port associated with the calling + * function. Only a privileged function may change the port sniffing + * configuration. A copy of all traffic transmitted through the port may be + * delivered to a specific queue, or a set of queues with RSS. Note that these + * packets are delivered with transmit timestamps in the packet prefix, not + * receive timestamps, so it is likely that the queue(s) will need to be + * dedicated as TX sniff receivers. 
+ */ +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG 0xfb + +#define MC_CMD_0xfb_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN msgrequest */ +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_LEN 16 +/* configuration flags */ +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0 +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0 +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1 +/* receive queue handle (for RSS mode, this is the base queue) */ +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4 +/* receive mode */ +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8 +/* enum: receive to just the specified queue */ +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0 +/* enum: receive to multiple queues using RSS context */ +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS 0x1 +/* RSS context (for RX_MODE_RSS) as returned by MC_CMD_RSS_CONTEXT_ALLOC. Note + * that these handles should be considered opaque to the host, although a value + * of 0xFFFFFFFF is guaranteed never to be a valid handle. + */ +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12 + +/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT msgresponse */ +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG + * Obtain the current TX port sniffing configuration for the physical port + * associated with the calling function. Only a privileged function may read + * the configuration. + */ +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG 0xfc + +#define MC_CMD_0xfc_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG_IN msgrequest */ +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_IN_LEN 0 + +/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT msgresponse */ +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_LEN 16 +/* configuration flags */ +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0 +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0 +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1 +/* receiving queue handle (for RSS mode, this is the base queue) */ +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4 +/* receive mode */ +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8 +/* enum: receiving to just the specified queue */ +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0 +/* enum: receiving to multiple queues using RSS context */ +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1 +/* RSS context (for RX_MODE_RSS) */ +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12 + + +/***********************************/ +/* MC_CMD_RMON_STATS_RX_ERRORS + * Per queue rx error stats. + */ +#define MC_CMD_RMON_STATS_RX_ERRORS 0xfe + +#define MC_CMD_0xfe_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_RMON_STATS_RX_ERRORS_IN msgrequest */ +#define MC_CMD_RMON_STATS_RX_ERRORS_IN_LEN 8 +/* The rx queue to get stats for. 
*/ +#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RX_QUEUE_OFST 0 +#define MC_CMD_RMON_STATS_RX_ERRORS_IN_FLAGS_OFST 4 +#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_LBN 0 +#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_WIDTH 1 + +/* MC_CMD_RMON_STATS_RX_ERRORS_OUT msgresponse */ +#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_LEN 16 +#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_CRC_ERRORS_OFST 0 +#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_TRUNC_ERRORS_OFST 4 +#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_NO_DESC_DROPS_OFST 8 +#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_ABORT_OFST 12 + + +/***********************************/ +/* MC_CMD_GET_PCIE_RESOURCE_INFO + * Find out about available PCIE resources + */ +#define MC_CMD_GET_PCIE_RESOURCE_INFO 0xfd + +/* MC_CMD_GET_PCIE_RESOURCE_INFO_IN msgrequest */ +#define MC_CMD_GET_PCIE_RESOURCE_INFO_IN_LEN 0 + +/* MC_CMD_GET_PCIE_RESOURCE_INFO_OUT msgresponse */ +#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_LEN 28 +/* The maximum number of PFs the device can expose */ +#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PFS_OFST 0 +/* The maximum number of VFs the device can expose in total */ +#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VFS_OFST 4 +/* The maximum number of MSI-X vectors the device can provide in total */ +#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VECTORS_OFST 8 +/* the number of MSI-X vectors the device will allocate by default to each PF + */ +#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_PF_VECTORS_OFST 12 +/* the number of MSI-X vectors the device will allocate by default to each VF + */ +#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_VF_VECTORS_OFST 16 +/* the maximum number of MSI-X vectors the device can allocate to any one PF */ +#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PF_VECTORS_OFST 20 +/* the maximum number of MSI-X vectors the device can allocate to any one VF */ +#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VF_VECTORS_OFST 24 + + +/***********************************/ +/* MC_CMD_GET_PORT_MODES + * Find out about available port modes + */ +#define MC_CMD_GET_PORT_MODES 0xff + +#define MC_CMD_0xff_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_PORT_MODES_IN msgrequest */ +#define MC_CMD_GET_PORT_MODES_IN_LEN 0 + +/* MC_CMD_GET_PORT_MODES_OUT msgresponse */ +#define MC_CMD_GET_PORT_MODES_OUT_LEN 12 +/* Bitmask of port modes available on the board (indexed by TLV_PORT_MODE_*) */ +#define MC_CMD_GET_PORT_MODES_OUT_MODES_OFST 0 +/* Default (canonical) board mode */ +#define MC_CMD_GET_PORT_MODES_OUT_DEFAULT_MODE_OFST 4 +/* Current board mode */ +#define MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST 8 + + +/***********************************/ +/* MC_CMD_READ_ATB + * Sample voltages on the ATB + */ +#define MC_CMD_READ_ATB 0x100 + +#define MC_CMD_0x100_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_READ_ATB_IN msgrequest */ +#define MC_CMD_READ_ATB_IN_LEN 16 +#define MC_CMD_READ_ATB_IN_SIGNAL_BUS_OFST 0 +#define MC_CMD_READ_ATB_IN_BUS_CCOM 0x0 /* enum */ +#define MC_CMD_READ_ATB_IN_BUS_CKR 0x1 /* enum */ +#define MC_CMD_READ_ATB_IN_BUS_CPCIE 0x8 /* enum */ +#define MC_CMD_READ_ATB_IN_SIGNAL_EN_BITNO_OFST 4 +#define MC_CMD_READ_ATB_IN_SIGNAL_SEL_OFST 8 +#define MC_CMD_READ_ATB_IN_SETTLING_TIME_US_OFST 12 + +/* MC_CMD_READ_ATB_OUT msgresponse */ +#define MC_CMD_READ_ATB_OUT_LEN 4 +#define MC_CMD_READ_ATB_OUT_SAMPLE_MV_OFST 0 + + +/***********************************/ +/* MC_CMD_GET_WORKAROUNDS + * Read the list of all implemented and all currently enabled workarounds. The + * enums here must correspond with those in MC_CMD_WORKAROUND. 
+ */ +#define MC_CMD_GET_WORKAROUNDS 0x59 + +#define MC_CMD_0x59_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_WORKAROUNDS_OUT msgresponse */ +#define MC_CMD_GET_WORKAROUNDS_OUT_LEN 8 +/* Each workaround is represented by a single bit according to the enums below. + */ +#define MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_OFST 0 +#define MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_OFST 4 +/* enum: Bug 17230 work around. */ +#define MC_CMD_GET_WORKAROUNDS_OUT_BUG17230 0x2 +/* enum: Bug 35388 work around (unsafe EVQ writes). */ +#define MC_CMD_GET_WORKAROUNDS_OUT_BUG35388 0x4 +/* enum: Bug35017 workaround (A64 tables must be identity map) */ +#define MC_CMD_GET_WORKAROUNDS_OUT_BUG35017 0x8 +/* enum: Bug 41750 present (MC_CMD_TRIGGER_INTERRUPT won't work) */ +#define MC_CMD_GET_WORKAROUNDS_OUT_BUG41750 0x10 +/* enum: Bug 42008 present (Interrupts can overtake associated events). Caution + * - before adding code that queries this workaround, remember that there's + * released Monza firmware that doesn't understand MC_CMD_WORKAROUND_BUG42008, + * and will hence (incorrectly) report that the bug doesn't exist. + */ +#define MC_CMD_GET_WORKAROUNDS_OUT_BUG42008 0x20 +/* enum: Bug 26807 features present in firmware (multicast filter chaining) */ +#define MC_CMD_GET_WORKAROUNDS_OUT_BUG26807 0x40 + + +/***********************************/ +/* MC_CMD_PRIVILEGE_MASK + * Read/set privileges of an arbitrary PCIe function + */ +#define MC_CMD_PRIVILEGE_MASK 0x5a + +#define MC_CMD_0x5a_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_PRIVILEGE_MASK_IN msgrequest */ +#define MC_CMD_PRIVILEGE_MASK_IN_LEN 8 +/* The target function to have its mask read or set e.g. PF 0 = 0xFFFF0000, VF + * 1,3 = 0x00030001 + */ +#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_OFST 0 +#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_LBN 0 +#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_WIDTH 16 +#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_LBN 16 +#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_WIDTH 16 +#define MC_CMD_PRIVILEGE_MASK_IN_VF_NULL 0xffff /* enum */ +/* New privilege mask to be set. The mask will only be changed if the MSB is + * set to 1. + */ +#define MC_CMD_PRIVILEGE_MASK_IN_NEW_MASK_OFST 4 +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN 0x1 /* enum */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_LINK 0x2 /* enum */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD 0x4 /* enum */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_PTP 0x8 /* enum */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE_FILTERS 0x10 /* enum */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING 0x20 /* enum */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_UNICAST 0x40 /* enum */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MULTICAST 0x80 /* enum */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_BROADCAST 0x100 /* enum */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ALL_MULTICAST 0x200 /* enum */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_PROMISCUOUS 0x400 /* enum */ +/* enum: Set this bit to indicate that a new privilege mask is to be set, + * otherwise the command will only read the existing mask. + */ +#define MC_CMD_PRIVILEGE_MASK_IN_DO_CHANGE 0x80000000 + +/* MC_CMD_PRIVILEGE_MASK_OUT msgresponse */ +#define MC_CMD_PRIVILEGE_MASK_OUT_LEN 4 +/* For an admin function, always all the privileges are reported. 
*/ +#define MC_CMD_PRIVILEGE_MASK_OUT_OLD_MASK_OFST 0 + + +/***********************************/ +/* MC_CMD_LINK_STATE_MODE + * Read/set link state mode of a VF + */ +#define MC_CMD_LINK_STATE_MODE 0x5c + +#define MC_CMD_0x5c_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_LINK_STATE_MODE_IN msgrequest */ +#define MC_CMD_LINK_STATE_MODE_IN_LEN 8 +/* The target function to have its link state mode read or set, must be a VF + * e.g. VF 1,3 = 0x00030001 + */ +#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_OFST 0 +#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_LBN 0 +#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_WIDTH 16 +#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_LBN 16 +#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_WIDTH 16 +/* New link state mode to be set */ +#define MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_OFST 4 +#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_AUTO 0x0 /* enum */ +#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_UP 0x1 /* enum */ +#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_DOWN 0x2 /* enum */ +/* enum: Use this value to just read the existing setting without modifying it. + */ +#define MC_CMD_LINK_STATE_MODE_IN_DO_NOT_CHANGE 0xffffffff + +/* MC_CMD_LINK_STATE_MODE_OUT msgresponse */ +#define MC_CMD_LINK_STATE_MODE_OUT_LEN 4 +#define MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_OFST 0 + + +/***********************************/ +/* MC_CMD_GET_SNAPSHOT_LENGTH + * Obtain the curent range of allowable values for the SNAPSHOT_LENGTH + * parameter to MC_CMD_INIT_RXQ. + */ +#define MC_CMD_GET_SNAPSHOT_LENGTH 0x101 + +#define MC_CMD_0x101_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_SNAPSHOT_LENGTH_IN msgrequest */ +#define MC_CMD_GET_SNAPSHOT_LENGTH_IN_LEN 0 + +/* MC_CMD_GET_SNAPSHOT_LENGTH_OUT msgresponse */ +#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_LEN 8 +/* Minimum acceptable snapshot length. */ +#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MIN_OFST 0 +/* Maximum acceptable snapshot length. 
*/ +#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MAX_OFST 4 + + +/***********************************/ +/* MC_CMD_FUSE_DIAGS + * Additional fuse diagnostics + */ +#define MC_CMD_FUSE_DIAGS 0x102 + +#define MC_CMD_0x102_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_FUSE_DIAGS_IN msgrequest */ +#define MC_CMD_FUSE_DIAGS_IN_LEN 0 + +/* MC_CMD_FUSE_DIAGS_OUT msgresponse */ +#define MC_CMD_FUSE_DIAGS_OUT_LEN 48 +/* Total number of mismatched bits between pairs in area 0 */ +#define MC_CMD_FUSE_DIAGS_OUT_AREA0_MISMATCH_BITS_OFST 0 +/* Total number of unexpectedly clear (set in B but not A) bits in area 0 */ +#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_A_BAD_BITS_OFST 4 +/* Total number of unexpectedly clear (set in A but not B) bits in area 0 */ +#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_B_BAD_BITS_OFST 8 +/* Checksum of data after logical OR of pairs in area 0 */ +#define MC_CMD_FUSE_DIAGS_OUT_AREA0_CHECKSUM_OFST 12 +/* Total number of mismatched bits between pairs in area 1 */ +#define MC_CMD_FUSE_DIAGS_OUT_AREA1_MISMATCH_BITS_OFST 16 +/* Total number of unexpectedly clear (set in B but not A) bits in area 1 */ +#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_A_BAD_BITS_OFST 20 +/* Total number of unexpectedly clear (set in A but not B) bits in area 1 */ +#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_B_BAD_BITS_OFST 24 +/* Checksum of data after logical OR of pairs in area 1 */ +#define MC_CMD_FUSE_DIAGS_OUT_AREA1_CHECKSUM_OFST 28 +/* Total number of mismatched bits between pairs in area 2 */ +#define MC_CMD_FUSE_DIAGS_OUT_AREA2_MISMATCH_BITS_OFST 32 +/* Total number of unexpectedly clear (set in B but not A) bits in area 2 */ +#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_A_BAD_BITS_OFST 36 +/* Total number of unexpectedly clear (set in A but not B) bits in area 2 */ +#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_B_BAD_BITS_OFST 40 +/* Checksum of data after logical OR of pairs in area 2 */ +#define MC_CMD_FUSE_DIAGS_OUT_AREA2_CHECKSUM_OFST 44 + + +/***********************************/ +/* MC_CMD_PRIVILEGE_MODIFY + * Modify the privileges of a set of PCIe functions. Note that this operation + * only effects non-admin functions unless the admin privilege itself is + * included in one of the masks provided. + */ +#define MC_CMD_PRIVILEGE_MODIFY 0x60 + +#define MC_CMD_0x60_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_PRIVILEGE_MODIFY_IN msgrequest */ +#define MC_CMD_PRIVILEGE_MODIFY_IN_LEN 16 +/* The groups of functions to have their privilege masks modified. */ +#define MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_OFST 0 +#define MC_CMD_PRIVILEGE_MODIFY_IN_NONE 0x0 /* enum */ +#define MC_CMD_PRIVILEGE_MODIFY_IN_ALL 0x1 /* enum */ +#define MC_CMD_PRIVILEGE_MODIFY_IN_PFS_ONLY 0x2 /* enum */ +#define MC_CMD_PRIVILEGE_MODIFY_IN_VFS_ONLY 0x3 /* enum */ +#define MC_CMD_PRIVILEGE_MODIFY_IN_VFS_OF_PF 0x4 /* enum */ +#define MC_CMD_PRIVILEGE_MODIFY_IN_ONE 0x5 /* enum */ +/* For VFS_OF_PF specify the PF, for ONE specify the target function */ +#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_OFST 4 +#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_LBN 0 +#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_WIDTH 16 +#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_LBN 16 +#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_WIDTH 16 +/* Privileges to be added to the target functions. For privilege definitions + * refer to the command MC_CMD_PRIVILEGE_MASK + */ +#define MC_CMD_PRIVILEGE_MODIFY_IN_ADD_MASK_OFST 8 +/* Privileges to be removed from the target functions. 
For privilege + * definitions refer to the command MC_CMD_PRIVILEGE_MASK + */ +#define MC_CMD_PRIVILEGE_MODIFY_IN_REMOVE_MASK_OFST 12 + +/* MC_CMD_PRIVILEGE_MODIFY_OUT msgresponse */ +#define MC_CMD_PRIVILEGE_MODIFY_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_XPM_READ_BYTES + * Read XPM memory + */ +#define MC_CMD_XPM_READ_BYTES 0x103 + +#define MC_CMD_0x103_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_XPM_READ_BYTES_IN msgrequest */ +#define MC_CMD_XPM_READ_BYTES_IN_LEN 8 +/* Start address (byte) */ +#define MC_CMD_XPM_READ_BYTES_IN_ADDR_OFST 0 +/* Count (bytes) */ +#define MC_CMD_XPM_READ_BYTES_IN_COUNT_OFST 4 + +/* MC_CMD_XPM_READ_BYTES_OUT msgresponse */ +#define MC_CMD_XPM_READ_BYTES_OUT_LENMIN 0 +#define MC_CMD_XPM_READ_BYTES_OUT_LENMAX 252 +#define MC_CMD_XPM_READ_BYTES_OUT_LEN(num) (0+1*(num)) +/* Data */ +#define MC_CMD_XPM_READ_BYTES_OUT_DATA_OFST 0 +#define MC_CMD_XPM_READ_BYTES_OUT_DATA_LEN 1 +#define MC_CMD_XPM_READ_BYTES_OUT_DATA_MINNUM 0 +#define MC_CMD_XPM_READ_BYTES_OUT_DATA_MAXNUM 252 + + +/***********************************/ +/* MC_CMD_XPM_WRITE_BYTES + * Write XPM memory + */ +#define MC_CMD_XPM_WRITE_BYTES 0x104 + +#define MC_CMD_0x104_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_XPM_WRITE_BYTES_IN msgrequest */ +#define MC_CMD_XPM_WRITE_BYTES_IN_LENMIN 8 +#define MC_CMD_XPM_WRITE_BYTES_IN_LENMAX 252 +#define MC_CMD_XPM_WRITE_BYTES_IN_LEN(num) (8+1*(num)) +/* Start address (byte) */ +#define MC_CMD_XPM_WRITE_BYTES_IN_ADDR_OFST 0 +/* Count (bytes) */ +#define MC_CMD_XPM_WRITE_BYTES_IN_COUNT_OFST 4 +/* Data */ +#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_OFST 8 +#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_LEN 1 +#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_MINNUM 0 +#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_MAXNUM 244 + +/* MC_CMD_XPM_WRITE_BYTES_OUT msgresponse */ +#define MC_CMD_XPM_WRITE_BYTES_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_XPM_READ_SECTOR + * Read XPM sector + */ +#define MC_CMD_XPM_READ_SECTOR 0x105 + +#define MC_CMD_0x105_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_XPM_READ_SECTOR_IN msgrequest */ +#define MC_CMD_XPM_READ_SECTOR_IN_LEN 8 +/* Sector index */ +#define MC_CMD_XPM_READ_SECTOR_IN_INDEX_OFST 0 +/* Sector size */ +#define MC_CMD_XPM_READ_SECTOR_IN_SIZE_OFST 4 + +/* MC_CMD_XPM_READ_SECTOR_OUT msgresponse */ +#define MC_CMD_XPM_READ_SECTOR_OUT_LENMIN 4 +#define MC_CMD_XPM_READ_SECTOR_OUT_LENMAX 36 +#define MC_CMD_XPM_READ_SECTOR_OUT_LEN(num) (4+1*(num)) +/* Sector type */ +#define MC_CMD_XPM_READ_SECTOR_OUT_TYPE_OFST 0 +#define MC_CMD_XPM_READ_SECTOR_OUT_BLANK 0x0 /* enum */ +#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_128 0x1 /* enum */ +#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_256 0x2 /* enum */ +#define MC_CMD_XPM_READ_SECTOR_OUT_INVALID 0xff /* enum */ +/* Sector data */ +#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_OFST 4 +#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_LEN 1 +#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_MINNUM 0 +#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_MAXNUM 32 + + +/***********************************/ +/* MC_CMD_XPM_WRITE_SECTOR + * Write XPM sector + */ +#define MC_CMD_XPM_WRITE_SECTOR 0x106 + +#define MC_CMD_0x106_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_XPM_WRITE_SECTOR_IN msgrequest */ +#define MC_CMD_XPM_WRITE_SECTOR_IN_LENMIN 12 +#define MC_CMD_XPM_WRITE_SECTOR_IN_LENMAX 44 +#define MC_CMD_XPM_WRITE_SECTOR_IN_LEN(num) (12+1*(num)) +/* If writing fails due to an uncorrectable error, try up to RETRIES following + * sectors (or until no more space available). If 0, only one write attempt is + * made. 
Note that uncorrectable errors are unlikely, thanks to XPM self-repair + * mechanism. + */ +#define MC_CMD_XPM_WRITE_SECTOR_IN_RETRIES_OFST 0 +#define MC_CMD_XPM_WRITE_SECTOR_IN_RETRIES_LEN 1 +#define MC_CMD_XPM_WRITE_SECTOR_IN_RESERVED_OFST 1 +#define MC_CMD_XPM_WRITE_SECTOR_IN_RESERVED_LEN 3 +/* Sector type */ +#define MC_CMD_XPM_WRITE_SECTOR_IN_TYPE_OFST 4 +/* Enum values, see field(s): */ +/* MC_CMD_XPM_READ_SECTOR_OUT/TYPE */ +/* Sector size */ +#define MC_CMD_XPM_WRITE_SECTOR_IN_SIZE_OFST 8 +/* Sector data */ +#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_OFST 12 +#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_LEN 1 +#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_MINNUM 0 +#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_MAXNUM 32 + +/* MC_CMD_XPM_WRITE_SECTOR_OUT msgresponse */ +#define MC_CMD_XPM_WRITE_SECTOR_OUT_LEN 4 +/* New sector index */ +#define MC_CMD_XPM_WRITE_SECTOR_OUT_INDEX_OFST 0 + + +/***********************************/ +/* MC_CMD_XPM_INVALIDATE_SECTOR + * Invalidate XPM sector + */ +#define MC_CMD_XPM_INVALIDATE_SECTOR 0x107 + +#define MC_CMD_0x107_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_XPM_INVALIDATE_SECTOR_IN msgrequest */ +#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_LEN 4 +/* Sector index */ +#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_INDEX_OFST 0 + +/* MC_CMD_XPM_INVALIDATE_SECTOR_OUT msgresponse */ +#define MC_CMD_XPM_INVALIDATE_SECTOR_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_XPM_BLANK_CHECK + * Blank-check XPM memory and report bad locations + */ +#define MC_CMD_XPM_BLANK_CHECK 0x108 + +#define MC_CMD_0x108_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_XPM_BLANK_CHECK_IN msgrequest */ +#define MC_CMD_XPM_BLANK_CHECK_IN_LEN 8 +/* Start address (byte) */ +#define MC_CMD_XPM_BLANK_CHECK_IN_ADDR_OFST 0 +/* Count (bytes) */ +#define MC_CMD_XPM_BLANK_CHECK_IN_COUNT_OFST 4 + +/* MC_CMD_XPM_BLANK_CHECK_OUT msgresponse */ +#define MC_CMD_XPM_BLANK_CHECK_OUT_LENMIN 4 +#define MC_CMD_XPM_BLANK_CHECK_OUT_LENMAX 252 +#define MC_CMD_XPM_BLANK_CHECK_OUT_LEN(num) (4+2*(num)) +/* Total number of bad (non-blank) locations */ +#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_OFST 0 +/* Addresses of bad locations (may be less than BAD_COUNT, if all cannot fit + * into MCDI response) + */ +#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_OFST 4 +#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_LEN 2 +#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_MINNUM 0 +#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_MAXNUM 124 + + +/***********************************/ +/* MC_CMD_XPM_REPAIR + * Blank-check and repair XPM memory + */ +#define MC_CMD_XPM_REPAIR 0x109 + +#define MC_CMD_0x109_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_XPM_REPAIR_IN msgrequest */ +#define MC_CMD_XPM_REPAIR_IN_LEN 8 +/* Start address (byte) */ +#define MC_CMD_XPM_REPAIR_IN_ADDR_OFST 0 +/* Count (bytes) */ +#define MC_CMD_XPM_REPAIR_IN_COUNT_OFST 4 + +/* MC_CMD_XPM_REPAIR_OUT msgresponse */ +#define MC_CMD_XPM_REPAIR_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_XPM_DECODER_TEST + * Test XPM memory address decoders for gross manufacturing defects. Can only + * be performed on an unprogrammed part. + */ +#define MC_CMD_XPM_DECODER_TEST 0x10a + +#define MC_CMD_0x10a_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_XPM_DECODER_TEST_IN msgrequest */ +#define MC_CMD_XPM_DECODER_TEST_IN_LEN 0 + +/* MC_CMD_XPM_DECODER_TEST_OUT msgresponse */ +#define MC_CMD_XPM_DECODER_TEST_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_XPM_WRITE_TEST + * XPM memory write test. 
Test XPM write logic for gross manufacturing defects + * by writing to a dedicated test row. There are 16 locations in the test row + * and the test can only be performed on locations that have not been + * previously used (i.e. can be run at most 16 times). The test will pick the + * first available location to use, or fail with ENOSPC if none left. + */ +#define MC_CMD_XPM_WRITE_TEST 0x10b + +#define MC_CMD_0x10b_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_XPM_WRITE_TEST_IN msgrequest */ +#define MC_CMD_XPM_WRITE_TEST_IN_LEN 0 + +/* MC_CMD_XPM_WRITE_TEST_OUT msgresponse */ +#define MC_CMD_XPM_WRITE_TEST_OUT_LEN 0 + + #endif /* MCDI_PCOL_H */ diff --git a/kernel/drivers/net/ethernet/sfc/mcdi_port.c b/kernel/drivers/net/ethernet/sfc/mcdi_port.c index fb19b70ea..7f295c4d7 100644 --- a/kernel/drivers/net/ethernet/sfc/mcdi_port.c +++ b/kernel/drivers/net/ethernet/sfc/mcdi_port.c @@ -865,6 +865,7 @@ int efx_mcdi_set_mac(struct efx_nic *efx) BUILD_BUG_ON(MC_CMD_SET_MAC_OUT_LEN != 0); + /* This has no effect on EF10 */ ether_addr_copy(MCDI_PTR(cmdbytes, SET_MAC_IN_ADDR), efx->net_dev->dev_addr); @@ -923,6 +924,7 @@ enum efx_stats_action { static int efx_mcdi_mac_stats(struct efx_nic *efx, enum efx_stats_action action, int clear) { + struct efx_ef10_nic_data *nic_data = efx->nic_data; MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN); int rc; int change = action == EFX_STATS_PULL ? 0 : 1; @@ -944,9 +946,14 @@ static int efx_mcdi_mac_stats(struct efx_nic *efx, MAC_STATS_IN_PERIODIC_NOEVENT, 1, MAC_STATS_IN_PERIOD_MS, period); MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len); - - rc = efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf), - NULL, 0, NULL); + MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, nic_data->vport_id); + + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf), + NULL, 0, NULL); + /* Expect ENOENT if DMA queues have not been set up */ + if (rc && (rc != -ENOENT || atomic_read(&efx->active_queues))) + efx_mcdi_display_error(efx, MC_CMD_MAC_STATS, sizeof(inbuf), + NULL, 0, rc); return rc; } diff --git a/kernel/drivers/net/ethernet/sfc/net_driver.h b/kernel/drivers/net/ethernet/sfc/net_driver.h index 325dd94bc..a8ddd122f 100644 --- a/kernel/drivers/net/ethernet/sfc/net_driver.h +++ b/kernel/drivers/net/ethernet/sfc/net_driver.h @@ -25,6 +25,7 @@ #include <linux/highmem.h> #include <linux/workqueue.h> #include <linux/mutex.h> +#include <linux/rwsem.h> #include <linux/vmalloc.h> #include <linux/i2c.h> #include <linux/mtd/mtd.h> @@ -218,6 +219,7 @@ struct efx_tx_buffer { * @tso_packets: Number of packets via the TSO xmit path * @pushes: Number of times the TX push feature has been used * @pio_packets: Number of times the TX PIO feature has been used + * @xmit_more_available: Are any packets waiting to be pushed to the NIC * @empty_read_count: If the completion path has seen the queue as empty * and the transmission path has not yet checked this, the value of * @read_count bitwise-added to %EFX_EMPTY_COUNT_VALID; otherwise 0. 
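The MC_CMD_GET_WORKAROUNDS response defined above packs the implemented and currently-enabled workaround sets into two dwords, one bit per workaround. A minimal sketch of how a caller might decode it, assuming the driver's usual MCDI helpers (MCDI_DECLARE_BUF(), efx_mcdi_rpc(), MCDI_DWORD()); the function name is illustrative and not part of the patch above:

static int example_get_workarounds(struct efx_nic *efx,
				   unsigned int *implemented,
				   unsigned int *enabled)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_WORKAROUNDS_OUT_LEN);
	size_t outlen;
	int rc;

	/* No request payload; treat a short response as an I/O error */
	rc = efx_mcdi_rpc(efx, MC_CMD_GET_WORKAROUNDS, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_GET_WORKAROUNDS_OUT_LEN)
		return -EIO;

	/* Each workaround is one bit, e.g. MC_CMD_GET_WORKAROUNDS_OUT_BUG35388 */
	*implemented = MCDI_DWORD(outbuf, GET_WORKAROUNDS_OUT_IMPLEMENTED);
	*enabled = MCDI_DWORD(outbuf, GET_WORKAROUNDS_OUT_ENABLED);
	return 0;
}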
@@ -240,6 +242,8 @@ struct efx_tx_queue { unsigned int read_count ____cacheline_aligned_in_smp; unsigned int old_write_count; unsigned int merge_events; + unsigned int bytes_compl; + unsigned int pkts_compl; /* Members used only on the xmit path */ unsigned int insert_count ____cacheline_aligned_in_smp; @@ -250,6 +254,7 @@ struct efx_tx_queue { unsigned int tso_packets; unsigned int pushes; unsigned int pio_packets; + bool xmit_more_available; /* Statistics to supplement MAC stats */ unsigned long tx_packets; @@ -428,21 +433,8 @@ struct efx_channel { struct net_device *napi_dev; struct napi_struct napi_str; #ifdef CONFIG_NET_RX_BUSY_POLL - unsigned int state; - spinlock_t state_lock; -#define EFX_CHANNEL_STATE_IDLE 0 -#define EFX_CHANNEL_STATE_NAPI (1 << 0) /* NAPI owns this channel */ -#define EFX_CHANNEL_STATE_POLL (1 << 1) /* poll owns this channel */ -#define EFX_CHANNEL_STATE_DISABLED (1 << 2) /* channel is disabled */ -#define EFX_CHANNEL_STATE_NAPI_YIELD (1 << 3) /* NAPI yielded this channel */ -#define EFX_CHANNEL_STATE_POLL_YIELD (1 << 4) /* poll yielded this channel */ -#define EFX_CHANNEL_OWNED \ - (EFX_CHANNEL_STATE_NAPI | EFX_CHANNEL_STATE_POLL) -#define EFX_CHANNEL_LOCKED \ - (EFX_CHANNEL_OWNED | EFX_CHANNEL_STATE_DISABLED) -#define EFX_CHANNEL_USER_PEND \ - (EFX_CHANNEL_STATE_POLL | EFX_CHANNEL_STATE_POLL_YIELD) -#endif /* CONFIG_NET_RX_BUSY_POLL */ + unsigned long busy_poll_state; +#endif struct efx_special_buffer eventq; unsigned int eventq_mask; unsigned int eventq_read_ptr; @@ -477,98 +469,94 @@ struct efx_channel { }; #ifdef CONFIG_NET_RX_BUSY_POLL -static inline void efx_channel_init_lock(struct efx_channel *channel) +enum efx_channel_busy_poll_state { + EFX_CHANNEL_STATE_IDLE = 0, + EFX_CHANNEL_STATE_NAPI = BIT(0), + EFX_CHANNEL_STATE_NAPI_REQ_BIT = 1, + EFX_CHANNEL_STATE_NAPI_REQ = BIT(1), + EFX_CHANNEL_STATE_POLL_BIT = 2, + EFX_CHANNEL_STATE_POLL = BIT(2), + EFX_CHANNEL_STATE_DISABLE_BIT = 3, +}; + +static inline void efx_channel_busy_poll_init(struct efx_channel *channel) { - spin_lock_init(&channel->state_lock); + WRITE_ONCE(channel->busy_poll_state, EFX_CHANNEL_STATE_IDLE); } /* Called from the device poll routine to get ownership of a channel. */ static inline bool efx_channel_lock_napi(struct efx_channel *channel) { - bool rc = true; - - spin_lock_bh(&channel->state_lock); - if (channel->state & EFX_CHANNEL_LOCKED) { - WARN_ON(channel->state & EFX_CHANNEL_STATE_NAPI); - channel->state |= EFX_CHANNEL_STATE_NAPI_YIELD; - rc = false; - } else { - /* we don't care if someone yielded */ - channel->state = EFX_CHANNEL_STATE_NAPI; + unsigned long prev, old = READ_ONCE(channel->busy_poll_state); + + while (1) { + switch (old) { + case EFX_CHANNEL_STATE_POLL: + /* Ensure efx_channel_try_lock_poll() wont starve us */ + set_bit(EFX_CHANNEL_STATE_NAPI_REQ_BIT, + &channel->busy_poll_state); + /* fallthrough */ + case EFX_CHANNEL_STATE_POLL | EFX_CHANNEL_STATE_NAPI_REQ: + return false; + default: + break; + } + prev = cmpxchg(&channel->busy_poll_state, old, + EFX_CHANNEL_STATE_NAPI); + if (unlikely(prev != old)) { + /* This is likely to mean we've just entered polling + * state. Go back round to set the REQ bit. 
+ */ + old = prev; + continue; + } + return true; } - spin_unlock_bh(&channel->state_lock); - return rc; } static inline void efx_channel_unlock_napi(struct efx_channel *channel) { - spin_lock_bh(&channel->state_lock); - WARN_ON(channel->state & - (EFX_CHANNEL_STATE_POLL | EFX_CHANNEL_STATE_NAPI_YIELD)); - - channel->state &= EFX_CHANNEL_STATE_DISABLED; - spin_unlock_bh(&channel->state_lock); + /* Make sure write has completed from efx_channel_lock_napi() */ + smp_wmb(); + WRITE_ONCE(channel->busy_poll_state, EFX_CHANNEL_STATE_IDLE); } /* Called from efx_busy_poll(). */ -static inline bool efx_channel_lock_poll(struct efx_channel *channel) +static inline bool efx_channel_try_lock_poll(struct efx_channel *channel) { - bool rc = true; - - spin_lock_bh(&channel->state_lock); - if ((channel->state & EFX_CHANNEL_LOCKED)) { - channel->state |= EFX_CHANNEL_STATE_POLL_YIELD; - rc = false; - } else { - /* preserve yield marks */ - channel->state |= EFX_CHANNEL_STATE_POLL; - } - spin_unlock_bh(&channel->state_lock); - return rc; + return cmpxchg(&channel->busy_poll_state, EFX_CHANNEL_STATE_IDLE, + EFX_CHANNEL_STATE_POLL) == EFX_CHANNEL_STATE_IDLE; } -/* Returns true if NAPI tried to get the channel while it was locked. */ static inline void efx_channel_unlock_poll(struct efx_channel *channel) { - spin_lock_bh(&channel->state_lock); - WARN_ON(channel->state & EFX_CHANNEL_STATE_NAPI); - - /* will reset state to idle, unless channel is disabled */ - channel->state &= EFX_CHANNEL_STATE_DISABLED; - spin_unlock_bh(&channel->state_lock); + clear_bit_unlock(EFX_CHANNEL_STATE_POLL_BIT, &channel->busy_poll_state); } -/* True if a socket is polling, even if it did not get the lock. */ static inline bool efx_channel_busy_polling(struct efx_channel *channel) { - WARN_ON(!(channel->state & EFX_CHANNEL_OWNED)); - return channel->state & EFX_CHANNEL_USER_PEND; + return test_bit(EFX_CHANNEL_STATE_POLL_BIT, &channel->busy_poll_state); } static inline void efx_channel_enable(struct efx_channel *channel) { - spin_lock_bh(&channel->state_lock); - channel->state = EFX_CHANNEL_STATE_IDLE; - spin_unlock_bh(&channel->state_lock); + clear_bit_unlock(EFX_CHANNEL_STATE_DISABLE_BIT, + &channel->busy_poll_state); } -/* False if the channel is currently owned. */ +/* Stop further polling or napi access. + * Returns false if the channel is currently busy polling. 
+ */ static inline bool efx_channel_disable(struct efx_channel *channel) { - bool rc = true; - - spin_lock_bh(&channel->state_lock); - if (channel->state & EFX_CHANNEL_OWNED) - rc = false; - channel->state |= EFX_CHANNEL_STATE_DISABLED; - spin_unlock_bh(&channel->state_lock); - - return rc; + set_bit(EFX_CHANNEL_STATE_DISABLE_BIT, &channel->busy_poll_state); + /* Implicit barrier in efx_channel_busy_polling() */ + return !efx_channel_busy_polling(channel); } #else /* CONFIG_NET_RX_BUSY_POLL */ -static inline void efx_channel_init_lock(struct efx_channel *channel) +static inline void efx_channel_busy_poll_init(struct efx_channel *channel) { } @@ -581,7 +569,7 @@ static inline void efx_channel_unlock_napi(struct efx_channel *channel) { } -static inline bool efx_channel_lock_poll(struct efx_channel *channel) +static inline bool efx_channel_try_lock_poll(struct efx_channel *channel) { return false; } @@ -793,7 +781,6 @@ union efx_multicast_hash { efx_oword_t oword[EFX_MCAST_HASH_ENTRIES / sizeof(efx_oword_t) / 8]; }; -struct efx_vf; struct vfdi_status; /** @@ -897,7 +884,8 @@ struct vfdi_status; * @loopback_mode: Loopback status * @loopback_modes: Supported loopback mode bitmask * @loopback_selftest: Offline self-test private state - * @filter_lock: Filter table lock + * @filter_sem: Filter table rw_semaphore, for freeing the table + * @filter_lock: Filter table lock, for mere content changes * @filter_state: Architecture-dependent filter table state * @rps_flow_id: Flow IDs of filters allocated for accelerated RFS, * indexed by filter ID @@ -909,7 +897,6 @@ struct vfdi_status; * completed (either success or failure). Not used when MCDI is used to * flush receive queues. * @flush_wq: wait queue used by efx_nic_flush_queues() to wait for flush completions. - * @vf: Array of &struct efx_vf objects. * @vf_count: Number of VFs intended to be enabled. * @vf_init_count: Number of VFs that have been fully initialised. * @vi_scale: log2 number of vnics per VF. @@ -923,6 +910,7 @@ struct vfdi_status; * @stats_lock: Statistics update lock. Must be held when calling * efx_nic_type::{update,start,stop}_stats. * @n_rx_noskb_drops: Count of RX packets dropped due to failure to allocate an skb + * @mc_promisc: Whether in multicast promiscuous mode when last changed * * This is stored in the private area of the &struct net_device. */ @@ -969,6 +957,7 @@ struct efx_nic { unsigned next_buffer_table; unsigned int max_channels; + unsigned int max_tx_channels; unsigned n_channels; unsigned n_rx_channels; unsigned rss_spread; @@ -1040,6 +1029,7 @@ struct efx_nic { void *loopback_selftest; + struct rw_semaphore filter_sem; spinlock_t filter_lock; void *filter_state; #ifdef CONFIG_RFS_ACCEL @@ -1053,7 +1043,6 @@ struct efx_nic { wait_queue_head_t flush_wq; #ifdef CONFIG_SFC_SRIOV - struct efx_vf *vf; unsigned vf_count; unsigned vf_init_count; unsigned vi_scale; @@ -1070,6 +1059,7 @@ struct efx_nic { int last_irq_cpu; spinlock_t stats_lock; atomic_t n_rx_noskb_drops; + bool mc_promisc; }; static inline int efx_dev_registered(struct efx_nic *efx) @@ -1092,6 +1082,7 @@ struct efx_mtd_partition { /** * struct efx_nic_type - Efx device type definition + * @mem_bar: Get the memory BAR * @mem_map_size: Get memory BAR mapped size * @probe: Probe the controller * @remove: Free resources allocated by probe() @@ -1204,6 +1195,7 @@ struct efx_mtd_partition { * @ptp_set_ts_config: Set hardware timestamp configuration. The flags * and tx_type will already have been validated but this operation * must validate and update rx_filter. 
+ * @set_mac_address: Set the MAC address of the device * @revision: Hardware architecture revision * @txd_ptr_tbl_base: TX descriptor ring base address * @rxd_ptr_tbl_base: RX descriptor ring base address @@ -1226,6 +1218,8 @@ struct efx_mtd_partition { * @hwtstamp_filters: Mask of hardware timestamp filter types supported */ struct efx_nic_type { + bool is_vf; + unsigned int mem_bar; unsigned int (*mem_map_size)(struct efx_nic *efx); int (*probe)(struct efx_nic *efx); void (*remove)(struct efx_nic *efx); @@ -1268,6 +1262,7 @@ struct efx_nic_type { void (*mcdi_read_response)(struct efx_nic *efx, efx_dword_t *pdu, size_t pdu_offset, size_t pdu_len); int (*mcdi_poll_reboot)(struct efx_nic *efx); + void (*mcdi_reboot_detected)(struct efx_nic *efx); void (*irq_enable_master)(struct efx_nic *efx); void (*irq_test_generate)(struct efx_nic *efx); void (*irq_disable_non_ev)(struct efx_nic *efx); @@ -1277,7 +1272,8 @@ struct efx_nic_type { void (*tx_init)(struct efx_tx_queue *tx_queue); void (*tx_remove)(struct efx_tx_queue *tx_queue); void (*tx_write)(struct efx_tx_queue *tx_queue); - void (*rx_push_rss_config)(struct efx_nic *efx); + int (*rx_push_rss_config)(struct efx_nic *efx, bool user, + const u32 *rx_indir_table); int (*rx_probe)(struct efx_rx_queue *rx_queue); void (*rx_init)(struct efx_rx_queue *rx_queue); void (*rx_remove)(struct efx_rx_queue *rx_queue); @@ -1330,11 +1326,28 @@ struct efx_nic_type { int (*ptp_set_ts_sync_events)(struct efx_nic *efx, bool en, bool temp); int (*ptp_set_ts_config)(struct efx_nic *efx, struct hwtstamp_config *init); + int (*sriov_configure)(struct efx_nic *efx, int num_vfs); int (*sriov_init)(struct efx_nic *efx); void (*sriov_fini)(struct efx_nic *efx); - void (*sriov_mac_address_changed)(struct efx_nic *efx); bool (*sriov_wanted)(struct efx_nic *efx); void (*sriov_reset)(struct efx_nic *efx); + void (*sriov_flr)(struct efx_nic *efx, unsigned vf_i); + int (*sriov_set_vf_mac)(struct efx_nic *efx, int vf_i, u8 *mac); + int (*sriov_set_vf_vlan)(struct efx_nic *efx, int vf_i, u16 vlan, + u8 qos); + int (*sriov_set_vf_spoofchk)(struct efx_nic *efx, int vf_i, + bool spoofchk); + int (*sriov_get_vf_config)(struct efx_nic *efx, int vf_i, + struct ifla_vf_info *ivi); + int (*sriov_set_vf_link_state)(struct efx_nic *efx, int vf_i, + int link_state); + int (*sriov_get_phys_port_id)(struct efx_nic *efx, + struct netdev_phys_item_id *ppid); + int (*vswitching_probe)(struct efx_nic *efx); + int (*vswitching_restore)(struct efx_nic *efx); + void (*vswitching_remove)(struct efx_nic *efx); + int (*get_mac_address)(struct efx_nic *efx, unsigned char *perm_addr); + int (*set_mac_address)(struct efx_nic *efx); int revision; unsigned int txd_ptr_tbl_base; diff --git a/kernel/drivers/net/ethernet/sfc/nic.h b/kernel/drivers/net/ethernet/sfc/nic.h index 93d10cbbd..0b536e27d 100644 --- a/kernel/drivers/net/ethernet/sfc/nic.h +++ b/kernel/drivers/net/ethernet/sfc/nic.h @@ -381,6 +381,7 @@ enum { * @efx: Pointer back to main interface structure * @wol_filter_id: Wake-on-LAN packet filter id * @stats: Hardware statistics + * @vf: Array of &struct siena_vf objects * @vf_buftbl_base: The zeroth buffer table index used to back VF queues. * @vfdi_status: Common VFDI status page to be dmad to VF address space. * @local_addr_list: List of local addresses. Protected by %local_lock. 
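The new sriov_* and set_mac_address hooks added to struct efx_nic_type above are per-NIC-type method pointers, so the generic net-device callbacks can forward to whichever implementation (Siena or EF10) the probed NIC provides. A hedged sketch of that dispatch pattern, with an illustrative function name (the real ndo wiring lives elsewhere and is not shown in this hunk):

static int example_ndo_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	/* NIC types without SR-IOV support simply leave the hook NULL */
	if (!efx->type->sriov_set_vf_mac)
		return -EOPNOTSUPP;

	return efx->type->sriov_set_vf_mac(efx, vf_i, mac);
}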
@@ -394,6 +395,7 @@ struct siena_nic_data { int wol_filter_id; u64 stats[SIENA_STAT_COUNT]; #ifdef CONFIG_SFC_SRIOV + struct siena_vf *vf; struct efx_channel *vfdi_channel; unsigned vf_buftbl_base; struct efx_buffer vfdi_status; @@ -405,59 +407,77 @@ struct siena_nic_data { }; enum { - EF10_STAT_tx_bytes = GENERIC_STAT_COUNT, - EF10_STAT_tx_packets, - EF10_STAT_tx_pause, - EF10_STAT_tx_control, - EF10_STAT_tx_unicast, - EF10_STAT_tx_multicast, - EF10_STAT_tx_broadcast, - EF10_STAT_tx_lt64, - EF10_STAT_tx_64, - EF10_STAT_tx_65_to_127, - EF10_STAT_tx_128_to_255, - EF10_STAT_tx_256_to_511, - EF10_STAT_tx_512_to_1023, - EF10_STAT_tx_1024_to_15xx, - EF10_STAT_tx_15xx_to_jumbo, - EF10_STAT_rx_bytes, - EF10_STAT_rx_bytes_minus_good_bytes, - EF10_STAT_rx_good_bytes, - EF10_STAT_rx_bad_bytes, - EF10_STAT_rx_packets, - EF10_STAT_rx_good, - EF10_STAT_rx_bad, - EF10_STAT_rx_pause, - EF10_STAT_rx_control, + EF10_STAT_port_tx_bytes = GENERIC_STAT_COUNT, + EF10_STAT_port_tx_packets, + EF10_STAT_port_tx_pause, + EF10_STAT_port_tx_control, + EF10_STAT_port_tx_unicast, + EF10_STAT_port_tx_multicast, + EF10_STAT_port_tx_broadcast, + EF10_STAT_port_tx_lt64, + EF10_STAT_port_tx_64, + EF10_STAT_port_tx_65_to_127, + EF10_STAT_port_tx_128_to_255, + EF10_STAT_port_tx_256_to_511, + EF10_STAT_port_tx_512_to_1023, + EF10_STAT_port_tx_1024_to_15xx, + EF10_STAT_port_tx_15xx_to_jumbo, + EF10_STAT_port_rx_bytes, + EF10_STAT_port_rx_bytes_minus_good_bytes, + EF10_STAT_port_rx_good_bytes, + EF10_STAT_port_rx_bad_bytes, + EF10_STAT_port_rx_packets, + EF10_STAT_port_rx_good, + EF10_STAT_port_rx_bad, + EF10_STAT_port_rx_pause, + EF10_STAT_port_rx_control, + EF10_STAT_port_rx_unicast, + EF10_STAT_port_rx_multicast, + EF10_STAT_port_rx_broadcast, + EF10_STAT_port_rx_lt64, + EF10_STAT_port_rx_64, + EF10_STAT_port_rx_65_to_127, + EF10_STAT_port_rx_128_to_255, + EF10_STAT_port_rx_256_to_511, + EF10_STAT_port_rx_512_to_1023, + EF10_STAT_port_rx_1024_to_15xx, + EF10_STAT_port_rx_15xx_to_jumbo, + EF10_STAT_port_rx_gtjumbo, + EF10_STAT_port_rx_bad_gtjumbo, + EF10_STAT_port_rx_overflow, + EF10_STAT_port_rx_align_error, + EF10_STAT_port_rx_length_error, + EF10_STAT_port_rx_nodesc_drops, + EF10_STAT_port_rx_pm_trunc_bb_overflow, + EF10_STAT_port_rx_pm_discard_bb_overflow, + EF10_STAT_port_rx_pm_trunc_vfifo_full, + EF10_STAT_port_rx_pm_discard_vfifo_full, + EF10_STAT_port_rx_pm_trunc_qbb, + EF10_STAT_port_rx_pm_discard_qbb, + EF10_STAT_port_rx_pm_discard_mapping, + EF10_STAT_port_rx_dp_q_disabled_packets, + EF10_STAT_port_rx_dp_di_dropped_packets, + EF10_STAT_port_rx_dp_streaming_packets, + EF10_STAT_port_rx_dp_hlb_fetch, + EF10_STAT_port_rx_dp_hlb_wait, EF10_STAT_rx_unicast, + EF10_STAT_rx_unicast_bytes, EF10_STAT_rx_multicast, + EF10_STAT_rx_multicast_bytes, EF10_STAT_rx_broadcast, - EF10_STAT_rx_lt64, - EF10_STAT_rx_64, - EF10_STAT_rx_65_to_127, - EF10_STAT_rx_128_to_255, - EF10_STAT_rx_256_to_511, - EF10_STAT_rx_512_to_1023, - EF10_STAT_rx_1024_to_15xx, - EF10_STAT_rx_15xx_to_jumbo, - EF10_STAT_rx_gtjumbo, - EF10_STAT_rx_bad_gtjumbo, + EF10_STAT_rx_broadcast_bytes, + EF10_STAT_rx_bad, + EF10_STAT_rx_bad_bytes, EF10_STAT_rx_overflow, - EF10_STAT_rx_align_error, - EF10_STAT_rx_length_error, - EF10_STAT_rx_nodesc_drops, - EF10_STAT_rx_pm_trunc_bb_overflow, - EF10_STAT_rx_pm_discard_bb_overflow, - EF10_STAT_rx_pm_trunc_vfifo_full, - EF10_STAT_rx_pm_discard_vfifo_full, - EF10_STAT_rx_pm_trunc_qbb, - EF10_STAT_rx_pm_discard_qbb, - EF10_STAT_rx_pm_discard_mapping, - EF10_STAT_rx_dp_q_disabled_packets, - EF10_STAT_rx_dp_di_dropped_packets, - 
EF10_STAT_rx_dp_streaming_packets, - EF10_STAT_rx_dp_hlb_fetch, - EF10_STAT_rx_dp_hlb_wait, + EF10_STAT_tx_unicast, + EF10_STAT_tx_unicast_bytes, + EF10_STAT_tx_multicast, + EF10_STAT_tx_multicast_bytes, + EF10_STAT_tx_broadcast, + EF10_STAT_tx_broadcast_bytes, + EF10_STAT_tx_bad, + EF10_STAT_tx_bad_bytes, + EF10_STAT_tx_overflow, EF10_STAT_COUNT }; @@ -483,12 +503,22 @@ enum { * @must_restore_piobufs: Flag: PIO buffers have yet to be restored after MC * reboot * @rx_rss_context: Firmware handle for our RSS context + * @rx_rss_context_exclusive: Whether our RSS context is exclusive or shared * @stats: Hardware statistics * @workaround_35388: Flag: firmware supports workaround for bug 35388 + * @workaround_26807: Flag: firmware supports workaround for bug 26807 * @must_check_datapath_caps: Flag: @datapath_caps needs to be revalidated * after MC reboot * @datapath_caps: Capabilities of datapath firmware (FLAGS1 field of * %MC_CMD_GET_CAPABILITIES response) + * @rx_dpcpu_fw_id: Firmware ID of the RxDPCPU + * @tx_dpcpu_fw_id: Firmware ID of the TxDPCPU + * @vport_id: The function's vport ID, only relevant for PFs + * @must_probe_vswitching: Flag: vswitching has yet to be setup after MC reboot + * @pf_index: The number for this PF, or the parent PF if this is a VF +#ifdef CONFIG_SFC_SRIOV + * @vf: Pointer to VF data structure +#endif */ struct efx_ef10_nic_data { struct efx_buffer mcdi_buf; @@ -503,126 +533,28 @@ struct efx_ef10_nic_data { unsigned int piobuf_handle[EF10_TX_PIOBUF_COUNT]; bool must_restore_piobufs; u32 rx_rss_context; + bool rx_rss_context_exclusive; u64 stats[EF10_STAT_COUNT]; bool workaround_35388; + bool workaround_26807; bool must_check_datapath_caps; u32 datapath_caps; -}; - -/* - * On the SFC9000 family each port is associated with 1 PCI physical - * function (PF) handled by sfc and a configurable number of virtual - * functions (VFs) that may be handled by some other driver, often in - * a VM guest. The queue pointer registers are mapped in both PF and - * VF BARs such that an 8K region provides access to a single RX, TX - * and event queue (collectively a Virtual Interface, VI or VNIC). - * - * The PF has access to all 1024 VIs while VFs are mapped to VIs - * according to VI_BASE and VI_SCALE: VF i has access to VIs numbered - * in range [VI_BASE + i << VI_SCALE, VI_BASE + i + 1 << VI_SCALE). - * The number of VIs and the VI_SCALE value are configurable but must - * be established at boot time by firmware. - */ - -/* Maximum VI_SCALE parameter supported by Siena */ -#define EFX_VI_SCALE_MAX 6 -/* Base VI to use for SR-IOV. Must be aligned to (1 << EFX_VI_SCALE_MAX), - * so this is the smallest allowed value. 
*/ -#define EFX_VI_BASE 128U -/* Maximum number of VFs allowed */ -#define EFX_VF_COUNT_MAX 127 -/* Limit EVQs on VFs to be only 8k to reduce buffer table reservation */ -#define EFX_MAX_VF_EVQ_SIZE 8192UL -/* The number of buffer table entries reserved for each VI on a VF */ -#define EFX_VF_BUFTBL_PER_VI \ - ((EFX_MAX_VF_EVQ_SIZE + 2 * EFX_MAX_DMAQ_SIZE) * \ - sizeof(efx_qword_t) / EFX_BUF_SIZE) - + unsigned int rx_dpcpu_fw_id; + unsigned int tx_dpcpu_fw_id; + unsigned int vport_id; + bool must_probe_vswitching; + unsigned int pf_index; + u8 port_id[ETH_ALEN]; #ifdef CONFIG_SFC_SRIOV - -/* SIENA */ -static inline bool efx_siena_sriov_wanted(struct efx_nic *efx) -{ - return efx->vf_count != 0; -} - -static inline bool efx_siena_sriov_enabled(struct efx_nic *efx) -{ - return efx->vf_init_count != 0; -} - -static inline unsigned int efx_vf_size(struct efx_nic *efx) -{ - return 1 << efx->vi_scale; -} + unsigned int vf_index; + struct ef10_vf *vf; +#endif + u8 vport_mac[ETH_ALEN]; +}; int efx_init_sriov(void); -void efx_siena_sriov_probe(struct efx_nic *efx); -int efx_siena_sriov_init(struct efx_nic *efx); -void efx_siena_sriov_mac_address_changed(struct efx_nic *efx); -void efx_siena_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event); -void efx_siena_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event); -void efx_siena_sriov_event(struct efx_channel *channel, efx_qword_t *event); -void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq); -void efx_siena_sriov_flr(struct efx_nic *efx, unsigned flr); -void efx_siena_sriov_reset(struct efx_nic *efx); -void efx_siena_sriov_fini(struct efx_nic *efx); void efx_fini_sriov(void); -/* EF10 */ -static inline bool efx_ef10_sriov_wanted(struct efx_nic *efx) { return false; } -static inline int efx_ef10_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; } -static inline void efx_ef10_sriov_mac_address_changed(struct efx_nic *efx) {} -static inline void efx_ef10_sriov_reset(struct efx_nic *efx) {} -static inline void efx_ef10_sriov_fini(struct efx_nic *efx) {} - -#else - -/* SIENA */ -static inline bool efx_siena_sriov_wanted(struct efx_nic *efx) { return false; } -static inline bool efx_siena_sriov_enabled(struct efx_nic *efx) { return false; } -static inline unsigned int efx_vf_size(struct efx_nic *efx) { return 0; } -static inline int efx_init_sriov(void) { return 0; } -static inline void efx_siena_sriov_probe(struct efx_nic *efx) {} -static inline int efx_siena_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; } -static inline void efx_siena_sriov_mac_address_changed(struct efx_nic *efx) {} -static inline void efx_siena_sriov_tx_flush_done(struct efx_nic *efx, - efx_qword_t *event) {} -static inline void efx_siena_sriov_rx_flush_done(struct efx_nic *efx, - efx_qword_t *event) {} -static inline void efx_siena_sriov_event(struct efx_channel *channel, - efx_qword_t *event) {} -static inline void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx, - unsigned dmaq) {} -static inline void efx_siena_sriov_flr(struct efx_nic *efx, unsigned flr) {} -static inline void efx_siena_sriov_reset(struct efx_nic *efx) {} -static inline void efx_siena_sriov_fini(struct efx_nic *efx) {} -static inline void efx_fini_sriov(void) {} - -/* EF10 */ -static inline bool efx_ef10_sriov_wanted(struct efx_nic *efx) { return false; } -static inline int efx_ef10_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; } -static inline void efx_ef10_sriov_mac_address_changed(struct efx_nic *efx) {} -static inline void efx_ef10_sriov_reset(struct 
efx_nic *efx) {} -static inline void efx_ef10_sriov_fini(struct efx_nic *efx) {} - -#endif - -/* FALCON */ -static inline bool efx_falcon_sriov_wanted(struct efx_nic *efx) { return false; } -static inline int efx_falcon_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; } -static inline void efx_falcon_sriov_mac_address_changed(struct efx_nic *efx) {} -static inline void efx_falcon_sriov_reset(struct efx_nic *efx) {} -static inline void efx_falcon_sriov_fini(struct efx_nic *efx) {} - -int efx_siena_sriov_set_vf_mac(struct net_device *dev, int vf, u8 *mac); -int efx_siena_sriov_set_vf_vlan(struct net_device *dev, int vf, - u16 vlan, u8 qos); -int efx_siena_sriov_get_vf_config(struct net_device *dev, int vf, - struct ifla_vf_info *ivf); -int efx_siena_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf, - bool spoofchk); - struct ethtool_ts_info; int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel); void efx_ptp_defer_probe_with_channel(struct efx_nic *efx); @@ -654,6 +586,7 @@ extern const struct efx_nic_type falcon_a1_nic_type; extern const struct efx_nic_type falcon_b0_nic_type; extern const struct efx_nic_type siena_a0_nic_type; extern const struct efx_nic_type efx_hunt_a0_nic_type; +extern const struct efx_nic_type efx_hunt_a0_vf_nic_type; /************************************************************************** * diff --git a/kernel/drivers/net/ethernet/sfc/ptp.c b/kernel/drivers/net/ethernet/sfc/ptp.c index a2e9aee05..c771e0af4 100644 --- a/kernel/drivers/net/ethernet/sfc/ptp.c +++ b/kernel/drivers/net/ethernet/sfc/ptp.c @@ -306,7 +306,7 @@ struct efx_ptp_data { struct work_struct pps_work; struct workqueue_struct *pps_workwq; bool nic_ts_enabled; - MCDI_DECLARE_BUF(txbuf, MC_CMD_PTP_IN_TRANSMIT_LENMAX); + _MCDI_DECLARE_BUF(txbuf, MC_CMD_PTP_IN_TRANSMIT_LENMAX); unsigned int good_syncs; unsigned int fast_syncs; @@ -389,11 +389,8 @@ size_t efx_ptp_update_stats(struct efx_nic *efx, u64 *stats) MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), outbuf, sizeof(outbuf), NULL); - if (rc) { - netif_err(efx, hw, efx->net_dev, - "MC_CMD_PTP_OP_STATUS failed (%d)\n", rc); + if (rc) memset(outbuf, 0, sizeof(outbuf)); - } efx_nic_update_stats(efx_ptp_stat_desc, PTP_STAT_COUNT, efx_ptp_stat_mask, stats, _MCDI_PTR(outbuf, 0), false); @@ -404,8 +401,8 @@ size_t efx_ptp_update_stats(struct efx_nic *efx, u64 *stats) /* For Siena platforms NIC time is s and ns */ static void efx_ptp_ns_to_s_ns(s64 ns, u32 *nic_major, u32 *nic_minor) { - struct timespec ts = ns_to_timespec(ns); - *nic_major = ts.tv_sec; + struct timespec64 ts = ns_to_timespec64(ns); + *nic_major = (u32)ts.tv_sec; *nic_minor = ts.tv_nsec; } @@ -434,8 +431,8 @@ static ktime_t efx_ptp_s_ns_to_ktime_correction(u32 nic_major, u32 nic_minor, */ static void efx_ptp_ns_to_s27(s64 ns, u32 *nic_major, u32 *nic_minor) { - struct timespec ts = ns_to_timespec(ns); - u32 maj = ts.tv_sec; + struct timespec64 ts = ns_to_timespec64(ns); + u32 maj = (u32)ts.tv_sec; u32 min = (u32)(((u64)ts.tv_nsec * NS_TO_S27_MULT + (1ULL << (NS_TO_S27_SHIFT - 1))) >> NS_TO_S27_SHIFT); @@ -490,14 +487,20 @@ static int efx_ptp_get_attributes(struct efx_nic *efx) */ MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_GET_ATTRIBUTES); MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); - rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), - outbuf, sizeof(outbuf), &out_len); - if (rc == 0) + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &out_len); + if (rc == 0) { 
fmt = MCDI_DWORD(outbuf, PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT); - else if (rc == -EINVAL) + } else if (rc == -EINVAL) { fmt = MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS; - else + } else if (rc == -EPERM) { + netif_info(efx, probe, efx->net_dev, "no PTP support\n"); return rc; + } else { + efx_mcdi_display_error(efx, MC_CMD_PTP, sizeof(inbuf), + outbuf, sizeof(outbuf), rc); + return rc; + } if (fmt == MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_27FRACTION) { ptp->ns_to_nic_time = efx_ptp_ns_to_s27; @@ -541,8 +544,8 @@ static int efx_ptp_get_timestamp_corrections(struct efx_nic *efx) MC_CMD_PTP_OP_GET_TIMESTAMP_CORRECTIONS); MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); - rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), - outbuf, sizeof(outbuf), NULL); + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), NULL); if (rc == 0) { efx->ptp_data->ts_corrections.tx = MCDI_DWORD(outbuf, PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT); @@ -558,6 +561,8 @@ static int efx_ptp_get_timestamp_corrections(struct efx_nic *efx) efx->ptp_data->ts_corrections.pps_out = 0; efx->ptp_data->ts_corrections.pps_in = 0; } else { + efx_mcdi_display_error(efx, MC_CMD_PTP, sizeof(inbuf), outbuf, + sizeof(outbuf), rc); return rc; } @@ -568,7 +573,7 @@ static int efx_ptp_get_timestamp_corrections(struct efx_nic *efx) static int efx_ptp_enable(struct efx_nic *efx) { MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_ENABLE_LEN); - MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, 0); + MCDI_DECLARE_BUF_ERR(outbuf); int rc; MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ENABLE); @@ -596,7 +601,7 @@ static int efx_ptp_enable(struct efx_nic *efx) static int efx_ptp_disable(struct efx_nic *efx) { MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_DISABLE_LEN); - MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, 0); + MCDI_DECLARE_BUF_ERR(outbuf); int rc; MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_DISABLE); @@ -604,7 +609,12 @@ static int efx_ptp_disable(struct efx_nic *efx) rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), outbuf, sizeof(outbuf), NULL); rc = (rc == -EALREADY) ? 0 : rc; - if (rc) + /* If we get ENOSYS, the NIC doesn't support PTP, and thus this function + * should only have been called during probe. 
+ */ + if (rc == -ENOSYS || rc == -EPERM) + netif_info(efx, probe, efx->net_dev, "no PTP support\n"); + else if (rc) efx_mcdi_display_error(efx, MC_CMD_PTP, MC_CMD_PTP_IN_DISABLE_LEN, outbuf, sizeof(outbuf), rc); @@ -636,28 +646,28 @@ static void efx_ptp_send_times(struct efx_nic *efx, struct pps_event_time *last_time) { struct pps_event_time now; - struct timespec limit; + struct timespec64 limit; struct efx_ptp_data *ptp = efx->ptp_data; - struct timespec start; + struct timespec64 start; int *mc_running = ptp->start.addr; pps_get_ts(&now); start = now.ts_real; limit = now.ts_real; - timespec_add_ns(&limit, SYNCHRONISE_PERIOD_NS); + timespec64_add_ns(&limit, SYNCHRONISE_PERIOD_NS); /* Write host time for specified period or until MC is done */ - while ((timespec_compare(&now.ts_real, &limit) < 0) && + while ((timespec64_compare(&now.ts_real, &limit) < 0) && ACCESS_ONCE(*mc_running)) { - struct timespec update_time; + struct timespec64 update_time; unsigned int host_time; /* Don't update continuously to avoid saturating the PCIe bus */ update_time = now.ts_real; - timespec_add_ns(&update_time, SYNCHRONISATION_GRANULARITY_NS); + timespec64_add_ns(&update_time, SYNCHRONISATION_GRANULARITY_NS); do { pps_get_ts(&now); - } while ((timespec_compare(&now.ts_real, &update_time) < 0) && + } while ((timespec64_compare(&now.ts_real, &update_time) < 0) && ACCESS_ONCE(*mc_running)); /* Synchronise NIC with single word of time only */ @@ -713,7 +723,7 @@ efx_ptp_process_times(struct efx_nic *efx, MCDI_DECLARE_STRUCT_PTR(synch_buf), struct efx_ptp_data *ptp = efx->ptp_data; u32 last_sec; u32 start_sec; - struct timespec delta; + struct timespec64 delta; ktime_t mc_time; if (number_readings == 0) @@ -727,14 +737,14 @@ efx_ptp_process_times(struct efx_nic *efx, MCDI_DECLARE_STRUCT_PTR(synch_buf), */ for (i = 0; i < number_readings; i++) { s32 window, corrected; - struct timespec wait; + struct timespec64 wait; efx_ptp_read_timeset( MCDI_ARRAY_STRUCT_PTR(synch_buf, PTP_OUT_SYNCHRONIZE_TIMESET, i), &ptp->timeset[i]); - wait = ktime_to_timespec( + wait = ktime_to_timespec64( ptp->nic_to_kernel_time(0, ptp->timeset[i].wait, 0)); window = ptp->timeset[i].window; corrected = window - wait.tv_nsec; @@ -793,7 +803,7 @@ efx_ptp_process_times(struct efx_nic *efx, MCDI_DECLARE_STRUCT_PTR(synch_buf), ptp->timeset[last_good].minor, 0); /* Calculate delay from NIC top of second to last_time */ - delta.tv_nsec += ktime_to_timespec(mc_time).tv_nsec; + delta.tv_nsec += ktime_to_timespec64(mc_time).tv_nsec; /* Set PPS timestamp to match NIC top of second */ ptp->host_time_pps = *last_time; diff --git a/kernel/drivers/net/ethernet/sfc/selftest.c b/kernel/drivers/net/ethernet/sfc/selftest.c index b605dfd5c..9d78830da 100644 --- a/kernel/drivers/net/ethernet/sfc/selftest.c +++ b/kernel/drivers/net/ethernet/sfc/selftest.c @@ -114,7 +114,10 @@ static int efx_test_nvram(struct efx_nic *efx, struct efx_self_tests *tests) if (efx->type->test_nvram) { rc = efx->type->test_nvram(efx); - tests->nvram = rc ? -1 : 1; + if (rc == -EPERM) + rc = 0; + else + tests->nvram = rc ? -1 : 1; } return rc; @@ -253,6 +256,12 @@ static int efx_test_phy(struct efx_nic *efx, struct efx_self_tests *tests, mutex_lock(&efx->mac_lock); rc = efx->phy_op->run_tests(efx, tests->phy_ext, flags); mutex_unlock(&efx->mac_lock); + if (rc == -EPERM) + rc = 0; + else + netif_info(efx, drv, efx->net_dev, + "%s phy selftest\n", rc ? 
"Failed" : "Passed"); + return rc; } @@ -661,6 +670,9 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests, wmb(); kfree(state); + if (rc == -EPERM) + rc = 0; + return rc; } diff --git a/kernel/drivers/net/ethernet/sfc/siena.c b/kernel/drivers/net/ethernet/sfc/siena.c index f12c81193..2219b5424 100644 --- a/kernel/drivers/net/ethernet/sfc/siena.c +++ b/kernel/drivers/net/ethernet/sfc/siena.c @@ -25,6 +25,7 @@ #include "mcdi.h" #include "mcdi_pcol.h" #include "selftest.h" +#include "siena_sriov.h" /* Hardware control for SFC9000 family including SFL9021 (aka Siena). */ @@ -261,6 +262,7 @@ static int siena_probe_nic(struct efx_nic *efx) } efx->max_channels = EFX_MAX_CHANNELS; + efx->max_tx_channels = EFX_MAX_CHANNELS; efx_reado(efx, ®, FR_AZ_CS_DEBUG); efx->port_num = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1; @@ -306,7 +308,9 @@ static int siena_probe_nic(struct efx_nic *efx) if (rc) goto fail5; +#ifdef CONFIG_SFC_SRIOV efx_siena_sriov_probe(efx); +#endif efx_ptp_defer_probe_with_channel(efx); return 0; @@ -321,7 +325,8 @@ fail1: return rc; } -static void siena_rx_push_rss_config(struct efx_nic *efx) +static int siena_rx_push_rss_config(struct efx_nic *efx, bool user, + const u32 *rx_indir_table) { efx_oword_t temp; @@ -343,7 +348,11 @@ static void siena_rx_push_rss_config(struct efx_nic *efx) FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8); efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3); + memcpy(efx->rx_indir_table, rx_indir_table, + sizeof(efx->rx_indir_table)); efx_farch_rx_push_indir_table(efx); + + return 0; } /* This call performs hardware-specific global initialisation, such as @@ -386,7 +395,7 @@ static int siena_init_nic(struct efx_nic *efx) EFX_RX_USR_BUF_SIZE >> 5); efx_writeo(efx, &temp, FR_AZ_RX_CFG); - siena_rx_push_rss_config(efx); + siena_rx_push_rss_config(efx, false, efx->rx_indir_table); /* Enable event logging */ rc = efx_mcdi_log_ctrl(efx, true, false, 0); @@ -909,6 +918,8 @@ fail: */ const struct efx_nic_type siena_a0_nic_type = { + .is_vf = false, + .mem_bar = EFX_MEM_BAR, .mem_map_size = siena_mem_map_size, .probe = siena_probe_nic, .remove = siena_remove_nic, @@ -996,11 +1007,22 @@ const struct efx_nic_type siena_a0_nic_type = { #endif .ptp_write_host_time = siena_ptp_write_host_time, .ptp_set_ts_config = siena_ptp_set_ts_config, +#ifdef CONFIG_SFC_SRIOV + .sriov_configure = efx_siena_sriov_configure, .sriov_init = efx_siena_sriov_init, .sriov_fini = efx_siena_sriov_fini, - .sriov_mac_address_changed = efx_siena_sriov_mac_address_changed, .sriov_wanted = efx_siena_sriov_wanted, .sriov_reset = efx_siena_sriov_reset, + .sriov_flr = efx_siena_sriov_flr, + .sriov_set_vf_mac = efx_siena_sriov_set_vf_mac, + .sriov_set_vf_vlan = efx_siena_sriov_set_vf_vlan, + .sriov_set_vf_spoofchk = efx_siena_sriov_set_vf_spoofchk, + .sriov_get_vf_config = efx_siena_sriov_get_vf_config, + .vswitching_probe = efx_port_dummy_op_int, + .vswitching_restore = efx_port_dummy_op_int, + .vswitching_remove = efx_port_dummy_op_void, + .set_mac_address = efx_siena_sriov_mac_address_changed, +#endif .revision = EFX_REV_SIENA_A0, .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL, @@ -1021,9 +1043,5 @@ const struct efx_nic_type siena_a0_nic_type = { .max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS, .hwtstamp_filters = (1 << HWTSTAMP_FILTER_NONE | 1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT | - 1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC | - 1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ | - 1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT | - 1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC | - 1 << 
HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ), + 1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT), }; diff --git a/kernel/drivers/net/ethernet/sfc/siena_sriov.c b/kernel/drivers/net/ethernet/sfc/siena_sriov.c index fe8343079..da7b94f34 100644 --- a/kernel/drivers/net/ethernet/sfc/siena_sriov.c +++ b/kernel/drivers/net/ethernet/sfc/siena_sriov.c @@ -16,6 +16,7 @@ #include "filter.h" #include "mcdi_pcol.h" #include "farch_regs.h" +#include "siena_sriov.h" #include "vfdi.h" /* Number of longs required to track all the VIs in a VF */ @@ -38,7 +39,7 @@ enum efx_vf_tx_filter_mode { }; /** - * struct efx_vf - Back-end resource and protocol state for a PCI VF + * struct siena_vf - Back-end resource and protocol state for a PCI VF * @efx: The Efx NIC owning this VF * @pci_rid: The PCI requester ID for this VF * @pci_name: The PCI name (formatted address) of this VF @@ -83,7 +84,7 @@ enum efx_vf_tx_filter_mode { * @rxq_retry_count: Number of receive queues in @rxq_retry_mask. * @reset_work: Work item to schedule a VF reset. */ -struct efx_vf { +struct siena_vf { struct efx_nic *efx; unsigned int pci_rid; char pci_name[13]; /* dddd:bb:dd.f */ @@ -189,7 +190,7 @@ MODULE_PARM_DESC(max_vfs, */ static struct workqueue_struct *vfdi_workqueue; -static unsigned abs_index(struct efx_vf *vf, unsigned index) +static unsigned abs_index(struct siena_vf *vf, unsigned index) { return EFX_VI_BASE + vf->index * efx_vf_size(vf->efx) + index; } @@ -207,8 +208,8 @@ static int efx_siena_sriov_cmd(struct efx_nic *efx, bool enable, MCDI_SET_DWORD(inbuf, SRIOV_IN_VI_BASE, EFX_VI_BASE); MCDI_SET_DWORD(inbuf, SRIOV_IN_VF_COUNT, efx->vf_count); - rc = efx_mcdi_rpc(efx, MC_CMD_SRIOV, inbuf, MC_CMD_SRIOV_IN_LEN, - outbuf, MC_CMD_SRIOV_OUT_LEN, &outlen); + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_SRIOV, inbuf, MC_CMD_SRIOV_IN_LEN, + outbuf, MC_CMD_SRIOV_OUT_LEN, &outlen); if (rc) return rc; if (outlen < MC_CMD_SRIOV_OUT_LEN) @@ -299,7 +300,7 @@ out: /* The TX filter is entirely controlled by this driver, and is modified * underneath the feet of the VF */ -static void efx_siena_sriov_reset_tx_filter(struct efx_vf *vf) +static void efx_siena_sriov_reset_tx_filter(struct siena_vf *vf) { struct efx_nic *efx = vf->efx; struct efx_filter_spec filter; @@ -343,7 +344,7 @@ static void efx_siena_sriov_reset_tx_filter(struct efx_vf *vf) } /* The RX filter is managed here on behalf of the VF driver */ -static void efx_siena_sriov_reset_rx_filter(struct efx_vf *vf) +static void efx_siena_sriov_reset_rx_filter(struct siena_vf *vf) { struct efx_nic *efx = vf->efx; struct efx_filter_spec filter; @@ -382,7 +383,7 @@ static void efx_siena_sriov_reset_rx_filter(struct efx_vf *vf) } } -static void __efx_siena_sriov_update_vf_addr(struct efx_vf *vf) +static void __efx_siena_sriov_update_vf_addr(struct siena_vf *vf) { struct efx_nic *efx = vf->efx; struct siena_nic_data *nic_data = efx->nic_data; @@ -397,7 +398,7 @@ static void __efx_siena_sriov_update_vf_addr(struct efx_vf *vf) * local_page_list, either by acquiring local_lock or by running from * efx_siena_sriov_peer_work() */ -static void __efx_siena_sriov_push_vf_status(struct efx_vf *vf) +static void __efx_siena_sriov_push_vf_status(struct siena_vf *vf) { struct efx_nic *efx = vf->efx; struct siena_nic_data *nic_data = efx->nic_data; @@ -509,8 +510,9 @@ static bool bad_buf_count(unsigned buf_count, unsigned max_entry_count) * Optionally set VF index and VI index within the VF. 
*/ static bool map_vi_index(struct efx_nic *efx, unsigned abs_index, - struct efx_vf **vf_out, unsigned *rel_index_out) + struct siena_vf **vf_out, unsigned *rel_index_out) { + struct siena_nic_data *nic_data = efx->nic_data; unsigned vf_i; if (abs_index < EFX_VI_BASE) @@ -520,13 +522,13 @@ static bool map_vi_index(struct efx_nic *efx, unsigned abs_index, return true; if (vf_out) - *vf_out = efx->vf + vf_i; + *vf_out = nic_data->vf + vf_i; if (rel_index_out) *rel_index_out = abs_index % efx_vf_size(efx); return false; } -static int efx_vfdi_init_evq(struct efx_vf *vf) +static int efx_vfdi_init_evq(struct siena_vf *vf) { struct efx_nic *efx = vf->efx; struct vfdi_req *req = vf->buf.addr; @@ -567,7 +569,7 @@ static int efx_vfdi_init_evq(struct efx_vf *vf) return VFDI_RC_SUCCESS; } -static int efx_vfdi_init_rxq(struct efx_vf *vf) +static int efx_vfdi_init_rxq(struct siena_vf *vf) { struct efx_nic *efx = vf->efx; struct vfdi_req *req = vf->buf.addr; @@ -608,7 +610,7 @@ static int efx_vfdi_init_rxq(struct efx_vf *vf) return VFDI_RC_SUCCESS; } -static int efx_vfdi_init_txq(struct efx_vf *vf) +static int efx_vfdi_init_txq(struct siena_vf *vf) { struct efx_nic *efx = vf->efx; struct vfdi_req *req = vf->buf.addr; @@ -655,7 +657,7 @@ static int efx_vfdi_init_txq(struct efx_vf *vf) } /* Returns true when efx_vfdi_fini_all_queues should wake */ -static bool efx_vfdi_flush_wake(struct efx_vf *vf) +static bool efx_vfdi_flush_wake(struct siena_vf *vf) { /* Ensure that all updates are visible to efx_vfdi_fini_all_queues() */ smp_mb(); @@ -664,7 +666,7 @@ static bool efx_vfdi_flush_wake(struct efx_vf *vf) atomic_read(&vf->rxq_retry_count); } -static void efx_vfdi_flush_clear(struct efx_vf *vf) +static void efx_vfdi_flush_clear(struct siena_vf *vf) { memset(vf->txq_mask, 0, sizeof(vf->txq_mask)); vf->txq_count = 0; @@ -674,7 +676,7 @@ static void efx_vfdi_flush_clear(struct efx_vf *vf) atomic_set(&vf->rxq_retry_count, 0); } -static int efx_vfdi_fini_all_queues(struct efx_vf *vf) +static int efx_vfdi_fini_all_queues(struct siena_vf *vf) { struct efx_nic *efx = vf->efx; efx_oword_t reg; @@ -757,7 +759,7 @@ static int efx_vfdi_fini_all_queues(struct efx_vf *vf) return timeout ? 
0 : VFDI_RC_ETIMEDOUT; } -static int efx_vfdi_insert_filter(struct efx_vf *vf) +static int efx_vfdi_insert_filter(struct siena_vf *vf) { struct efx_nic *efx = vf->efx; struct siena_nic_data *nic_data = efx->nic_data; @@ -789,7 +791,7 @@ static int efx_vfdi_insert_filter(struct efx_vf *vf) return VFDI_RC_SUCCESS; } -static int efx_vfdi_remove_all_filters(struct efx_vf *vf) +static int efx_vfdi_remove_all_filters(struct siena_vf *vf) { struct efx_nic *efx = vf->efx; struct siena_nic_data *nic_data = efx->nic_data; @@ -801,7 +803,7 @@ static int efx_vfdi_remove_all_filters(struct efx_vf *vf) return VFDI_RC_SUCCESS; } -static int efx_vfdi_set_status_page(struct efx_vf *vf) +static int efx_vfdi_set_status_page(struct siena_vf *vf) { struct efx_nic *efx = vf->efx; struct siena_nic_data *nic_data = efx->nic_data; @@ -846,7 +848,7 @@ static int efx_vfdi_set_status_page(struct efx_vf *vf) return VFDI_RC_SUCCESS; } -static int efx_vfdi_clear_status_page(struct efx_vf *vf) +static int efx_vfdi_clear_status_page(struct siena_vf *vf) { mutex_lock(&vf->status_lock); vf->status_addr = 0; @@ -855,7 +857,7 @@ static int efx_vfdi_clear_status_page(struct efx_vf *vf) return VFDI_RC_SUCCESS; } -typedef int (*efx_vfdi_op_t)(struct efx_vf *vf); +typedef int (*efx_vfdi_op_t)(struct siena_vf *vf); static const efx_vfdi_op_t vfdi_ops[VFDI_OP_LIMIT] = { [VFDI_OP_INIT_EVQ] = efx_vfdi_init_evq, @@ -870,7 +872,7 @@ static const efx_vfdi_op_t vfdi_ops[VFDI_OP_LIMIT] = { static void efx_siena_sriov_vfdi(struct work_struct *work) { - struct efx_vf *vf = container_of(work, struct efx_vf, req); + struct siena_vf *vf = container_of(work, struct siena_vf, req); struct efx_nic *efx = vf->efx; struct vfdi_req *req = vf->buf.addr; struct efx_memcpy_req copy[2]; @@ -936,7 +938,8 @@ static void efx_siena_sriov_vfdi(struct work_struct *work) * event ring in guest memory with VFDI reset events, then (re-initialise) the * event queue to raise an interrupt. The guest driver will then recover. 
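[Editorial aside, not part of the patch] The vfdi_ops[] table above routes guest VFDI requests by opcode; a rough illustration of that dispatch is sketched below. The demo_ helper is hypothetical, and the VFDI_RC_* codes are assumed to be those defined in vfdi.h:

/* Sketch only: dispatch a request whose opcode has already been read from
 * the VF's shared buffer, falling back to "not supported" for unknown ops.
 */
static int demo_vfdi_dispatch(struct siena_vf *vf, unsigned int op)
{
	if (op < VFDI_OP_LIMIT && vfdi_ops[op] != NULL)
		return vfdi_ops[op](vf);	/* e.g. efx_vfdi_init_evq() */

	return VFDI_RC_EOPNOTSUPP;
}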
*/ -static void efx_siena_sriov_reset_vf(struct efx_vf *vf, + +static void efx_siena_sriov_reset_vf(struct siena_vf *vf, struct efx_buffer *buffer) { struct efx_nic *efx = vf->efx; @@ -1006,7 +1009,7 @@ static void efx_siena_sriov_reset_vf(struct efx_vf *vf, static void efx_siena_sriov_reset_vf_work(struct work_struct *work) { - struct efx_vf *vf = container_of(work, struct efx_vf, req); + struct siena_vf *vf = container_of(work, struct siena_vf, req); struct efx_nic *efx = vf->efx; struct efx_buffer buf; @@ -1055,8 +1058,10 @@ void efx_siena_sriov_probe(struct efx_nic *efx) if (!max_vfs) return; - if (efx_siena_sriov_cmd(efx, false, &efx->vi_scale, &count)) + if (efx_siena_sriov_cmd(efx, false, &efx->vi_scale, &count)) { + netif_info(efx, probe, efx->net_dev, "no SR-IOV VFs probed\n"); return; + } if (count > 0 && count > max_vfs) count = max_vfs; @@ -1077,7 +1082,7 @@ static void efx_siena_sriov_peer_work(struct work_struct *data) peer_work); struct efx_nic *efx = nic_data->efx; struct vfdi_status *vfdi_status = nic_data->vfdi_status.addr; - struct efx_vf *vf; + struct siena_vf *vf; struct efx_local_addr *local_addr; struct vfdi_endpoint *peer; struct efx_endpoint_page *epp; @@ -1099,7 +1104,7 @@ static void efx_siena_sriov_peer_work(struct work_struct *data) peer_space = ARRAY_SIZE(vfdi_status->peers) - 1; peer_count = 1; for (pos = 0; pos < efx->vf_count; ++pos) { - vf = efx->vf + pos; + vf = nic_data->vf + pos; mutex_lock(&vf->status_lock); if (vf->rx_filtering && !is_zero_ether_addr(vf->addr.mac_addr)) { @@ -1155,7 +1160,7 @@ static void efx_siena_sriov_peer_work(struct work_struct *data) /* Finally, push the pages */ for (pos = 0; pos < efx->vf_count; ++pos) { - vf = efx->vf + pos; + vf = nic_data->vf + pos; mutex_lock(&vf->status_lock); if (vf->status_addr) @@ -1190,14 +1195,16 @@ static void efx_siena_sriov_free_local(struct efx_nic *efx) static int efx_siena_sriov_vf_alloc(struct efx_nic *efx) { unsigned index; - struct efx_vf *vf; + struct siena_vf *vf; + struct siena_nic_data *nic_data = efx->nic_data; - efx->vf = kzalloc(sizeof(struct efx_vf) * efx->vf_count, GFP_KERNEL); - if (!efx->vf) + nic_data->vf = kcalloc(efx->vf_count, sizeof(*nic_data->vf), + GFP_KERNEL); + if (!nic_data->vf) return -ENOMEM; for (index = 0; index < efx->vf_count; ++index) { - vf = efx->vf + index; + vf = nic_data->vf + index; vf->efx = efx; vf->index = index; @@ -1216,11 +1223,12 @@ static int efx_siena_sriov_vf_alloc(struct efx_nic *efx) static void efx_siena_sriov_vfs_fini(struct efx_nic *efx) { - struct efx_vf *vf; + struct siena_nic_data *nic_data = efx->nic_data; + struct siena_vf *vf; unsigned int pos; for (pos = 0; pos < efx->vf_count; ++pos) { - vf = efx->vf + pos; + vf = nic_data->vf + pos; efx_nic_free_buffer(efx, &vf->buf); kfree(vf->peer_page_addrs); @@ -1237,7 +1245,7 @@ static int efx_siena_sriov_vfs_init(struct efx_nic *efx) struct siena_nic_data *nic_data = efx->nic_data; unsigned index, devfn, sriov, buftbl_base; u16 offset, stride; - struct efx_vf *vf; + struct siena_vf *vf; int rc; sriov = pci_find_ext_capability(pci_dev, PCI_EXT_CAP_ID_SRIOV); @@ -1250,7 +1258,7 @@ static int efx_siena_sriov_vfs_init(struct efx_nic *efx) buftbl_base = nic_data->vf_buftbl_base; devfn = pci_dev->devfn + offset; for (index = 0; index < efx->vf_count; ++index) { - vf = efx->vf + index; + vf = nic_data->vf + index; /* Reserve buffer entries */ vf->buftbl_base = buftbl_base; @@ -1350,7 +1358,7 @@ fail_pci: fail_vfs: cancel_work_sync(&nic_data->peer_work); efx_siena_sriov_free_local(efx); - 
kfree(efx->vf); + kfree(nic_data->vf); fail_alloc: efx_nic_free_buffer(efx, &nic_data->vfdi_status); fail_status: @@ -1361,7 +1369,7 @@ fail_cmd: void efx_siena_sriov_fini(struct efx_nic *efx) { - struct efx_vf *vf; + struct siena_vf *vf; unsigned int pos; struct siena_nic_data *nic_data = efx->nic_data; @@ -1377,7 +1385,7 @@ void efx_siena_sriov_fini(struct efx_nic *efx) /* Flush all reconfiguration work */ for (pos = 0; pos < efx->vf_count; ++pos) { - vf = efx->vf + pos; + vf = nic_data->vf + pos; cancel_work_sync(&vf->req); cancel_work_sync(&vf->reset_work); } @@ -1388,7 +1396,7 @@ void efx_siena_sriov_fini(struct efx_nic *efx) /* Tear down back-end state */ efx_siena_sriov_vfs_fini(efx); efx_siena_sriov_free_local(efx); - kfree(efx->vf); + kfree(nic_data->vf); efx_nic_free_buffer(efx, &nic_data->vfdi_status); efx_siena_sriov_cmd(efx, false, NULL, NULL); } @@ -1396,7 +1404,7 @@ void efx_siena_sriov_fini(struct efx_nic *efx) void efx_siena_sriov_event(struct efx_channel *channel, efx_qword_t *event) { struct efx_nic *efx = channel->efx; - struct efx_vf *vf; + struct siena_vf *vf; unsigned qid, seq, type, data; qid = EFX_QWORD_FIELD(*event, FSF_CZ_USER_QID); @@ -1452,11 +1460,12 @@ error: void efx_siena_sriov_flr(struct efx_nic *efx, unsigned vf_i) { - struct efx_vf *vf; + struct siena_nic_data *nic_data = efx->nic_data; + struct siena_vf *vf; if (vf_i > efx->vf_init_count) return; - vf = efx->vf + vf_i; + vf = nic_data->vf + vf_i; netif_info(efx, hw, efx->net_dev, "FLR on VF %s\n", vf->pci_name); @@ -1467,21 +1476,23 @@ void efx_siena_sriov_flr(struct efx_nic *efx, unsigned vf_i) vf->evq0_count = 0; } -void efx_siena_sriov_mac_address_changed(struct efx_nic *efx) +int efx_siena_sriov_mac_address_changed(struct efx_nic *efx) { struct siena_nic_data *nic_data = efx->nic_data; struct vfdi_status *vfdi_status = nic_data->vfdi_status.addr; if (!efx->vf_init_count) - return; + return 0; ether_addr_copy(vfdi_status->peers[0].mac_addr, efx->net_dev->dev_addr); queue_work(vfdi_workqueue, &nic_data->peer_work); + + return 0; } void efx_siena_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event) { - struct efx_vf *vf; + struct siena_vf *vf; unsigned queue, qid; queue = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA); @@ -1500,7 +1511,7 @@ void efx_siena_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event) void efx_siena_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event) { - struct efx_vf *vf; + struct siena_vf *vf; unsigned ev_failed, queue, qid; queue = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID); @@ -1525,7 +1536,7 @@ void efx_siena_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event) /* Called from napi. 
Schedule the reset work item */ void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq) { - struct efx_vf *vf; + struct siena_vf *vf; unsigned int rel; if (map_vi_index(efx, dmaq, &vf, &rel)) @@ -1541,9 +1552,10 @@ void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq) /* Reset all VFs */ void efx_siena_sriov_reset(struct efx_nic *efx) { + struct siena_nic_data *nic_data = efx->nic_data; unsigned int vf_i; struct efx_buffer buf; - struct efx_vf *vf; + struct siena_vf *vf; ASSERT_RTNL(); @@ -1557,7 +1569,7 @@ void efx_siena_sriov_reset(struct efx_nic *efx) return; for (vf_i = 0; vf_i < efx->vf_init_count; ++vf_i) { - vf = efx->vf + vf_i; + vf = nic_data->vf + vf_i; efx_siena_sriov_reset_vf(vf, &buf); } @@ -1573,7 +1585,6 @@ int efx_init_sriov(void) vfdi_workqueue = create_singlethread_workqueue("sfc_vfdi"); if (!vfdi_workqueue) return -ENOMEM; - return 0; } @@ -1582,14 +1593,14 @@ void efx_fini_sriov(void) destroy_workqueue(vfdi_workqueue); } -int efx_siena_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac) +int efx_siena_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, u8 *mac) { - struct efx_nic *efx = netdev_priv(net_dev); - struct efx_vf *vf; + struct siena_nic_data *nic_data = efx->nic_data; + struct siena_vf *vf; if (vf_i >= efx->vf_init_count) return -EINVAL; - vf = efx->vf + vf_i; + vf = nic_data->vf + vf_i; mutex_lock(&vf->status_lock); ether_addr_copy(vf->addr.mac_addr, mac); @@ -1599,16 +1610,16 @@ int efx_siena_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac) return 0; } -int efx_siena_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i, +int efx_siena_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i, u16 vlan, u8 qos) { - struct efx_nic *efx = netdev_priv(net_dev); - struct efx_vf *vf; + struct siena_nic_data *nic_data = efx->nic_data; + struct siena_vf *vf; u16 tci; if (vf_i >= efx->vf_init_count) return -EINVAL; - vf = efx->vf + vf_i; + vf = nic_data->vf + vf_i; mutex_lock(&vf->status_lock); tci = (vlan & VLAN_VID_MASK) | ((qos & 0x7) << VLAN_PRIO_SHIFT); @@ -1619,16 +1630,16 @@ int efx_siena_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i, return 0; } -int efx_siena_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i, +int efx_siena_sriov_set_vf_spoofchk(struct efx_nic *efx, int vf_i, bool spoofchk) { - struct efx_nic *efx = netdev_priv(net_dev); - struct efx_vf *vf; + struct siena_nic_data *nic_data = efx->nic_data; + struct siena_vf *vf; int rc; if (vf_i >= efx->vf_init_count) return -EINVAL; - vf = efx->vf + vf_i; + vf = nic_data->vf + vf_i; mutex_lock(&vf->txq_lock); if (vf->txq_count == 0) { @@ -1643,16 +1654,16 @@ int efx_siena_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i, return rc; } -int efx_siena_sriov_get_vf_config(struct net_device *net_dev, int vf_i, +int efx_siena_sriov_get_vf_config(struct efx_nic *efx, int vf_i, struct ifla_vf_info *ivi) { - struct efx_nic *efx = netdev_priv(net_dev); - struct efx_vf *vf; + struct siena_nic_data *nic_data = efx->nic_data; + struct siena_vf *vf; u16 tci; if (vf_i >= efx->vf_init_count) return -EINVAL; - vf = efx->vf + vf_i; + vf = nic_data->vf + vf_i; ivi->vf = vf_i; ether_addr_copy(ivi->mac, vf->addr.mac_addr); @@ -1666,3 +1677,12 @@ int efx_siena_sriov_get_vf_config(struct net_device *net_dev, int vf_i, return 0; } +bool efx_siena_sriov_wanted(struct efx_nic *efx) +{ + return efx->vf_count != 0; +} + +int efx_siena_sriov_configure(struct efx_nic *efx, int num_vfs) +{ + return 0; +} diff --git 
a/kernel/drivers/net/ethernet/sfc/siena_sriov.h b/kernel/drivers/net/ethernet/sfc/siena_sriov.h new file mode 100644 index 000000000..d88d4dab1 --- /dev/null +++ b/kernel/drivers/net/ethernet/sfc/siena_sriov.h @@ -0,0 +1,79 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2015 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#ifndef SIENA_SRIOV_H +#define SIENA_SRIOV_H + +#include "net_driver.h" + +/* On the SFC9000 family each port is associated with 1 PCI physical + * function (PF) handled by sfc and a configurable number of virtual + * functions (VFs) that may be handled by some other driver, often in + * a VM guest. The queue pointer registers are mapped in both PF and + * VF BARs such that an 8K region provides access to a single RX, TX + * and event queue (collectively a Virtual Interface, VI or VNIC). + * + * The PF has access to all 1024 VIs while VFs are mapped to VIs + * according to VI_BASE and VI_SCALE: VF i has access to VIs numbered + * in range [VI_BASE + i << VI_SCALE, VI_BASE + i + 1 << VI_SCALE). + * The number of VIs and the VI_SCALE value are configurable but must + * be established at boot time by firmware. + */ + +/* Maximum VI_SCALE parameter supported by Siena */ +#define EFX_VI_SCALE_MAX 6 +/* Base VI to use for SR-IOV. Must be aligned to (1 << EFX_VI_SCALE_MAX), + * so this is the smallest allowed value. + */ +#define EFX_VI_BASE 128U +/* Maximum number of VFs allowed */ +#define EFX_VF_COUNT_MAX 127 +/* Limit EVQs on VFs to be only 8k to reduce buffer table reservation */ +#define EFX_MAX_VF_EVQ_SIZE 8192UL +/* The number of buffer table entries reserved for each VI on a VF */ +#define EFX_VF_BUFTBL_PER_VI \ + ((EFX_MAX_VF_EVQ_SIZE + 2 * EFX_MAX_DMAQ_SIZE) * \ + sizeof(efx_qword_t) / EFX_BUF_SIZE) + +int efx_siena_sriov_configure(struct efx_nic *efx, int num_vfs); +int efx_siena_sriov_init(struct efx_nic *efx); +void efx_siena_sriov_fini(struct efx_nic *efx); +int efx_siena_sriov_mac_address_changed(struct efx_nic *efx); +bool efx_siena_sriov_wanted(struct efx_nic *efx); +void efx_siena_sriov_reset(struct efx_nic *efx); +void efx_siena_sriov_flr(struct efx_nic *efx, unsigned flr); + +int efx_siena_sriov_set_vf_mac(struct efx_nic *efx, int vf, u8 *mac); +int efx_siena_sriov_set_vf_vlan(struct efx_nic *efx, int vf, + u16 vlan, u8 qos); +int efx_siena_sriov_set_vf_spoofchk(struct efx_nic *efx, int vf, + bool spoofchk); +int efx_siena_sriov_get_vf_config(struct efx_nic *efx, int vf, + struct ifla_vf_info *ivf); + +#ifdef CONFIG_SFC_SRIOV + +static inline bool efx_siena_sriov_enabled(struct efx_nic *efx) +{ + return efx->vf_init_count != 0; +} +#else /* !CONFIG_SFC_SRIOV */ +static inline bool efx_siena_sriov_enabled(struct efx_nic *efx) +{ + return false; +} +#endif /* CONFIG_SFC_SRIOV */ + +void efx_siena_sriov_probe(struct efx_nic *efx); +void efx_siena_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event); +void efx_siena_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event); +void efx_siena_sriov_event(struct efx_channel *channel, efx_qword_t *event); +void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq); + +#endif /* SIENA_SRIOV_H */ diff --git a/kernel/drivers/net/ethernet/sfc/sriov.c 
b/kernel/drivers/net/ethernet/sfc/sriov.c new file mode 100644 index 000000000..816c44689 --- /dev/null +++ b/kernel/drivers/net/ethernet/sfc/sriov.c @@ -0,0 +1,83 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2014-2015 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ +#include <linux/module.h> +#include "net_driver.h" +#include "nic.h" +#include "sriov.h" + +int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + if (efx->type->sriov_set_vf_mac) + return efx->type->sriov_set_vf_mac(efx, vf_i, mac); + else + return -EOPNOTSUPP; +} + +int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i, u16 vlan, + u8 qos) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + if (efx->type->sriov_set_vf_vlan) { + if ((vlan & ~VLAN_VID_MASK) || + (qos & ~(VLAN_PRIO_MASK >> VLAN_PRIO_SHIFT))) + return -EINVAL; + + return efx->type->sriov_set_vf_vlan(efx, vf_i, vlan, qos); + } else { + return -EOPNOTSUPP; + } +} + +int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i, + bool spoofchk) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + if (efx->type->sriov_set_vf_spoofchk) + return efx->type->sriov_set_vf_spoofchk(efx, vf_i, spoofchk); + else + return -EOPNOTSUPP; +} + +int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i, + struct ifla_vf_info *ivi) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + if (efx->type->sriov_get_vf_config) + return efx->type->sriov_get_vf_config(efx, vf_i, ivi); + else + return -EOPNOTSUPP; +} + +int efx_sriov_set_vf_link_state(struct net_device *net_dev, int vf_i, + int link_state) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + if (efx->type->sriov_set_vf_link_state) + return efx->type->sriov_set_vf_link_state(efx, vf_i, + link_state); + else + return -EOPNOTSUPP; +} + +int efx_sriov_get_phys_port_id(struct net_device *net_dev, + struct netdev_phys_item_id *ppid) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + if (efx->type->sriov_get_phys_port_id) + return efx->type->sriov_get_phys_port_id(efx, ppid); + else + return -EOPNOTSUPP; +} diff --git a/kernel/drivers/net/ethernet/sfc/sriov.h b/kernel/drivers/net/ethernet/sfc/sriov.h new file mode 100644 index 000000000..400df5265 --- /dev/null +++ b/kernel/drivers/net/ethernet/sfc/sriov.h @@ -0,0 +1,31 @@ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2014-2015 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. 
+ */ + +#ifndef EFX_SRIOV_H +#define EFX_SRIOV_H + +#include "net_driver.h" + +#ifdef CONFIG_SFC_SRIOV + +int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac); +int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i, u16 vlan, + u8 qos); +int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i, + bool spoofchk); +int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i, + struct ifla_vf_info *ivi); +int efx_sriov_set_vf_link_state(struct net_device *net_dev, int vf_i, + int link_state); +int efx_sriov_get_phys_port_id(struct net_device *net_dev, + struct netdev_phys_item_id *ppid); + +#endif /* CONFIG_SFC_SRIOV */ + +#endif /* EFX_SRIOV_H */ diff --git a/kernel/drivers/net/ethernet/sfc/tx.c b/kernel/drivers/net/ethernet/sfc/tx.c index aaf298751..67f6afaa0 100644 --- a/kernel/drivers/net/ethernet/sfc/tx.c +++ b/kernel/drivers/net/ethernet/sfc/tx.c @@ -431,8 +431,20 @@ finish_packet: efx_tx_maybe_stop_queue(tx_queue); /* Pass off to hardware */ - if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq)) + if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq)) { + struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue); + + /* There could be packets left on the partner queue if those + * SKBs had skb->xmit_more set. If we do not push those they + * could be left for a long time and cause a netdev watchdog. + */ + if (txq2->xmit_more_available) + efx_nic_push_buffers(txq2); + efx_nic_push_buffers(tx_queue); + } else { + tx_queue->xmit_more_available = skb->xmit_more; + } tx_queue->tx_packets++; @@ -617,7 +629,8 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask); efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl); - netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl); + tx_queue->pkts_compl += pkts_compl; + tx_queue->bytes_compl += bytes_compl; if (pkts_compl > 1) ++tx_queue->merge_events; @@ -721,6 +734,7 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue) tx_queue->read_count = 0; tx_queue->old_read_count = 0; tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID; + tx_queue->xmit_more_available = false; /* Set up TX descriptor ring */ efx_nic_init_tx(tx_queue); @@ -746,6 +760,7 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue) ++tx_queue->read_count; } + tx_queue->xmit_more_available = false; netdev_tx_reset_queue(tx_queue->core_txq); } @@ -1301,8 +1316,20 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, efx_tx_maybe_stop_queue(tx_queue); /* Pass off to hardware */ - if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq)) + if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq)) { + struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue); + + /* There could be packets left on the partner queue if those + * SKBs had skb->xmit_more set. If we do not push those they + * could be left for a long time and cause a netdev watchdog. 
+ */ + if (txq2->xmit_more_available) + efx_nic_push_buffers(txq2); + efx_nic_push_buffers(tx_queue); + } else { + tx_queue->xmit_more_available = skb->xmit_more; + } tx_queue->tso_bursts++; return NETDEV_TX_OK; diff --git a/kernel/drivers/net/ethernet/sfc/txc43128_phy.c b/kernel/drivers/net/ethernet/sfc/txc43128_phy.c index 3d5ee3259..194f67d9f 100644 --- a/kernel/drivers/net/ethernet/sfc/txc43128_phy.c +++ b/kernel/drivers/net/ethernet/sfc/txc43128_phy.c @@ -418,7 +418,7 @@ static void txc_reset_logic_mmd(struct efx_nic *efx, int mmd) val |= (1 << TXC_GLCMD_LMTSWRST_LBN); efx_mdio_write(efx, mmd, TXC_GLRGS_GLCMD, val); - while (tries--) { + while (--tries) { val = efx_mdio_read(efx, mmd, TXC_GLRGS_GLCMD); if (!(val & (1 << TXC_GLCMD_LMTSWRST_LBN))) break; |
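[Editorial aside, not part of the patch] To summarise the tx.c hunks above: the TX doorbell is only rung when no further SKBs are expected (or the core queue has been stopped), and the partner TX queue is flushed first so packets it deferred via xmit_more cannot be stranded and trip the netdev watchdog. A minimal sketch of that decision, reusing the helpers visible in the diff:

/* Sketch only: ring the TX doorbell now, or record that more work follows. */
static void demo_tx_push(struct efx_tx_queue *tx_queue, bool xmit_more,
			 bool queue_stopped)
{
	if (!xmit_more || queue_stopped) {
		struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);

		/* Push the partner queue first so its deferred packets
		 * reach the hardware before or with ours.
		 */
		if (txq2->xmit_more_available)
			efx_nic_push_buffers(txq2);
		efx_nic_push_buffers(tx_queue);
	} else {
		tx_queue->xmit_more_available = true;
	}
}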