Diffstat (limited to 'kernel/drivers/net/ethernet')
80 files changed, 967 insertions, 484 deletions
diff --git a/kernel/drivers/net/ethernet/atheros/alx/main.c b/kernel/drivers/net/ethernet/atheros/alx/main.c
index bd377a6b0..df54475d1 100644
--- a/kernel/drivers/net/ethernet/atheros/alx/main.c
+++ b/kernel/drivers/net/ethernet/atheros/alx/main.c
@@ -86,9 +86,14 @@ static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
 	while (!cur_buf->skb && next != rxq->read_idx) {
 		struct alx_rfd *rfd = &rxq->rfd[cur];
 
-		skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size, gfp);
+		skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size + 64, gfp);
 		if (!skb)
 			break;
+
+		/* Workround for the HW RX DMA overflow issue */
+		if (((unsigned long)skb->data & 0xfff) == 0xfc0)
+			skb_reserve(skb, 64);
+
 		dma = dma_map_single(&alx->hw.pdev->dev,
 				     skb->data, alx->rxbuf_size,
 				     DMA_FROM_DEVICE);
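A note on the alx hunk above: the constants make the workaround clearer once decoded. 0xfff masks the offset within a 4 KiB page, and 0xfc0 is 4096 - 64, so the check fires exactly when a freshly allocated RX buffer starts in the last 64 bytes of a page; the 64-byte over-allocation plus skb_reserve() then moves the buffer past that window. A standalone sketch (the "last 64 bytes of a page" reading is inferred from the check itself; names are illustrative):

#include <stdio.h>

/* If an RX buffer begins at offset 0xfc0 within a 4 KiB page (i.e. in
 * its final 64 bytes), shift it forward by 64 bytes, mirroring the
 * skb_reserve(skb, 64) in the driver hunk above.
 */
static unsigned long fixup_rx_buf(unsigned long addr)
{
	if ((addr & 0xfff) == 0xfc0)	/* last 64 bytes of a 4 KiB page */
		addr += 64;
	return addr;
}

int main(void)
{
	printf("%#lx -> %#lx\n", 0x12345fc0UL, fixup_rx_buf(0x12345fc0UL));
	return 0;
}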
diff --git a/kernel/drivers/net/ethernet/atheros/atlx/atl2.c b/kernel/drivers/net/ethernet/atheros/atlx/atl2.c
index 8f76f4558..2ff465848 100644
--- a/kernel/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/kernel/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -1412,7 +1412,7 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	err = -EIO;
 
-	netdev->hw_features = NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_RX;
+	netdev->hw_features = NETIF_F_HW_VLAN_CTAG_RX;
 	netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
 
 	/* Init PHY as early as possible due to power saving issue */
diff --git a/kernel/drivers/net/ethernet/broadcom/bcmsysport.c b/kernel/drivers/net/ethernet/broadcom/bcmsysport.c
index 858106352..8860e74aa 100644
--- a/kernel/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/kernel/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -732,11 +732,8 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
 	unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs;
 	unsigned int pkts_compl = 0, bytes_compl = 0;
 	struct bcm_sysport_cb *cb;
-	struct netdev_queue *txq;
 	u32 hw_ind;
 
-	txq = netdev_get_tx_queue(ndev, ring->index);
-
 	/* Compute how many descriptors have been processed since last call */
 	hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
 	c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
@@ -767,9 +764,6 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
 
 	ring->c_index = c_index;
 
-	if (netif_tx_queue_stopped(txq) && pkts_compl)
-		netif_tx_wake_queue(txq);
-
 	netif_dbg(priv, tx_done, ndev,
 		  "ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n",
 		  ring->index, ring->c_index, pkts_compl, bytes_compl);
@@ -781,16 +775,33 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
 static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
 					   struct bcm_sysport_tx_ring *ring)
 {
+	struct netdev_queue *txq;
 	unsigned int released;
 	unsigned long flags;
 
+	txq = netdev_get_tx_queue(priv->netdev, ring->index);
+
 	spin_lock_irqsave(&ring->lock, flags);
 	released = __bcm_sysport_tx_reclaim(priv, ring);
+	if (released)
+		netif_tx_wake_queue(txq);
+
 	spin_unlock_irqrestore(&ring->lock, flags);
 
 	return released;
}
 
+/* Locked version of the per-ring TX reclaim, but does not wake the queue */
+static void bcm_sysport_tx_clean(struct bcm_sysport_priv *priv,
+				 struct bcm_sysport_tx_ring *ring)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ring->lock, flags);
+	__bcm_sysport_tx_reclaim(priv, ring);
+	spin_unlock_irqrestore(&ring->lock, flags);
+}
+
 static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
 {
 	struct bcm_sysport_tx_ring *ring =
@@ -1275,7 +1286,7 @@ static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
 	napi_disable(&ring->napi);
 	netif_napi_del(&ring->napi);
 
-	bcm_sysport_tx_reclaim(priv, ring);
+	bcm_sysport_tx_clean(priv, ring);
 
 	kfree(ring->cbs);
 	ring->cbs = NULL;
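The systemport hunks above split TX reclaim into two locked variants: the runtime path wakes the TX queue while still holding the ring lock, and the teardown path (bcm_sysport_tx_clean, used from bcm_sysport_fini_tx_ring) reclaims without ever waking, so a stale wake-up cannot race with the ring being freed. A compact standalone restatement of the pattern, with pthread locks and a hypothetical wake_queue() standing in for the kernel primitives:

#include <pthread.h>

struct ring {
	pthread_mutex_t lock;
	unsigned int pending;
};

static unsigned int __reclaim(struct ring *r)	/* caller holds r->lock */
{
	unsigned int done = r->pending;

	r->pending = 0;
	return done;
}

static void wake_queue(struct ring *r) { (void)r; /* stand-in */ }

unsigned int tx_reclaim(struct ring *r)		/* normal datapath */
{
	unsigned int released;

	pthread_mutex_lock(&r->lock);
	released = __reclaim(r);
	if (released)
		wake_queue(r);			/* wake while still locked */
	pthread_mutex_unlock(&r->lock);
	return released;
}

void tx_clean(struct ring *r)			/* teardown: never wakes */
{
	pthread_mutex_lock(&r->lock);
	__reclaim(r);
	pthread_mutex_unlock(&r->lock);
}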
diff --git a/kernel/drivers/net/ethernet/broadcom/bgmac.c b/kernel/drivers/net/ethernet/broadcom/bgmac.c
index 28f7610b0..b56c9c581 100644
--- a/kernel/drivers/net/ethernet/broadcom/bgmac.c
+++ b/kernel/drivers/net/ethernet/broadcom/bgmac.c
@@ -219,7 +219,7 @@ err_dma:
 	dma_unmap_single(dma_dev, slot->dma_addr, skb_headlen(skb),
 			 DMA_TO_DEVICE);
 
-	while (i > 0) {
+	while (i-- > 0) {
 		int index = (ring->end + i) % BGMAC_TX_RING_SLOTS;
 		struct bgmac_slot_info *slot = &ring->slots[index];
 		u32 ctl1 = le32_to_cpu(ring->cpu_base[index].ctl1);
@@ -314,6 +314,10 @@ static void bgmac_dma_rx_enable(struct bgmac *bgmac,
 	u32 ctl;
 
 	ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL);
+
+	/* preserve ONLY bits 16-17 from current hardware value */
+	ctl &= BGMAC_DMA_RX_ADDREXT_MASK;
+
 	if (bgmac->core->id.rev >= 4) {
 		ctl &= ~BGMAC_DMA_RX_BL_MASK;
 		ctl |= BGMAC_DMA_RX_BL_128 << BGMAC_DMA_RX_BL_SHIFT;
@@ -324,7 +328,6 @@ static void bgmac_dma_rx_enable(struct bgmac *bgmac,
 		ctl &= ~BGMAC_DMA_RX_PT_MASK;
 		ctl |= BGMAC_DMA_RX_PT_1 << BGMAC_DMA_RX_PT_SHIFT;
 	}
-	ctl &= BGMAC_DMA_RX_ADDREXT_MASK;
 	ctl |= BGMAC_DMA_RX_ENABLE;
 	ctl |= BGMAC_DMA_RX_PARITY_DISABLE;
 	ctl |= BGMAC_DMA_RX_OVERFLOW_CONT;
diff --git a/kernel/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/kernel/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 2e611dc5f..1c8123816 100644
--- a/kernel/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/kernel/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -14819,6 +14819,10 @@ static int bnx2x_get_fc_npiv(struct net_device *dev,
 	}
 
 	offset = SHMEM2_RD(bp, fc_npiv_nvram_tbl_addr[BP_PORT(bp)]);
+	if (!offset) {
+		DP(BNX2X_MSG_MCP, "No FC-NPIV in NVRAM\n");
+		goto out;
+	}
 	DP(BNX2X_MSG_MCP, "Offset of FC-NPIV in NVRAM: %08x\n", offset);
 
 	/* Read the table contents from nvram */
diff --git a/kernel/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/kernel/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 17f017ab4..91627561c 100644
--- a/kernel/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/kernel/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1168,6 +1168,7 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
 					  struct bcmgenet_tx_ring *ring)
 {
 	struct bcmgenet_priv *priv = netdev_priv(dev);
+	struct device *kdev = &priv->pdev->dev;
 	struct enet_cb *tx_cb_ptr;
 	struct netdev_queue *txq;
 	unsigned int pkts_compl = 0;
@@ -1195,15 +1196,15 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
 			pkts_compl++;
 			dev->stats.tx_packets++;
 			dev->stats.tx_bytes += tx_cb_ptr->skb->len;
-			dma_unmap_single(&dev->dev,
+			dma_unmap_single(kdev,
 					 dma_unmap_addr(tx_cb_ptr, dma_addr),
-					 tx_cb_ptr->skb->len,
+					 dma_unmap_len(tx_cb_ptr, dma_len),
 					 DMA_TO_DEVICE);
 			bcmgenet_free_cb(tx_cb_ptr);
 		} else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) {
 			dev->stats.tx_bytes += dma_unmap_len(tx_cb_ptr, dma_len);
-			dma_unmap_page(&dev->dev,
+			dma_unmap_page(kdev,
 				       dma_unmap_addr(tx_cb_ptr, dma_addr),
 				       dma_unmap_len(tx_cb_ptr, dma_len),
 				       DMA_TO_DEVICE);
@@ -1308,7 +1309,7 @@ static int bcmgenet_xmit_single(struct net_device *dev,
 	}
 
 	dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
-	dma_unmap_len_set(tx_cb_ptr, dma_len, skb->len);
+	dma_unmap_len_set(tx_cb_ptr, dma_len, skb_len);
 	length_status = (skb_len << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
 			(priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT) |
 			DMA_TX_APPEND_CRC;
@@ -1754,6 +1755,7 @@ static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv,
 
 static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
 {
+	struct device *kdev = &priv->pdev->dev;
 	struct enet_cb *cb;
 	int i;
 
@@ -1761,7 +1763,7 @@ static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
 		cb = &priv->rx_cbs[i];
 
 		if (dma_unmap_addr(cb, dma_addr)) {
-			dma_unmap_single(&priv->dev->dev,
+			dma_unmap_single(kdev,
 					 dma_unmap_addr(cb, dma_addr),
 					 priv->rx_buf_len, DMA_FROM_DEVICE);
 			dma_unmap_addr_set(cb, dma_addr, 0);
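The bgmac one-liner above swaps the error-unwind loop to the post-decrement count-down idiom: "while (i-- > 0)" visits slots i-1 down to 0 exactly once each, so every previously mapped descriptor, including slot 0, gets unwound. Minimal standalone demonstration of the idiom:

#include <stdio.h>

int main(void)
{
	int i = 3;

	while (i-- > 0)			/* visits 2, 1, 0 */
		printf("unmap slot %d\n", i);
	return 0;
}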
diff --git a/kernel/drivers/net/ethernet/broadcom/tg3.c b/kernel/drivers/net/ethernet/broadcom/tg3.c
index ca5ac5d6f..49056c33b 100644
--- a/kernel/drivers/net/ethernet/broadcom/tg3.c
+++ b/kernel/drivers/net/ethernet/broadcom/tg3.c
@@ -18142,14 +18142,14 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
 
 	rtnl_lock();
 
-	/* We needn't recover from permanent error */
-	if (state == pci_channel_io_frozen)
-		tp->pcierr_recovery = true;
-
 	/* We probably don't have netdev yet */
 	if (!netdev || !netif_running(netdev))
 		goto done;
 
+	/* We needn't recover from permanent error */
+	if (state == pci_channel_io_frozen)
+		tp->pcierr_recovery = true;
+
 	tg3_phy_stop(tp);
 
 	tg3_netif_stop(tp);
@@ -18246,7 +18246,7 @@ static void tg3_io_resume(struct pci_dev *pdev)
 
 	rtnl_lock();
 
-	if (!netif_running(netdev))
+	if (!netdev || !netif_running(netdev))
 		goto done;
 
 	tg3_full_lock(tp, 0);
diff --git a/kernel/drivers/net/ethernet/cadence/macb.c b/kernel/drivers/net/ethernet/cadence/macb.c
index 169059c92..8d54e7b41 100644
--- a/kernel/drivers/net/ethernet/cadence/macb.c
+++ b/kernel/drivers/net/ethernet/cadence/macb.c
@@ -2405,9 +2405,9 @@ static int macb_init(struct platform_device *pdev)
 	if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
 		val = GEM_BIT(RGMII);
 	else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII &&
-		 (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII))
+		 (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
 		val = MACB_BIT(RMII);
-	else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII))
+	else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
 		val = MACB_BIT(MII);
 
 	if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN)
@@ -2738,7 +2738,7 @@ static int at91ether_init(struct platform_device *pdev)
 }
 
 static const struct macb_config at91sam9260_config = {
-	.caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII,
+	.caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
 	.clk_init = macb_clk_init,
 	.init = macb_init,
 };
@@ -2751,21 +2751,22 @@ static const struct macb_config pc302gem_config = {
 };
 
 static const struct macb_config sama5d2_config = {
-	.caps = 0,
+	.caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
 	.dma_burst_length = 16,
 	.clk_init = macb_clk_init,
 	.init = macb_init,
 };
 
 static const struct macb_config sama5d3_config = {
-	.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
+	.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE
+	      | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
 	.dma_burst_length = 16,
 	.clk_init = macb_clk_init,
 	.init = macb_init,
 };
 
 static const struct macb_config sama5d4_config = {
-	.caps = 0,
+	.caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
 	.dma_burst_length = 4,
 	.clk_init = macb_clk_init,
 	.init = macb_init,
diff --git a/kernel/drivers/net/ethernet/cadence/macb.h b/kernel/drivers/net/ethernet/cadence/macb.h
index d83b0db77..3f385ab94 100644
--- a/kernel/drivers/net/ethernet/cadence/macb.h
+++ b/kernel/drivers/net/ethernet/cadence/macb.h
@@ -398,7 +398,7 @@
 /* Capability mask bits */
 #define MACB_CAPS_ISR_CLEAR_ON_WRITE		0x00000001
 #define MACB_CAPS_USRIO_HAS_CLKEN		0x00000002
-#define MACB_CAPS_USRIO_DEFAULT_IS_MII		0x00000004
+#define MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII	0x00000004
 #define MACB_CAPS_NO_GIGABIT_HALF		0x00000008
 #define MACB_CAPS_FIFO_MODE			0x10000000
 #define MACB_CAPS_GIGABIT_MODE_AVAILABLE	0x20000000
diff --git a/kernel/drivers/net/ethernet/cavium/liquidio/lio_main.c b/kernel/drivers/net/ethernet/cavium/liquidio/lio_main.c
index b89504405..7445da218 100644
--- a/kernel/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/kernel/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -2526,7 +2526,7 @@ static void handle_timestamp(struct octeon_device *oct,
 
 	octeon_swap_8B_data(&resp->timestamp, 1);
 
-	if (unlikely((skb_shinfo(skb)->tx_flags | SKBTX_IN_PROGRESS) != 0)) {
+	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) != 0)) {
 		struct skb_shared_hwtstamps ts;
 		u64 ns = resp->timestamp;
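The liquidio fix above is a one-character operator bug worth spelling out: OR-ing a non-zero flag constant into a bitfield always yields a non-zero result, so the old condition was unconditionally true. AND performs the intended membership test. Standalone illustration (the flag value is a stand-in, not the kernel's):

#include <assert.h>

#define IN_PROGRESS_DEMO 0x04	/* stand-in for SKBTX_IN_PROGRESS */

int main(void)
{
	unsigned char tx_flags = 0x01;	/* flag NOT set */

	assert((tx_flags | IN_PROGRESS_DEMO) != 0);	/* old test: always true */
	assert((tx_flags & IN_PROGRESS_DEMO) == 0);	/* new test: correctly false */
	return 0;
}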
diff --git a/kernel/drivers/net/ethernet/cavium/thunder/nic.h b/kernel/drivers/net/ethernet/cavium/thunder/nic.h
index 39ca6744a..22471d283 100644
--- a/kernel/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/kernel/drivers/net/ethernet/cavium/thunder/nic.h
@@ -116,6 +116,15 @@
 #define NIC_PF_INTR_ID_MBOX0		8
 #define NIC_PF_INTR_ID_MBOX1		9
 
+/* Minimum FIFO level before all packets for the CQ are dropped
+ *
+ * This value ensures that once a packet has been "accepted"
+ * for reception it will not get dropped due to non-availability
+ * of CQ descriptor. An errata in HW mandates this value to be
+ * atleast 0x100.
+ */
+#define NICPF_CQM_MIN_DROP_LEVEL	0x100
+
 /* Global timer for CQ timer thresh interrupts
  * Calculated for SCLK of 700Mhz
  * value written should be a 1/16th of what is expected
diff --git a/kernel/drivers/net/ethernet/cavium/thunder/nic_main.c b/kernel/drivers/net/ethernet/cavium/thunder/nic_main.c
index 5f24d11cb..16baaafed 100644
--- a/kernel/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/kernel/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -309,6 +309,7 @@ static void nic_set_lmac_vf_mapping(struct nicpf *nic)
 static void nic_init_hw(struct nicpf *nic)
 {
 	int i;
+	u64 cqm_cfg;
 
 	/* Enable NIC HW block */
 	nic_reg_write(nic, NIC_PF_CFG, 0x3);
@@ -345,6 +346,11 @@ static void nic_init_hw(struct nicpf *nic)
 	/* Enable VLAN ethertype matching and stripping */
 	nic_reg_write(nic, NIC_PF_RX_ETYPE_0_7,
 		      (2 << 19) | (ETYPE_ALG_VLAN_STRIP << 16) | ETH_P_8021Q);
+
+	/* Check if HW expected value is higher (could be in future chips) */
+	cqm_cfg = nic_reg_read(nic, NIC_PF_CQM_CFG);
+	if (cqm_cfg < NICPF_CQM_MIN_DROP_LEVEL)
+		nic_reg_write(nic, NIC_PF_CQM_CFG, NICPF_CQM_MIN_DROP_LEVEL);
 }
 
 /* Channel parse index configuration */
diff --git a/kernel/drivers/net/ethernet/cavium/thunder/nic_reg.h b/kernel/drivers/net/ethernet/cavium/thunder/nic_reg.h
index dd536be20..fab35a593 100644
--- a/kernel/drivers/net/ethernet/cavium/thunder/nic_reg.h
+++ b/kernel/drivers/net/ethernet/cavium/thunder/nic_reg.h
@@ -21,7 +21,7 @@
 #define NIC_PF_TCP_TIMER			(0x0060)
 #define NIC_PF_BP_CFG				(0x0080)
 #define NIC_PF_RRM_CFG				(0x0088)
-#define NIC_PF_CQM_CF				(0x00A0)
+#define NIC_PF_CQM_CFG				(0x00A0)
 #define NIC_PF_CNM_CF				(0x00A8)
 #define NIC_PF_CNM_STATUS			(0x00B0)
 #define NIC_PF_CQ_AVG_CFG			(0x00C0)
@@ -170,7 +170,6 @@
 #define NIC_QSET_SQ_0_7_DOOR			(0x010838)
 #define NIC_QSET_SQ_0_7_STATUS			(0x010840)
 #define NIC_QSET_SQ_0_7_DEBUG			(0x010848)
-#define NIC_QSET_SQ_0_7_CNM_CHG			(0x010860)
 #define NIC_QSET_SQ_0_7_STAT_0_1		(0x010900)
 #define NIC_QSET_RBDR_0_1_CFG			(0x010C00)
diff --git a/kernel/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/kernel/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index a12b2e38c..ff1d777f3 100644
--- a/kernel/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/kernel/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -380,7 +380,10 @@ static void nicvf_get_regs(struct net_device *dev,
 		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_DOOR, q);
 		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STATUS, q);
 		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_DEBUG, q);
-		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CNM_CHG, q);
+		/* Padding, was NIC_QSET_SQ_0_7_CNM_CHG, which
+		 * produces bus errors when read
+		 */
+		p[i++] = 0;
 		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1, q);
 		reg_offset = NIC_QSET_SQ_0_7_STAT_0_1 | (1 << 3);
 		p[i++] = nicvf_queue_reg_read(nic, reg_offset, q);
diff --git a/kernel/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/kernel/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index dde8dc720..b7093b9cd 100644
--- a/kernel/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/kernel/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -566,8 +566,7 @@ static inline void nicvf_set_rxhash(struct net_device *netdev,
 
 static void nicvf_rcv_pkt_handler(struct net_device *netdev,
 				  struct napi_struct *napi,
-				  struct cmp_queue *cq,
-				  struct cqe_rx_t *cqe_rx, int cqe_type)
+				  struct cqe_rx_t *cqe_rx)
 {
 	struct sk_buff *skb;
 	struct nicvf *nic = netdev_priv(netdev);
@@ -583,7 +582,7 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
 	}
 
 	/* Check for errors */
-	err = nicvf_check_cqe_rx_errs(nic, cq, cqe_rx);
+	err = nicvf_check_cqe_rx_errs(nic, cqe_rx);
 	if (err && !cqe_rx->rb_cnt)
 		return;
 
@@ -674,8 +673,7 @@ loop:
 			   cq_idx, cq_desc->cqe_type);
 		switch (cq_desc->cqe_type) {
 		case CQE_TYPE_RX:
-			nicvf_rcv_pkt_handler(netdev, napi, cq,
-					      cq_desc, CQE_TYPE_RX);
+			nicvf_rcv_pkt_handler(netdev, napi, cq_desc);
 			work_done++;
 			break;
 		case CQE_TYPE_SEND:
@@ -1117,7 +1115,6 @@ int nicvf_stop(struct net_device *netdev)
 
 	/* Clear multiqset info */
 	nic->pnicvf = nic;
-	nic->sqs_count = 0;
 
 	return 0;
 }
@@ -1346,6 +1343,9 @@ void nicvf_update_stats(struct nicvf *nic)
 	drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok +
 				  stats->tx_bcast_frames_ok +
 				  stats->tx_mcast_frames_ok;
+	drv_stats->rx_frames_ok = stats->rx_ucast_frames +
+				  stats->rx_bcast_frames +
+				  stats->rx_mcast_frames;
 	drv_stats->rx_drops = stats->rx_drop_red +
 			      stats->rx_drop_overrun;
 	drv_stats->tx_drops = stats->tx_drops;
diff --git a/kernel/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/kernel/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index 206b6a71a..912ee28ab 100644
--- a/kernel/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/kernel/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -550,6 +550,7 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
 	nicvf_config_vlan_stripping(nic, nic->netdev->features);
 
 	/* Enable Receive queue */
+	memset(&rq_cfg, 0, sizeof(struct rq_cfg));
 	rq_cfg.ena = 1;
 	rq_cfg.tcp_ena = 0;
 	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, *(u64 *)&rq_cfg);
@@ -582,6 +583,7 @@ void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
 			      qidx, (u64)(cq->dmem.phys_base));
 
 	/* Enable Completion queue */
+	memset(&cq_cfg, 0, sizeof(struct cq_cfg));
 	cq_cfg.ena = 1;
 	cq_cfg.reset = 0;
 	cq_cfg.caching = 0;
@@ -630,6 +632,7 @@ static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs,
 			      qidx, (u64)(sq->dmem.phys_base));
 
 	/* Enable send queue & set queue size */
+	memset(&sq_cfg, 0, sizeof(struct sq_cfg));
 	sq_cfg.ena = 1;
 	sq_cfg.reset = 0;
 	sq_cfg.ldwb = 0;
@@ -666,6 +669,7 @@ static void nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs,
 
 	/* Enable RBDR & set queue size */
 	/* Buffer size should be in multiples of 128 bytes */
+	memset(&rbdr_cfg, 0, sizeof(struct rbdr_cfg));
 	rbdr_cfg.ena = 1;
 	rbdr_cfg.reset = 0;
 	rbdr_cfg.ldwb = 0;
@@ -1410,16 +1414,12 @@ void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
 }
 
 /* Check for errors in the receive cmp.queue entry */
-int nicvf_check_cqe_rx_errs(struct nicvf *nic,
-			    struct cmp_queue *cq, struct cqe_rx_t *cqe_rx)
+int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
 {
 	struct nicvf_hw_stats *stats = &nic->hw_stats;
-	struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
 
-	if (!cqe_rx->err_level && !cqe_rx->err_opcode) {
-		drv_stats->rx_frames_ok++;
+	if (!cqe_rx->err_level && !cqe_rx->err_opcode)
 		return 0;
-	}
 
 	if (netif_msg_rx_err(nic))
 		netdev_err(nic->netdev,
diff --git a/kernel/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/kernel/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index 033e8306e..5652c612e 100644
--- a/kernel/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/kernel/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -344,8 +344,7 @@ u64 nicvf_queue_reg_read(struct nicvf *nic,
 /* Stats */
 void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx);
 void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx);
-int nicvf_check_cqe_rx_errs(struct nicvf *nic,
-			    struct cmp_queue *cq, struct cqe_rx_t *cqe_rx);
+int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx);
 int nicvf_check_cqe_tx_errs(struct nicvf *nic,
 			    struct cmp_queue *cq, struct cqe_send_t *cqe_tx);
 #endif /* NICVF_QUEUES_H */
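The memset() lines added to the thunder queue-config paths above matter because each *_cfg is a stack bitfield struct that is written to hardware as a raw u64; any field the function does not explicitly assign would otherwise carry stack garbage straight into the register. A sketch of the pattern (the struct layout is illustrative, not the real sq_cfg):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct demo_cfg {			/* stand-in for struct sq_cfg etc. */
	uint64_t ena:1;
	uint64_t reset:1;
	uint64_t ldwb:1;
	uint64_t reserved:61;		/* must reach the HW as zeros */
};

static uint64_t build_cfg(void)
{
	struct demo_cfg cfg;

	memset(&cfg, 0, sizeof(cfg));	/* the fix: start from all-zeros */
	cfg.ena = 1;
	return *(uint64_t *)&cfg;	/* mirrors the *(u64 *)&sq_cfg write */
}

int main(void)
{
	printf("register value: %#llx\n", (unsigned long long)build_cfg());
	return 0;
}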
diff --git a/kernel/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/kernel/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 9df26c226..42718cc7d 100644
--- a/kernel/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/kernel/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -549,7 +549,9 @@ static int bgx_xaui_check_link(struct lmac *lmac)
 	}
 
 	/* Clear rcvflt bit (latching high) and read it back */
-	bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
+	if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT)
+		bgx_reg_modify(bgx, lmacid,
+			       BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
 	if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
 		dev_err(&bgx->pdev->dev, "Receive fault, retry training\n");
 		if (bgx->use_training) {
@@ -568,13 +570,6 @@ static int bgx_xaui_check_link(struct lmac *lmac)
 		return -1;
 	}
 
-	/* Wait for MAC RX to be ready */
-	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_RX_CTL,
-			 SMU_RX_CTL_STATUS, true)) {
-		dev_err(&bgx->pdev->dev, "SMU RX link not okay\n");
-		return -1;
-	}
-
 	/* Wait for BGX RX to be idle */
 	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_RX_IDLE, false)) {
 		dev_err(&bgx->pdev->dev, "SMU RX not idle\n");
@@ -587,29 +582,30 @@ static int bgx_xaui_check_link(struct lmac *lmac)
 		return -1;
 	}
 
-	if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
-		dev_err(&bgx->pdev->dev, "Receive fault\n");
-		return -1;
-	}
-
-	/* Receive link is latching low. Force it high and verify it */
-	bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
-	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_STATUS1,
-			 SPU_STATUS1_RCV_LNK, false)) {
-		dev_err(&bgx->pdev->dev, "SPU receive link down\n");
-		return -1;
-	}
-
+	/* Clear receive packet disable */
 	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL);
 	cfg &= ~SPU_MISC_CTL_RX_DIS;
 	bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg);
-	return 0;
+
+	/* Check for MAC RX faults */
+	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_CTL);
+	/* 0 - Link is okay, 1 - Local fault, 2 - Remote fault */
+	cfg &= SMU_RX_CTL_STATUS;
+	if (!cfg)
+		return 0;
+
+	/* Rx local/remote fault seen.
+	 * Do lmac reinit to see if condition recovers
+	 */
+	bgx_lmac_xaui_init(bgx, lmacid, bgx->lmac_type);
+
+	return -1;
 }
 
 static void bgx_poll_for_link(struct work_struct *work)
 {
 	struct lmac *lmac;
-	u64 link;
+	u64 spu_link, smu_link;
 
 	lmac = container_of(work, struct lmac, dwork.work);
@@ -619,8 +615,11 @@ static void bgx_poll_for_link(struct work_struct *work)
 	bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1,
 		     SPU_STATUS1_RCV_LNK, false);
 
-	link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
-	if (link & SPU_STATUS1_RCV_LNK) {
+	spu_link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
+	smu_link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SMUX_RX_CTL);
+
+	if ((spu_link & SPU_STATUS1_RCV_LNK) &&
+	    !(smu_link & SMU_RX_CTL_STATUS)) {
 		lmac->link_up = 1;
 		if (lmac->bgx->lmac_type == BGX_MODE_XLAUI)
 			lmac->last_speed = 40000;
@@ -634,9 +633,15 @@ static void bgx_poll_for_link(struct work_struct *work)
 	}
 
 	if (lmac->last_link != lmac->link_up) {
+		if (lmac->link_up) {
+			if (bgx_xaui_check_link(lmac)) {
+				/* Errors, clear link_up state */
+				lmac->link_up = 0;
+				lmac->last_speed = SPEED_UNKNOWN;
+				lmac->last_duplex = DUPLEX_UNKNOWN;
+			}
+		}
 		lmac->last_link = lmac->link_up;
-		if (lmac->link_up)
-			bgx_xaui_check_link(lmac);
 	}
 
 	queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 2);
@@ -708,7 +713,7 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
 static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
 {
 	struct lmac *lmac;
-	u64 cmrx_cfg;
+	u64 cfg;
 
 	lmac = &bgx->lmac[lmacid];
 	if (lmac->check_link) {
@@ -717,9 +722,33 @@ static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
 		destroy_workqueue(lmac->check_link);
 	}
 
-	cmrx_cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
-	cmrx_cfg &= ~(1 << 15);
-	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cmrx_cfg);
+	/* Disable packet reception */
+	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+	cfg &= ~CMR_PKT_RX_EN;
+	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+
+	/* Give chance for Rx/Tx FIFO to get drained */
+	bgx_poll_reg(bgx, lmacid, BGX_CMRX_RX_FIFO_LEN, (u64)0x1FFF, true);
+	bgx_poll_reg(bgx, lmacid, BGX_CMRX_TX_FIFO_LEN, (u64)0x3FFF, true);
+
+	/* Disable packet transmission */
+	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+	cfg &= ~CMR_PKT_TX_EN;
+	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+
+	/* Disable serdes lanes */
+	if (!lmac->is_sgmii)
+		bgx_reg_modify(bgx, lmacid,
+			       BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
+	else
+		bgx_reg_modify(bgx, lmacid,
+			       BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_PWR_DN);
+
+	/* Disable LMAC */
+	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+	cfg &= ~CMR_EN;
+	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+
 	bgx_flush_dmac_addrs(bgx, lmacid);
 
 	if ((bgx->lmac_type != BGX_MODE_XFI) &&
diff --git a/kernel/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/kernel/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
index 149e17936..42010d2e5 100644
--- a/kernel/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+++ b/kernel/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -41,6 +41,7 @@
 #define BGX_CMRX_RX_STAT10		0xC0
 #define BGX_CMRX_RX_BP_DROP		0xC8
 #define BGX_CMRX_RX_DMAC_CTL		0x0E8
+#define BGX_CMRX_RX_FIFO_LEN		0x108
 #define BGX_CMR_RX_DMACX_CAM		0x200
 #define  RX_DMACX_CAM_EN		BIT_ULL(48)
 #define  RX_DMACX_CAM_LMACID(x)		(x << 49)
@@ -50,6 +51,7 @@
 #define BGX_CMR_CHAN_MSK_AND		0x450
 #define BGX_CMR_BIST_STATUS		0x460
 #define BGX_CMR_RX_LMACS		0x468
+#define BGX_CMRX_TX_FIFO_LEN		0x518
 #define BGX_CMRX_TX_STAT0		0x600
 #define BGX_CMRX_TX_STAT1		0x608
 #define BGX_CMRX_TX_STAT2		0x610
diff --git a/kernel/drivers/net/ethernet/freescale/fec_main.c b/kernel/drivers/net/ethernet/freescale/fec_main.c
index b2a32209f..ab716042b 100644
--- a/kernel/drivers/net/ethernet/freescale/fec_main.c
+++ b/kernel/drivers/net/ethernet/freescale/fec_main.c
@@ -944,11 +944,11 @@ fec_restart(struct net_device *ndev)
 	 * enet-mac reset will reset mac address registers too,
 	 * so need to reconfigure it.
 	 */
-	if (fep->quirks & FEC_QUIRK_ENET_MAC) {
-		memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
-		writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW);
-		writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH);
-	}
+	memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
+	writel((__force u32)cpu_to_be32(temp_mac[0]),
+	       fep->hwp + FEC_ADDR_LOW);
+	writel((__force u32)cpu_to_be32(temp_mac[1]),
+	       fep->hwp + FEC_ADDR_HIGH);
 
 	/* Clear any outstanding interrupt. */
 	writel(0xffffffff, fep->hwp + FEC_IEVENT);
@@ -1557,9 +1557,15 @@ fec_enet_rx(struct net_device *ndev, int budget)
 	struct fec_enet_private *fep = netdev_priv(ndev);
 
 	for_each_set_bit(queue_id, &fep->work_rx, FEC_ENET_MAX_RX_QS) {
-		clear_bit(queue_id, &fep->work_rx);
-		pkt_received += fec_enet_rx_queue(ndev,
+		int ret;
+
+		ret = fec_enet_rx_queue(ndev,
 					budget - pkt_received, queue_id);
+
+		if (ret < budget - pkt_received)
+			clear_bit(queue_id, &fep->work_rx);
+
+		pkt_received += ret;
 	}
 	return pkt_received;
 }
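The fec_enet_rx() change above reorders the bookkeeping: a queue's work-pending bit is now cleared only when the queue was drained within the remaining budget; if the budget ran out, the bit stays set so the next NAPI poll revisits the queue instead of stranding packets until the next interrupt. A standalone sketch of that pattern (process_queue() is a hypothetical stand-in):

#include <stdio.h>

static int process_queue(int q, int budget)	/* hypothetical stand-in */
{
	(void)q;
	return budget;				/* pretend budget was exhausted */
}

static int poll_rx(unsigned int *work_pending, int nqueues, int budget)
{
	int q, done = 0;

	for (q = 0; q < nqueues; q++) {
		int ret;

		if (!(*work_pending & (1u << q)))
			continue;
		ret = process_queue(q, budget - done);
		if (ret < budget - done)	/* queue fully drained */
			*work_pending &= ~(1u << q);
		done += ret;
	}
	return done;
}

int main(void)
{
	unsigned int pending = 0x1;

	printf("rx %d, pending now %#x\n", poll_rx(&pending, 3, 16), pending);
	return 0;	/* bit stays set: the next poll revisits queue 0 */
}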
diff --git a/kernel/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/kernel/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
index bdbd80423..9ff2881f9 100644
--- a/kernel/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
+++ b/kernel/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
@@ -900,9 +900,7 @@
 static inline void dsaf_write_reg(void __iomem *base, u32 reg, u32 value)
 {
-	u8 __iomem *reg_addr = ACCESS_ONCE(base);
-
-	writel(value, reg_addr + reg);
+	writel(value, base + reg);
 }
 
 #define dsaf_write_dev(a, reg, value) \
@@ -910,9 +908,7 @@ static inline void dsaf_write_reg(void __iomem *base, u32 reg, u32 value)
 
 static inline u32 dsaf_read_reg(u8 __iomem *base, u32 reg)
 {
-	u8 __iomem *reg_addr = ACCESS_ONCE(base);
-
-	return readl(reg_addr + reg);
+	return readl(base + reg);
 }
 
 #define dsaf_read_dev(a, reg) \
diff --git a/kernel/drivers/net/ethernet/intel/e1000/e1000.h b/kernel/drivers/net/ethernet/intel/e1000/e1000.h
index 69707108d..98fe5a2cd 100644
--- a/kernel/drivers/net/ethernet/intel/e1000/e1000.h
+++ b/kernel/drivers/net/ethernet/intel/e1000/e1000.h
@@ -213,8 +213,11 @@ struct e1000_rx_ring {
 };
 
 #define E1000_DESC_UNUSED(R)						\
-	((((R)->next_to_clean > (R)->next_to_use)			\
-	  ? 0 : (R)->count) + (R)->next_to_clean - (R)->next_to_use - 1)
+({									\
+	unsigned int clean = smp_load_acquire(&(R)->next_to_clean);	\
+	unsigned int use = READ_ONCE((R)->next_to_use);			\
+	(clean > use ? 0 : (R)->count) + clean - use - 1;		\
+})
 
 #define E1000_RX_DESC_EXT(R, i)						\
 	(&(((union e1000_rx_desc_extended *)((R).desc))[i]))
diff --git a/kernel/drivers/net/ethernet/intel/e1000/e1000_main.c b/kernel/drivers/net/ethernet/intel/e1000/e1000_main.c
index fd7be860c..068023595 100644
--- a/kernel/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/kernel/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -3876,7 +3876,10 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
 		eop_desc = E1000_TX_DESC(*tx_ring, eop);
 	}
 
-	tx_ring->next_to_clean = i;
+	/* Synchronize with E1000_DESC_UNUSED called from e1000_xmit_frame,
+	 * which will reuse the cleaned buffers.
+	 */
+	smp_store_release(&tx_ring->next_to_clean, i);
 
 	netdev_completed_queue(netdev, pkts_compl, bytes_compl);
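The e1000 pair above (smp_load_acquire in E1000_DESC_UNUSED, smp_store_release in e1000_clean_tx_irq) is the classic release/acquire ring handshake: the cleaning side publishes next_to_clean only after it has finished recycling the buffers, and the transmit side's acquire load guarantees it observes those writes before reusing the slots. The same pattern in portable C11 atomics (a sketch, not the driver's code):

#include <stdatomic.h>

#define RING 8

static int slots[RING];			/* stands in for TX buffer state */
static atomic_uint next_to_clean;

void cleaner(unsigned int i)		/* IRQ/NAPI side */
{
	slots[i % RING] = 0;		/* finish recycling the buffer... */
	/* ...then publish: release orders the write above before the store */
	atomic_store_explicit(&next_to_clean, i + 1, memory_order_release);
}

int unused_descriptors(unsigned int next_to_use, unsigned int count)
{
	/* acquire pairs with the release above: a reader that sees the new
	 * index is guaranteed to also see the recycled slot contents
	 */
	unsigned int clean = atomic_load_explicit(&next_to_clean,
						  memory_order_acquire);
	return (clean > next_to_use ? 0 : count) + clean - next_to_use - 1;
}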
diff --git a/kernel/drivers/net/ethernet/intel/e1000e/netdev.c b/kernel/drivers/net/ethernet/intel/e1000e/netdev.c
index 0a854a47d..80ec587d5 100644
--- a/kernel/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/kernel/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -1959,8 +1959,10 @@ static irqreturn_t e1000_intr_msix_rx(int __always_unused irq, void *data)
 	 * previous interrupt.
 	 */
 	if (rx_ring->set_itr) {
-		writel(1000000000 / (rx_ring->itr_val * 256),
-		       rx_ring->itr_register);
+		u32 itr = rx_ring->itr_val ?
+			  1000000000 / (rx_ring->itr_val * 256) : 0;
+
+		writel(itr, rx_ring->itr_register);
 		rx_ring->set_itr = 0;
 	}
diff --git a/kernel/drivers/net/ethernet/intel/fm10k/fm10k.h b/kernel/drivers/net/ethernet/intel/fm10k/fm10k.h
index 144402004..48809e5d3 100644
--- a/kernel/drivers/net/ethernet/intel/fm10k/fm10k.h
+++ b/kernel/drivers/net/ethernet/intel/fm10k/fm10k.h
@@ -33,7 +33,7 @@
 #include "fm10k_pf.h"
 #include "fm10k_vf.h"
 
-#define FM10K_MAX_JUMBO_FRAME_SIZE	15358	/* Maximum supported size 15K */
+#define FM10K_MAX_JUMBO_FRAME_SIZE	15342	/* Maximum supported size 15K */
 
 #define MAX_QUEUES	FM10K_MAX_QUEUES_PF
diff --git a/kernel/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/kernel/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index e76a44cf3..09281558b 100644
--- a/kernel/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/kernel/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -1428,6 +1428,10 @@ static int fm10k_poll(struct napi_struct *napi, int budget)
 	fm10k_for_each_ring(ring, q_vector->tx)
 		clean_complete &= fm10k_clean_tx_irq(q_vector, ring);
 
+	/* Handle case where we are called by netpoll with a budget of 0 */
+	if (budget <= 0)
+		return budget;
+
 	/* attempt to distribute budget to each queue fairly, but don't
 	 * allow the budget to go below 1 because we'll exit polling
 	 */
@@ -1966,8 +1970,10 @@ int fm10k_init_queueing_scheme(struct fm10k_intfc *interface)
 
 	/* Allocate memory for queues */
 	err = fm10k_alloc_q_vectors(interface);
-	if (err)
+	if (err) {
+		fm10k_reset_msix_capability(interface);
 		return err;
+	}
 
 	/* Map rings to devices, and map devices to physical queues */
 	fm10k_assign_rings(interface);
diff --git a/kernel/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/kernel/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index 74be792f3..7f3fb51bc 100644
--- a/kernel/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/kernel/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -159,13 +159,30 @@ static void fm10k_reinit(struct fm10k_intfc *interface)
 
 	fm10k_mbx_free_irq(interface);
 
+	/* free interrupts */
+	fm10k_clear_queueing_scheme(interface);
+
 	/* delay any future reset requests */
 	interface->last_reset = jiffies + (10 * HZ);
 
 	/* reset and initialize the hardware so it is in a known state */
-	err = hw->mac.ops.reset_hw(hw) ? : hw->mac.ops.init_hw(hw);
-	if (err)
+	err = hw->mac.ops.reset_hw(hw);
+	if (err) {
+		dev_err(&interface->pdev->dev, "reset_hw failed: %d\n", err);
+		goto reinit_err;
+	}
+
+	err = hw->mac.ops.init_hw(hw);
+	if (err) {
 		dev_err(&interface->pdev->dev, "init_hw failed: %d\n", err);
+		goto reinit_err;
+	}
+
+	err = fm10k_init_queueing_scheme(interface);
+	if (err) {
+		dev_err(&interface->pdev->dev,
+			"init_queueing_scheme failed: %d\n", err);
+		goto reinit_err;
+	}
 
 	/* reassociate interrupts */
 	fm10k_mbx_request_irq(interface);
@@ -193,6 +210,10 @@ static void fm10k_reinit(struct fm10k_intfc *interface)
 
 	fm10k_iov_resume(interface->pdev);
 
+reinit_err:
+	if (err)
+		netif_device_detach(netdev);
+
 	rtnl_unlock();
 
 	clear_bit(__FM10K_RESETTING, &interface->state);
@@ -1101,6 +1122,10 @@ void fm10k_mbx_free_irq(struct fm10k_intfc *interface)
 	struct fm10k_hw *hw = &interface->hw;
 	int itr_reg;
 
+	/* no mailbox IRQ to free if MSI-X is not enabled */
+	if (!interface->msix_entries)
+		return;
+
 	/* disconnect the mailbox */
 	hw->mbx.ops.disconnect(hw, &hw->mbx);
@@ -1423,10 +1448,15 @@ int fm10k_mbx_request_irq(struct fm10k_intfc *interface)
 		err = fm10k_mbx_request_irq_pf(interface);
 	else
 		err = fm10k_mbx_request_irq_vf(interface);
+	if (err)
+		return err;
 
 	/* connect mailbox */
-	if (!err)
-		err = hw->mbx.ops.connect(hw, &hw->mbx);
+	err = hw->mbx.ops.connect(hw, &hw->mbx);
+
+	/* if the mailbox failed to connect, then free IRQ */
+	if (err)
+		fm10k_mbx_free_irq(interface);
 
 	return err;
 }
@@ -1684,7 +1714,13 @@ static int fm10k_sw_init(struct fm10k_intfc *interface,
 	interface->last_reset = jiffies + (10 * HZ);
 
 	/* reset and initialize the hardware so it is in a known state */
-	err = hw->mac.ops.reset_hw(hw) ? : hw->mac.ops.init_hw(hw);
+	err = hw->mac.ops.reset_hw(hw);
+	if (err) {
+		dev_err(&pdev->dev, "reset_hw failed: %d\n", err);
+		return err;
+	}
+
+	err = hw->mac.ops.init_hw(hw);
 	if (err) {
 		dev_err(&pdev->dev, "init_hw failed: %d\n", err);
 		return err;
@@ -2071,8 +2107,10 @@ static int fm10k_resume(struct pci_dev *pdev)
 
 	/* reset hardware to known state */
 	err = hw->mac.ops.init_hw(&interface->hw);
-	if (err)
+	if (err) {
+		dev_err(&pdev->dev, "init_hw failed: %d\n", err);
 		return err;
+	}
 
 	/* reset statistics starting values */
 	hw->mac.ops.rebind_hw_stats(hw, &interface->stats);
@@ -2185,6 +2223,9 @@ static pci_ers_result_t fm10k_io_error_detected(struct pci_dev *pdev,
 	if (netif_running(netdev))
 		fm10k_close(netdev);
 
+	/* free interrupts */
+	fm10k_clear_queueing_scheme(interface);
+
 	fm10k_mbx_free_irq(interface);
 
 	pci_disable_device(pdev);
@@ -2248,11 +2289,21 @@ static void fm10k_io_resume(struct pci_dev *pdev)
 	int err = 0;
 
 	/* reset hardware to known state */
-	hw->mac.ops.init_hw(&interface->hw);
+	err = hw->mac.ops.init_hw(&interface->hw);
+	if (err) {
+		dev_err(&pdev->dev, "init_hw failed: %d\n", err);
+		return;
+	}
 
 	/* reset statistics starting values */
 	hw->mac.ops.rebind_hw_stats(hw, &interface->stats);
 
+	err = fm10k_init_queueing_scheme(interface);
+	if (err) {
+		dev_err(&interface->pdev->dev,
+			"init_queueing_scheme failed: %d\n", err);
+		return;
+	}
+
 	/* reassociate interrupts */
 	fm10k_mbx_request_irq(interface);
diff --git a/kernel/drivers/net/ethernet/intel/fm10k/fm10k_type.h b/kernel/drivers/net/ethernet/intel/fm10k/fm10k_type.h
index 318a212f0..35afd711d 100644
--- a/kernel/drivers/net/ethernet/intel/fm10k/fm10k_type.h
+++ b/kernel/drivers/net/ethernet/intel/fm10k/fm10k_type.h
@@ -77,6 +77,7 @@ struct fm10k_hw;
 #define FM10K_PCIE_SRIOV_CTRL_VFARI		0x10
 
 #define FM10K_ERR_PARAM				-2
+#define FM10K_ERR_NO_RESOURCES			-3
 #define FM10K_ERR_REQUESTS_PENDING		-4
 #define FM10K_ERR_RESET_REQUESTED		-5
 #define FM10K_ERR_DMA_PENDING			-6
diff --git a/kernel/drivers/net/ethernet/intel/fm10k/fm10k_vf.c b/kernel/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
index 36c8b0aa0..d512575c3 100644
--- a/kernel/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
+++ b/kernel/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
@@ -103,7 +103,14 @@ static s32 fm10k_init_hw_vf(struct fm10k_hw *hw)
 	s32 err;
 	u16 i;
 
-	/* assume we always have at least 1 queue */
+	/* verify we have at least 1 queue */
+	if (!~fm10k_read_reg(hw, FM10K_TXQCTL(0)) ||
+	    !~fm10k_read_reg(hw, FM10K_RXQCTL(0))) {
+		err = FM10K_ERR_NO_RESOURCES;
+		goto reset_max_queues;
+	}
+
+	/* determine how many queues we have */
 	for (i = 1; tqdloc0 && (i < FM10K_MAX_QUEUES_POOL); i++) {
 		/* verify the Descriptor cache offsets are increasing */
 		tqdloc = ~fm10k_read_reg(hw, FM10K_TQDLOC(i));
@@ -119,7 +126,7 @@ static s32 fm10k_init_hw_vf(struct fm10k_hw *hw)
 	/* shut down queues we own and reset DMA configuration */
 	err = fm10k_disable_queues_generic(hw, i);
 	if (err)
-		return err;
+		goto reset_max_queues;
 
 	/* record maximum queue count */
 	hw->mac.max_queues = i;
@@ -129,6 +136,11 @@ static s32 fm10k_init_hw_vf(struct fm10k_hw *hw)
 			       FM10K_TXQCTL_VID_MASK) >> FM10K_TXQCTL_VID_SHIFT;
 
 	return 0;
+
+reset_max_queues:
+	hw->mac.max_queues = 0;
+
+	return err;
 }
 
 /* This structure defines the attibutes to be parsed below */
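The fm10k check above leans on a PCIe convention: a read that hits an absent or unassigned register typically returns all-ones, so ~val == 0 (written "!~val") is a compact "does this queue really exist?" probe for TX/RX queue 0. Standalone illustration:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t missing = 0xFFFFFFFFu;	/* read from an absent queue */
	uint32_t present = 0x00008000u;	/* a plausible real register value */

	assert(!~missing);		/* ~0xFFFFFFFF == 0 -> probe fires */
	assert(!!~present);		/* a real value leaves ~val non-zero */
	return 0;
}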
diff --git a/kernel/drivers/net/ethernet/intel/i40e/i40e.h b/kernel/drivers/net/ethernet/intel/i40e/i40e.h
index 4dd3e2612..7e258a83c 100644
--- a/kernel/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/kernel/drivers/net/ethernet/intel/i40e/i40e.h
@@ -767,6 +767,8 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid);
 int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid);
 struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
 					     bool is_vf, bool is_netdev);
+int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, u8 *macaddr,
+			  bool is_vf, bool is_netdev);
 bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi);
 struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
 				      bool is_vf, bool is_netdev);
diff --git a/kernel/drivers/net/ethernet/intel/i40e/i40e_common.c b/kernel/drivers/net/ethernet/intel/i40e/i40e_common.c
index 2d74c6e4d..1cf715c72 100644
--- a/kernel/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/kernel/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -302,13 +302,15 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
 		   void *buffer, u16 buf_len)
 {
 	struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
-	u16 len = le16_to_cpu(aq_desc->datalen);
+	u16 len;
 	u8 *buf = (u8 *)buffer;
 	u16 i = 0;
 
 	if ((!(mask & hw->debug_mask)) || (desc == NULL))
 		return;
 
+	len = le16_to_cpu(aq_desc->datalen);
+
 	i40e_debug(hw, mask,
 		   "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
 		   le16_to_cpu(aq_desc->opcode),
diff --git a/kernel/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/kernel/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 3f385ffe4..488a50d59 100644
--- a/kernel/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/kernel/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -2164,8 +2164,7 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
 	case TCP_V4_FLOW:
 		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
 		case 0:
-			hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
-			break;
+			return -EINVAL;
 		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
 			hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
 			break;
@@ -2176,8 +2175,7 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
 	case TCP_V6_FLOW:
 		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
 		case 0:
-			hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
-			break;
+			return -EINVAL;
 		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
 			hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
 			break;
@@ -2188,9 +2186,7 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
 	case UDP_V4_FLOW:
 		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
 		case 0:
-			hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
-				  BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
-			break;
+			return -EINVAL;
 		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
 			hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
 				 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
@@ -2202,9 +2198,7 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
 	case UDP_V6_FLOW:
 		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
 		case 0:
-			hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
-				  BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
-			break;
+			return -EINVAL;
 		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
 			hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
 				 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
diff --git a/kernel/drivers/net/ethernet/intel/i40e/i40e_main.c b/kernel/drivers/net/ethernet/intel/i40e/i40e_main.c
index 4a9873ec2..4edbab6ca 100644
--- a/kernel/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/kernel/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1317,6 +1317,42 @@ struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
 }
 
 /**
+ * i40e_del_mac_all_vlan - Remove a MAC filter from all VLANS
+ * @vsi: the VSI to be searched
+ * @macaddr: the mac address to be removed
+ * @is_vf: true if it is a VF
+ * @is_netdev: true if it is a netdev
+ *
+ * Removes a given MAC address from a VSI, regardless of VLAN
+ *
+ * Returns 0 for success, or error
+ **/
+int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, u8 *macaddr,
+			  bool is_vf, bool is_netdev)
+{
+	struct i40e_mac_filter *f = NULL;
+	int changed = 0;
+
+	WARN(!spin_is_locked(&vsi->mac_filter_list_lock),
+	     "Missing mac_filter_list_lock\n");
+	list_for_each_entry(f, &vsi->mac_filter_list, list) {
+		if ((ether_addr_equal(macaddr, f->macaddr)) &&
+		    (is_vf == f->is_vf) &&
+		    (is_netdev == f->is_netdev)) {
+			f->counter--;
+			f->changed = true;
+			changed = 1;
+		}
+	}
+	if (changed) {
+		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
+		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
+		return 0;
+	}
+	return -ENOENT;
+}
+
+/**
 * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
 * @vsi: the PF Main VSI - inappropriate for any other VSI
 * @macaddr: the MAC address
@@ -1547,9 +1583,11 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
 		spin_unlock_bh(&vsi->mac_filter_list_lock);
 	}
 
-	i40e_sync_vsi_filters(vsi, false);
 	ether_addr_copy(netdev->dev_addr, addr->sa_data);
-
+	/* schedule our worker thread which will take care of
+	 * applying the new filter changes
+	 */
+	i40e_service_event_schedule(vsi->back);
 	return 0;
 }
 
@@ -1935,11 +1973,13 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
 
 	/* Now process 'del_list' outside the lock */
 	if (!list_empty(&tmp_del_list)) {
+		int del_list_size;
+
 		filter_list_len = pf->hw.aq.asq_buf_size /
 			    sizeof(struct i40e_aqc_remove_macvlan_element_data);
-		del_list = kcalloc(filter_list_len,
-			    sizeof(struct i40e_aqc_remove_macvlan_element_data),
-			    GFP_KERNEL);
+		del_list_size = filter_list_len *
+			    sizeof(struct i40e_aqc_remove_macvlan_element_data);
+		del_list = kzalloc(del_list_size, GFP_KERNEL);
 		if (!del_list) {
 			i40e_cleanup_add_list(&tmp_add_list);
 
@@ -1971,7 +2011,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
 						  NULL);
 				aq_err = pf->hw.aq.asq_last_status;
 				num_del = 0;
-				memset(del_list, 0, sizeof(*del_list));
+				memset(del_list, 0, del_list_size);
 
 				if (ret && aq_err != I40E_AQ_RC_ENOENT)
 					dev_err(&pf->pdev->dev,
@@ -2004,13 +2044,14 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
 	}
 
 	if (!list_empty(&tmp_add_list)) {
+		int add_list_size;
+
 		/* do all the adds now */
 		filter_list_len = pf->hw.aq.asq_buf_size /
 			       sizeof(struct i40e_aqc_add_macvlan_element_data),
-		add_list = kcalloc(filter_list_len,
-			       sizeof(struct i40e_aqc_add_macvlan_element_data),
-			       GFP_KERNEL);
+		add_list_size = filter_list_len *
+			       sizeof(struct i40e_aqc_add_macvlan_element_data);
+		add_list = kzalloc(add_list_size, GFP_KERNEL);
 		if (!add_list) {
 			/* Purge element from temporary lists */
 			i40e_cleanup_add_list(&tmp_add_list);
@@ -2048,7 +2089,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
 
 				if (ret)
 					break;
-				memset(add_list, 0, sizeof(*add_list));
+				memset(add_list, 0, add_list_size);
 			}
 			/* Entries from tmp_add_list were cloned from MAC
 			 * filter list, hence clean those cloned entries
@@ -2112,12 +2153,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
 			 */
 			if (pf->cur_promisc != cur_promisc) {
 				pf->cur_promisc = cur_promisc;
-				if (grab_rtnl)
-					i40e_do_reset_safe(pf,
-						BIT(__I40E_PF_RESET_REQUESTED));
-				else
-					i40e_do_reset(pf,
-						BIT(__I40E_PF_RESET_REQUESTED));
+				set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
 			}
 		} else {
 			ret = i40e_aq_set_vsi_unicast_promiscuous(
@@ -2377,16 +2413,13 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
 		}
 	}
 
-	/* Make sure to release before sync_vsi_filter because that
-	 * function will lock/unlock as necessary
-	 */
 	spin_unlock_bh(&vsi->mac_filter_list_lock);
 
-	if (test_bit(__I40E_DOWN, &vsi->back->state) ||
-	    test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
-		return 0;
-
-	return i40e_sync_vsi_filters(vsi, false);
+	/* schedule our worker thread which will take care of
+	 * applying the new filter changes
+	 */
+	i40e_service_event_schedule(vsi->back);
+	return 0;
 }
 
 /**
@@ -2459,16 +2492,13 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
 		}
 	}
 
-	/* Make sure to release before sync_vsi_filter because that
-	 * function with lock/unlock as necessary
-	 */
 	spin_unlock_bh(&vsi->mac_filter_list_lock);
 
-	if (test_bit(__I40E_DOWN, &vsi->back->state) ||
-	    test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
-		return 0;
-
-	return i40e_sync_vsi_filters(vsi, false);
+	/* schedule our worker thread which will take care of
+	 * applying the new filter changes
+	 */
+	i40e_service_event_schedule(vsi->back);
+	return 0;
 }
 
 /**
@@ -2711,6 +2741,11 @@ static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
 		netif_set_xps_queue(ring->netdev, mask, ring->queue_index);
 	free_cpumask_var(mask);
 }
+
+	/* schedule our worker thread which will take care of
+	 * applying the new filter changes
+	 */
+	i40e_service_event_schedule(vsi->back);
 }
 
 /**
@@ -6685,6 +6720,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
 	struct i40e_hw *hw = &pf->hw;
 	u8 set_fc_aq_fail = 0;
 	i40e_status ret;
+	u32 val;
 	u32 v;
 
 	/* Now we wait for GRST to settle out.
@@ -6823,6 +6859,20 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
 		}
 	}
 
+	/* Reconfigure hardware for allowing smaller MSS in the case
+	 * of TSO, so that we avoid the MDD being fired and causing
+	 * a reset in the case of small MSS+TSO.
+	 */
+#define I40E_REG_MSS		0x000E64DC
+#define I40E_REG_MSS_MIN_MASK	0x3FF0000
+#define I40E_64BYTE_MSS		0x400000
+	val = rd32(hw, I40E_REG_MSS);
+	if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
+		val &= ~I40E_REG_MSS_MIN_MASK;
+		val |= I40E_64BYTE_MSS;
+		wr32(hw, I40E_REG_MSS, val);
+	}
+
 	if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
 	    (pf->hw.aq.fw_maj_ver < 4)) {
 		msleep(75);
@@ -8545,7 +8595,7 @@ static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
 		return 0;
 
 	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
-				       nlflags, 0, 0, filter_mask, NULL);
+				       0, 0, nlflags, filter_mask, NULL);
 }
 
 #define I40E_MAX_TUNNEL_HDR_LEN 80
@@ -10183,6 +10233,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	u16 link_status;
 	int err;
 	u32 len;
+	u32 val;
 	u32 i;
 	u8 set_fc_aq_fail;
 
@@ -10493,6 +10544,17 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 			 i40e_stat_str(&pf->hw, err),
 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 
+	/* Reconfigure hardware for allowing smaller MSS in the case
+	 * of TSO, so that we avoid the MDD being fired and causing
+	 * a reset in the case of small MSS+TSO.
+	 */
+	val = rd32(hw, I40E_REG_MSS);
+	if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
+		val &= ~I40E_REG_MSS_MIN_MASK;
+		val |= I40E_64BYTE_MSS;
+		wr32(hw, I40E_REG_MSS, val);
+	}
+
 	if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
 	    (pf->hw.aq.fw_maj_ver < 4)) {
 		msleep(75);
@@ -10791,6 +10853,12 @@ static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
 
 	dev_info(&pdev->dev, "%s: error %d\n", __func__, error);
 
+	if (!pf) {
+		dev_info(&pdev->dev,
+			 "Cannot recover - error happened during device probe\n");
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+
 	/* shutdown all operations */
 	if (!test_bit(__I40E_SUSPENDED, &pf->state)) {
 		rtnl_lock();
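The MSS hunks above are a plain read-modify-write clamp on a 10-bit register field. A standalone sketch with the same mask arithmetic (the register access is replaced by a plain value; mask names mirror the #defines in the hunk):

#include <stdint.h>
#include <stdio.h>

#define REG_MSS_MIN_MASK 0x3FF0000u	/* bits 25:16 of the register */
#define REG_64BYTE_MSS   0x400000u	/* the value 64 in that field */

static uint32_t clamp_min_mss(uint32_t val)
{
	if ((val & REG_MSS_MIN_MASK) > REG_64BYTE_MSS) {
		val &= ~REG_MSS_MIN_MASK;	/* clear the field */
		val |= REG_64BYTE_MSS;		/* apply the 64-byte floor */
	}
	return val;
}

int main(void)
{
	printf("%#x -> %#x\n", 0x1000000u, clamp_min_mss(0x1000000u));
	return 0;
}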
diff --git a/kernel/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/kernel/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 635b3ac17..26c55bba4 100644
--- a/kernel/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/kernel/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -235,6 +235,9 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
 			 "Filter deleted for PCTYPE %d loc = %d\n",
 			 fd_data->pctype, fd_data->fd_id);
 	}
+	if (err)
+		kfree(raw_packet);
+
 	return err ? -EOPNOTSUPP : 0;
 }
 
@@ -312,6 +315,9 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
 			 fd_data->pctype, fd_data->fd_id);
 	}
 
+	if (err)
+		kfree(raw_packet);
+
 	return err ? -EOPNOTSUPP : 0;
 }
 
@@ -387,6 +393,9 @@ static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
 		}
 	}
 
+	if (err)
+		kfree(raw_packet);
+
 	return err ? -EOPNOTSUPP : 0;
 }
 
@@ -526,11 +535,7 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
 					    struct i40e_tx_buffer *tx_buffer)
 {
 	if (tx_buffer->skb) {
-		if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
-			kfree(tx_buffer->raw_buf);
-		else
-			dev_kfree_skb_any(tx_buffer->skb);
-
+		dev_kfree_skb_any(tx_buffer->skb);
 		if (dma_unmap_len(tx_buffer, len))
 			dma_unmap_single(ring->dev,
 					 dma_unmap_addr(tx_buffer, dma),
@@ -542,6 +547,10 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
 			       dma_unmap_len(tx_buffer, len),
 			       DMA_TO_DEVICE);
 	}
+
+	if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
+		kfree(tx_buffer->raw_buf);
+
 	tx_buffer->next_to_watch = NULL;
 	tx_buffer->skb = NULL;
 	dma_unmap_len_set(tx_buffer, len, 0);
@@ -1416,31 +1425,12 @@ checksum_fail:
 }
 
 /**
- * i40e_rx_hash - returns the hash value from the Rx descriptor
- * @ring: descriptor ring
- * @rx_desc: specific descriptor
- **/
-static inline u32 i40e_rx_hash(struct i40e_ring *ring,
-			       union i40e_rx_desc *rx_desc)
-{
-	const __le64 rss_mask =
-		cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
-			    I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
-
-	if ((ring->netdev->features & NETIF_F_RXHASH) &&
-	    (rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask)
-		return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
-	else
-		return 0;
-}
-
-/**
- * i40e_ptype_to_hash - get a hash type
+ * i40e_ptype_to_htype - get a hash type
 * @ptype: the ptype value from the descriptor
 *
 * Returns a hash type to be used by skb_set_hash
 **/
-static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
+static inline enum pkt_hash_types i40e_ptype_to_htype(u8 ptype)
 {
 	struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
 
@@ -1458,6 +1448,30 @@ static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
 }
 
 /**
+ * i40e_rx_hash - set the hash value in the skb
+ * @ring: descriptor ring
+ * @rx_desc: specific descriptor
+ **/
+static inline void i40e_rx_hash(struct i40e_ring *ring,
+				union i40e_rx_desc *rx_desc,
+				struct sk_buff *skb,
+				u8 rx_ptype)
+{
+	u32 hash;
+	const __le64 rss_mask =
+		cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
+			    I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
+
+	if (ring->netdev->features & NETIF_F_RXHASH)
+		return;
+
+	if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
+		hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
+		skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
+	}
+}
+
+/**
 * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split
 * @rx_ring: rx ring to clean
 * @budget: how many cleans we're allowed
@@ -1606,8 +1620,8 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
 			continue;
 		}
 
-		skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
-			     i40e_ptype_to_hash(rx_ptype));
+		i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
+
 		if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
 			i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
 					     I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
@@ -1736,8 +1750,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
 			continue;
 		}
 
-		skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
-			     i40e_ptype_to_hash(rx_ptype));
+		i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
 		if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
 			i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
 					     I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
diff --git a/kernel/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/kernel/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 44462b40f..e116d9a99 100644
--- a/kernel/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/kernel/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -549,12 +549,15 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
 			i40e_vsi_add_pvid(vsi, vf->port_vlan_id);
 
 		spin_lock_bh(&vsi->mac_filter_list_lock);
-		f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
-				    vf->port_vlan_id ? vf->port_vlan_id : -1,
-				    true, false);
-		if (!f)
-			dev_info(&pf->pdev->dev,
-				 "Could not allocate VF MAC addr\n");
+		if (is_valid_ether_addr(vf->default_lan_addr.addr)) {
+			f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
+				       vf->port_vlan_id ? vf->port_vlan_id : -1,
+				       true, false);
+			if (!f)
+				dev_info(&pf->pdev->dev,
+					 "Could not add MAC filter %pM for VF %d\n",
+					 vf->default_lan_addr.addr, vf->vf_id);
+		}
 		f = i40e_add_filter(vsi, brdcast,
 				    vf->port_vlan_id ? vf->port_vlan_id : -1,
 				    true, false);
@@ -1680,8 +1683,12 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
 	spin_lock_bh(&vsi->mac_filter_list_lock);
 	/* delete addresses from the list */
 	for (i = 0; i < al->num_elements; i++)
-		i40e_del_filter(vsi, al->list[i].addr,
-				I40E_VLAN_ANY, true, false);
+		if (i40e_del_mac_all_vlan(vsi, al->list[i].addr, true, false)) {
+			ret = I40E_ERR_INVALID_MAC_ADDR;
+			spin_unlock_bh(&vsi->mac_filter_list_lock);
+			goto error_param;
+		}
+
 	spin_unlock_bh(&vsi->mac_filter_list_lock);
 
 	/* program the updated filter list */
diff --git a/kernel/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/kernel/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 47e9a90d6..39db70a59 100644
--- a/kernel/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/kernel/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -51,11 +51,7 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
 					    struct i40e_tx_buffer *tx_buffer)
 {
 	if (tx_buffer->skb) {
-		if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
-			kfree(tx_buffer->raw_buf);
-		else
-			dev_kfree_skb_any(tx_buffer->skb);
-
+		dev_kfree_skb_any(tx_buffer->skb);
 		if (dma_unmap_len(tx_buffer, len))
 			dma_unmap_single(ring->dev,
 					 dma_unmap_addr(tx_buffer, dma),
@@ -67,6 +63,10 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
 			       dma_unmap_len(tx_buffer, len),
 			       DMA_TO_DEVICE);
 	}
+
+	if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
+		kfree(tx_buffer->raw_buf);
+
 	tx_buffer->next_to_watch = NULL;
 	tx_buffer->skb = NULL;
 	dma_unmap_len_set(tx_buffer, len, 0);
@@ -245,16 +245,6 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
 	tx_ring->q_vector->tx.total_bytes += total_bytes;
 	tx_ring->q_vector->tx.total_packets += total_packets;
 
-	/* check to see if there are any non-cache aligned descriptors
-	 * waiting to be written back, and kick the hardware to force
-	 * them to be written back in case of napi polling
-	 */
-	if (budget &&
-	    !((i & WB_STRIDE) == WB_STRIDE) &&
-	    !test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
-	    (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
-		tx_ring->arm_wb = true;
-
 	netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
 						      tx_ring->queue_index),
 				  total_packets, total_bytes);
@@ -889,31 +879,12 @@ checksum_fail:
 }
 
 /**
- * i40e_rx_hash - returns the hash value from the Rx descriptor
- * @ring: descriptor ring
- * @rx_desc: specific descriptor
- **/
-static inline u32 i40e_rx_hash(struct i40e_ring *ring,
-			       union i40e_rx_desc *rx_desc)
-{
-	const __le64 rss_mask =
-		cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
-			    I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
-
-	if ((ring->netdev->features & NETIF_F_RXHASH) &&
-	    (rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask)
-		return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
-	else
-		return 0;
-}
-
-/**
- * i40e_ptype_to_hash - get a hash type
+ * i40e_ptype_to_htype - get a hash type
 * @ptype: the ptype value from the descriptor
 *
 * Returns a hash type to be used by skb_set_hash
 **/
-static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
+static inline enum pkt_hash_types i40e_ptype_to_htype(u8 ptype)
 {
 	struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
 
@@ -931,6 +902,30 @@ static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
 }
 
 /**
+ * i40e_rx_hash - set the hash value in the skb
+ * @ring: descriptor ring
+ * @rx_desc: specific descriptor
+ **/
+static inline void i40e_rx_hash(struct i40e_ring *ring,
+				union i40e_rx_desc *rx_desc,
+				struct sk_buff *skb,
+				u8 rx_ptype)
+{
+	u32 hash;
+	const __le64 rss_mask =
+		cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
+			    I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
+
+	if (ring->netdev->features & NETIF_F_RXHASH)
+		return;
+
+	if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
+		hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
+		skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
+	}
+}
+
+/**
 * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split
 * @rx_ring: rx ring to clean
 * @budget: how many cleans we're allowed
@@ -1071,8 +1066,8 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
 			continue;
 		}
 
-		skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
-			     i40e_ptype_to_hash(rx_ptype));
+		i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
+
 		/* probably a little skewed due to removing CRC */
 		total_rx_bytes += skb->len;
 		total_rx_packets++;
@@ -1189,8 +1184,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
 			continue;
 		}
 
-		skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
-			     i40e_ptype_to_hash(rx_ptype));
+		i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
 		/* probably a little skewed due to removing CRC */
 		total_rx_bytes += skb->len;
 		total_rx_packets++;
@@ -1770,6 +1764,9 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 	u32 td_tag = 0;
 	dma_addr_t dma;
 	u16 gso_segs;
+	u16 desc_count = 0;
+	bool tail_bump = true;
+	bool do_rs = false;
 
 	if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
 		td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
@@ -1810,6 +1807,8 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 
 		tx_desc++;
 		i++;
+		desc_count++;
+
 		if (i == tx_ring->count) {
 			tx_desc = I40E_TX_DESC(tx_ring, 0);
 			i = 0;
@@ -1829,6 +1828,8 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 
 		tx_desc++;
 		i++;
+		desc_count++;
+
 		if (i == tx_ring->count) {
 			tx_desc = I40E_TX_DESC(tx_ring, 0);
 			i = 0;
@@ -1843,35 +1844,7 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 		tx_bi = &tx_ring->tx_bi[i];
 	}
@@ -1770,6 +1764,9 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 	u32 td_tag = 0;
 	dma_addr_t dma;
 	u16 gso_segs;
+	u16 desc_count = 0;
+	bool tail_bump = true;
+	bool do_rs = false;
 
 	if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
 		td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
@@ -1810,6 +1807,8 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 			tx_desc++;
 			i++;
+			desc_count++;
+
 			if (i == tx_ring->count) {
 				tx_desc = I40E_TX_DESC(tx_ring, 0);
 				i = 0;
@@ -1829,6 +1828,8 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 		tx_desc++;
 		i++;
+		desc_count++;
+
 		if (i == tx_ring->count) {
 			tx_desc = I40E_TX_DESC(tx_ring, 0);
 			i = 0;
@@ -1843,35 +1844,7 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 		tx_bi = &tx_ring->tx_bi[i];
 	}
 
-	/* Place RS bit on last descriptor of any packet that spans across the
-	 * 4th descriptor (WB_STRIDE aka 0x3) in a 64B cacheline.
-	 */
 #define WB_STRIDE 0x3
-	if (((i & WB_STRIDE) != WB_STRIDE) &&
-	    (first <= &tx_ring->tx_bi[i]) &&
-	    (first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) {
-		tx_desc->cmd_type_offset_bsz =
-			build_ctob(td_cmd, td_offset, size, td_tag) |
-			cpu_to_le64((u64)I40E_TX_DESC_CMD_EOP <<
-				    I40E_TXD_QW1_CMD_SHIFT);
-	} else {
-		tx_desc->cmd_type_offset_bsz =
-			build_ctob(td_cmd, td_offset, size, td_tag) |
-			cpu_to_le64((u64)I40E_TXD_CMD <<
-				    I40E_TXD_QW1_CMD_SHIFT);
-	}
-
-	netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
-						 tx_ring->queue_index),
-			     first->bytecount);
-
-	/* Force memory writes to complete before letting h/w
-	 * know there are new descriptors to fetch.  (Only
-	 * applicable for weak-ordered memory model archs,
-	 * such as IA-64).
-	 */
-	wmb();
-
 	/* set next_to_watch value indicating a packet is present */
 	first->next_to_watch = tx_desc;
 
@@ -1881,15 +1854,78 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 
 	tx_ring->next_to_use = i;
 
+	netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
+						 tx_ring->queue_index),
+			     first->bytecount);
 	i40evf_maybe_stop_tx(tx_ring, DESC_NEEDED);
+
+	/* Algorithm to optimize tail and RS bit setting:
+	 * if xmit_more is supported
+	 *	if xmit_more is true
+	 *		do not update tail and do not mark RS bit.
+	 *	if xmit_more is false and last xmit_more was false
+	 *		if every packet spanned less than 4 desc
+	 *			then set RS bit on 4th packet and update tail
+	 *			on every packet
+	 *		else
+	 *			update tail and set RS bit on every packet.
+	 *	if xmit_more is false and last_xmit_more was true
+	 *		update tail and set RS bit.
+	 * else (kernel < 3.18)
+	 *	if every packet spanned less than 4 desc
+	 *		then set RS bit on 4th packet and update tail
+	 *		on every packet
+	 *	else
+	 *		set RS bit on EOP for every packet and update tail
+	 *
+	 * Optimization: wmb to be issued only in case of tail update.
+	 * Also optimize the Descriptor WB path for RS bit with the same
+	 * algorithm.
+	 *
+	 * Note: If there are less than 4 packets
+	 * pending and interrupts were disabled the service task will
+	 * trigger a force WB.
+	 */
+	if (skb->xmit_more &&
+	    !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
+						    tx_ring->queue_index))) {
+		tx_ring->flags |= I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
+		tail_bump = false;
+	} else if (!skb->xmit_more &&
+		   !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
+						    tx_ring->queue_index)) &&
+		   (!(tx_ring->flags & I40E_TXR_FLAGS_LAST_XMIT_MORE_SET)) &&
+		   (tx_ring->packet_stride < WB_STRIDE) &&
+		   (desc_count < WB_STRIDE)) {
+		tx_ring->packet_stride++;
+	} else {
+		tx_ring->packet_stride = 0;
+		tx_ring->flags &= ~I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
+		do_rs = true;
+	}
+	if (do_rs)
+		tx_ring->packet_stride = 0;
+
+	tx_desc->cmd_type_offset_bsz =
+			build_ctob(td_cmd, td_offset, size, td_tag) |
+			cpu_to_le64((u64)(do_rs ? I40E_TXD_CMD :
+						  I40E_TX_DESC_CMD_EOP) <<
+						  I40E_TXD_QW1_CMD_SHIFT);
+
 	/* notify HW of packet */
-	if (!skb->xmit_more ||
-	    netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
-						   tx_ring->queue_index)))
-		writel(i, tx_ring->tail);
-	else
+	if (!tail_bump)
 		prefetchw(tx_desc + 1);
 
+	if (tail_bump) {
+		/* Force memory writes to complete before letting h/w
+		 * know there are new descriptors to fetch.  (Only
+		 * applicable for weak-ordered memory model archs,
+		 * such as IA-64).
+		 */
+		wmb();
+		writel(i, tx_ring->tail);
+	}
+
 	return;
 
 dma_error:
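The long comment block above gives the batching policy in prose; the sketch below reduces it to the bare branch structure, using simplified stand-ins for the ring state rather than the driver's types.

#include <stdbool.h>
#include <stdio.h>

#define WB_STRIDE 0x3

struct ring_state {
	bool last_xmit_more;
	unsigned int packet_stride;
};

static void decide(struct ring_state *r, bool xmit_more, bool queue_stopped,
		   unsigned int desc_count, bool *tail_bump, bool *do_rs)
{
	*tail_bump = true;
	*do_rs = false;

	if (xmit_more && !queue_stopped) {
		r->last_xmit_more = true;
		*tail_bump = false;          /* batching: defer the doorbell */
	} else if (!xmit_more && !queue_stopped && !r->last_xmit_more &&
		   r->packet_stride < WB_STRIDE && desc_count < WB_STRIDE) {
		r->packet_stride++;          /* small packet: delay the RS bit */
	} else {
		r->packet_stride = 0;
		r->last_xmit_more = false;
		*do_rs = true;               /* force a descriptor write-back */
	}
}

int main(void)
{
	struct ring_state r = { false, 0 };
	bool tail, rs;
	int i;

	/* every 4th small packet gets the RS bit, as the comment describes */
	for (i = 0; i < 6; i++) {
		decide(&r, false, false, 2, &tail, &rs);
		printf("pkt %d: tail=%d rs=%d stride=%u\n", i, tail, rs,
		       r.packet_stride);
	}
	return 0;
}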
diff --git a/kernel/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/kernel/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index ebc1bf77f..998976844 100644
--- a/kernel/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/kernel/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -267,6 +267,8 @@ struct i40e_ring {
 
 	bool ring_active;		/* is ring online or not */
 	bool arm_wb;		/* do something to arm write back */
+	u8 packet_stride;
+#define I40E_TXR_FLAGS_LAST_XMIT_MORE_SET BIT(2)
 
 	u16 flags;
 #define I40E_TXR_FLAGS_WB_ON_ITR	BIT(0)
diff --git a/kernel/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/kernel/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
index 4790437a5..2ac62efc3 100644
--- a/kernel/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
+++ b/kernel/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
@@ -477,54 +477,30 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
 
 	switch (nfc->flow_type) {
 	case TCP_V4_FLOW:
-		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
-		case 0:
-			hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
-			break;
-		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+		if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3))
 			hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
-			break;
-		default:
+		else
 			return -EINVAL;
-		}
 		break;
 	case TCP_V6_FLOW:
-		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
-		case 0:
-			hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
-			break;
-		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+		if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3))
 			hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
-			break;
-		default:
+		else
 			return -EINVAL;
-		}
 		break;
 	case UDP_V4_FLOW:
-		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
-		case 0:
-			hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
-				  BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
-			break;
-		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+		if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
 			hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
 				 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
-			break;
-		default:
+		} else {
 			return -EINVAL;
 		}
 		break;
 	case UDP_V6_FLOW:
-		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
-		case 0:
-			hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
-				  BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
-			break;
-		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+		if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
 			hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
 				 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
-			break;
-		default:
+		} else {
 			return -EINVAL;
 		}
 		break;
diff --git a/kernel/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/kernel/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index 99d2cffae..5f03ab3df 100644
--- a/kernel/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/kernel/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -1864,6 +1864,9 @@ void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter)
 {
 	int i;
 
+	if (!adapter->tx_rings)
+		return;
+
 	for (i = 0; i < adapter->num_active_queues; i++)
 		if (adapter->tx_rings[i]->desc)
 			i40evf_free_tx_resources(adapter->tx_rings[i]);
@@ -1932,6 +1935,9 @@ void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter)
 {
 	int i;
 
+	if (!adapter->rx_rings)
+		return;
+
 	for (i = 0; i < adapter->num_active_queues; i++)
 		if (adapter->rx_rings[i]->desc)
 			i40evf_free_rx_resources(adapter->rx_rings[i]);
diff --git a/kernel/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/kernel/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index 32e620e1e..5de3f52fd 100644
--- a/kernel/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/kernel/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -391,6 +391,7 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
 	struct i40e_virtchnl_ether_addr_list *veal;
 	int len, i = 0, count = 0;
 	struct i40evf_mac_filter *f;
+	bool more = false;
 
 	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
 		/* bail because we already have a command pending */
@@ -415,7 +416,9 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
 		count = (I40EVF_MAX_AQ_BUF_SIZE -
 			 sizeof(struct i40e_virtchnl_ether_addr_list)) /
			sizeof(struct i40e_virtchnl_ether_addr);
-		len = I40EVF_MAX_AQ_BUF_SIZE;
+		len = sizeof(struct i40e_virtchnl_ether_addr_list) +
+		      (count * sizeof(struct i40e_virtchnl_ether_addr));
+		more = true;
 	}
 
 	veal = kzalloc(len, GFP_ATOMIC);
@@ -431,7 +434,8 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
 			f->add = false;
 		}
 	}
-	adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+	if (!more)
+		adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
 	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
 			   (u8 *)veal, len);
 	kfree(veal);
@@ -450,6 +454,7 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
 	struct i40e_virtchnl_ether_addr_list *veal;
 	struct i40evf_mac_filter *f, *ftmp;
 	int len, i = 0, count = 0;
+	bool more = false;
 
 	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
 		/* bail because we already have a command pending */
@@ -474,7 +479,9 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
 		count = (I40EVF_MAX_AQ_BUF_SIZE -
 			 sizeof(struct i40e_virtchnl_ether_addr_list)) /
 			sizeof(struct i40e_virtchnl_ether_addr);
-		len = I40EVF_MAX_AQ_BUF_SIZE;
+		len = sizeof(struct i40e_virtchnl_ether_addr_list) +
+		      (count * sizeof(struct i40e_virtchnl_ether_addr));
+		more = true;
 	}
 	veal = kzalloc(len, GFP_ATOMIC);
 	if (!veal)
@@ -490,7 +497,8 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
 			kfree(f);
 		}
 	}
-	adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+	if (!more)
+		adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
 	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
 			   (u8 *)veal, len);
 	kfree(veal);
@@ -509,6 +517,7 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
 	struct i40e_virtchnl_vlan_filter_list *vvfl;
 	int len, i = 0, count = 0;
 	struct i40evf_vlan_filter *f;
+	bool more = false;
 
 	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
 		/* bail because we already have a command pending */
@@ -534,7 +543,9 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
 		count = (I40EVF_MAX_AQ_BUF_SIZE -
 			 sizeof(struct i40e_virtchnl_vlan_filter_list)) /
 			sizeof(u16);
-		len = I40EVF_MAX_AQ_BUF_SIZE;
+		len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
+		      (count * sizeof(u16));
+		more = true;
 	}
 	vvfl = kzalloc(len, GFP_ATOMIC);
 	if (!vvfl)
@@ -549,7 +560,8 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
 			f->add = false;
 		}
 	}
-	adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
+	if (!more)
+		adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
 	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
 	kfree(vvfl);
 }
@@ -567,6 +579,7 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
 	struct i40e_virtchnl_vlan_filter_list *vvfl;
 	struct i40evf_vlan_filter *f, *ftmp;
 	int len, i = 0, count = 0;
+	bool more = false;
 
 	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
 		/* bail because we already have a command pending */
@@ -592,7 +605,9 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
 		count = (I40EVF_MAX_AQ_BUF_SIZE -
 			 sizeof(struct i40e_virtchnl_vlan_filter_list)) /
 			sizeof(u16);
-		len = I40EVF_MAX_AQ_BUF_SIZE;
+		len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
+		      (count * sizeof(u16));
+		more = true;
 	}
 	vvfl = kzalloc(len, GFP_ATOMIC);
 	if (!vvfl)
@@ -608,7 +623,8 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
 			kfree(f);
 		}
 	}
-	adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
+	if (!more)
+		adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
 	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len);
 	kfree(vvfl);
 }
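The four virtchnl hunks above share one pattern: cap each message at the AQ buffer size and leave the request flag set whenever a chunk of the filter list remains, so the caller comes back for the rest. A small self-contained sketch of that loop, with invented sizes:

#include <stdbool.h>
#include <stdio.h>

#define MAX_BUF  8   /* pretend one message holds at most 8 entries */
#define FLAG_ADD 0x1

static unsigned int aq_required = FLAG_ADD;

static void send_chunk(int *pending)
{
	int count = *pending;
	bool more = false;

	if (count > MAX_BUF) {
		count = MAX_BUF;
		more = true;             /* another pass is needed */
	}
	if (!more)
		aq_required &= ~FLAG_ADD; /* clear only when fully queued */
	*pending -= count;
	printf("sent %d, %d left, flag=%u\n", count, *pending,
	       aq_required & FLAG_ADD);
}

int main(void)
{
	int pending = 19;

	while (aq_required & FLAG_ADD)
		send_chunk(&pending);
	return 0;
}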
diff --git a/kernel/drivers/net/ethernet/intel/igb/e1000_82575.c b/kernel/drivers/net/ethernet/intel/igb/e1000_82575.c
index 7a73510e5..97bf0c3d5 100644
--- a/kernel/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/kernel/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -294,6 +294,7 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
 	case I210_I_PHY_ID:
 		phy->type		= e1000_phy_i210;
 		phy->ops.check_polarity	= igb_check_polarity_m88;
+		phy->ops.get_cfg_done	= igb_get_cfg_done_i210;
 		phy->ops.get_phy_info	= igb_get_phy_info_m88;
 		phy->ops.get_cable_length = igb_get_cable_length_m88_gen2;
 		phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580;
diff --git a/kernel/drivers/net/ethernet/intel/igb/e1000_i210.c b/kernel/drivers/net/ethernet/intel/igb/e1000_i210.c
index 65d931669..29f59c768 100644
--- a/kernel/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/kernel/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -900,3 +900,30 @@ s32 igb_pll_workaround_i210(struct e1000_hw *hw)
 	wr32(E1000_MDICNFG, mdicnfg);
 	return ret_val;
 }
+
+/**
+ *  igb_get_cfg_done_i210 - Read config done bit
+ *  @hw: pointer to the HW structure
+ *
+ *  Read the management control register for the config done bit for
+ *  completion status.  NOTE: silicon which is EEPROM-less will fail trying
+ *  to read the config done bit, so an error is *ONLY* logged and returns
+ *  0.  If we were to return with error, EEPROM-less silicon
+ *  would not be able to be reset or change link.
+ **/
+s32 igb_get_cfg_done_i210(struct e1000_hw *hw)
+{
+	s32 timeout = PHY_CFG_TIMEOUT;
+	u32 mask = E1000_NVM_CFG_DONE_PORT_0;
+
+	while (timeout) {
+		if (rd32(E1000_EEMNGCTL_I210) & mask)
+			break;
+		usleep_range(1000, 2000);
+		timeout--;
+	}
+	if (!timeout)
+		hw_dbg("MNG configuration cycle has not completed.\n");
+
+	return 0;
+}
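igb_get_cfg_done_i210() above is a bounded poll that deliberately never fails, for the reason its kernel-doc gives. Roughly the same shape in userspace form, with the register read and usleep_range() replaced by stand-ins:

#include <stdio.h>
#include <unistd.h>

#define CFG_TIMEOUT 10
#define CFG_DONE    0x1u

static unsigned int read_mng_reg(int iter)
{
	return iter >= 3 ? CFG_DONE : 0; /* pretend HW finishes on pass 3 */
}

static int get_cfg_done(void)
{
	int timeout = CFG_TIMEOUT;

	while (timeout) {
		if (read_mng_reg(CFG_TIMEOUT - timeout) & CFG_DONE)
			break;
		usleep(1000);
		timeout--;
	}
	if (!timeout)
		fprintf(stderr, "MNG configuration cycle has not completed.\n");

	return 0; /* deliberate: never fail, mirroring the driver's note */
}

int main(void)
{
	return get_cfg_done();
}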
diff --git a/kernel/drivers/net/ethernet/intel/igb/e1000_i210.h b/kernel/drivers/net/ethernet/intel/igb/e1000_i210.h
index 3442b6357..eaa68a50c 100644
--- a/kernel/drivers/net/ethernet/intel/igb/e1000_i210.h
+++ b/kernel/drivers/net/ethernet/intel/igb/e1000_i210.h
@@ -34,6 +34,7 @@ s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data);
 s32 igb_init_nvm_params_i210(struct e1000_hw *hw);
 bool igb_get_flash_presence_i210(struct e1000_hw *hw);
 s32 igb_pll_workaround_i210(struct e1000_hw *hw);
+s32 igb_get_cfg_done_i210(struct e1000_hw *hw);
 
 #define E1000_STM_OPCODE		0xDB00
 #define E1000_EEPROM_FLASH_SIZE_WORD	0x11
diff --git a/kernel/drivers/net/ethernet/intel/igb/e1000_regs.h b/kernel/drivers/net/ethernet/intel/igb/e1000_regs.h
index 4af2870e4..0fdcd4d1b 100644
--- a/kernel/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/kernel/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -66,6 +66,7 @@
 #define E1000_PBA      0x01000  /* Packet Buffer Allocation - RW */
 #define E1000_PBS      0x01008  /* Packet Buffer Size */
 #define E1000_EEMNGCTL 0x01010  /* MNG EEprom Control */
+#define E1000_EEMNGCTL_I210 0x12030  /* MNG EEprom Control */
 #define E1000_EEARBC_I210 0x12024 /* EEPROM Auto Read Bus Control */
 #define E1000_EEWR     0x0102C  /* EEPROM Write Register - RW */
 #define E1000_I2CCMD   0x01028  /* SFPI2C Command Register - RW */
diff --git a/kernel/drivers/net/ethernet/intel/igb/igb.h b/kernel/drivers/net/ethernet/intel/igb/igb.h
index 1a2f1cc44..e3cb93bdb 100644
--- a/kernel/drivers/net/ethernet/intel/igb/igb.h
+++ b/kernel/drivers/net/ethernet/intel/igb/igb.h
@@ -389,6 +389,8 @@ struct igb_adapter {
 	u16 link_speed;
 	u16 link_duplex;
 
+	u8 __iomem *io_addr; /* Mainly for iounmap use */
+
 	struct work_struct reset_task;
 	struct work_struct watchdog_task;
 	bool fc_autoneg;
diff --git a/kernel/drivers/net/ethernet/intel/igb/igb_main.c b/kernel/drivers/net/ethernet/intel/igb/igb_main.c
index ea7b09887..fa3b4cbea 100644
--- a/kernel/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/kernel/drivers/net/ethernet/intel/igb/igb_main.c
@@ -2294,9 +2294,11 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
 
 	err = -EIO;
-	hw->hw_addr = pci_iomap(pdev, 0, 0);
-	if (!hw->hw_addr)
+	adapter->io_addr = pci_iomap(pdev, 0, 0);
+	if (!adapter->io_addr)
 		goto err_ioremap;
+	/* hw->hw_addr can be altered, we'll use adapter->io_addr for unmap */
+	hw->hw_addr = adapter->io_addr;
 
 	netdev->netdev_ops = &igb_netdev_ops;
 	igb_set_ethtool_ops(netdev);
@@ -2656,7 +2658,7 @@ err_sw_init:
 #ifdef CONFIG_PCI_IOV
 	igb_disable_sriov(pdev);
 #endif
-	pci_iounmap(pdev, hw->hw_addr);
+	pci_iounmap(pdev, adapter->io_addr);
 err_ioremap:
 	free_netdev(netdev);
 err_alloc_etherdev:
@@ -2823,7 +2825,7 @@ static void igb_remove(struct pci_dev *pdev)
 
 	igb_clear_interrupt_scheme(adapter);
 
-	pci_iounmap(pdev, hw->hw_addr);
+	pci_iounmap(pdev, adapter->io_addr);
 	if (hw->flash_address)
 		iounmap(hw->flash_address);
 	pci_release_selected_regions(pdev,
@@ -2856,6 +2858,13 @@ static void igb_probe_vfs(struct igb_adapter *adapter)
 	if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
 		return;
 
+	/* Of the below we really only want the effect of getting
+	 * IGB_FLAG_HAS_MSIX set (if available), without which
+	 * igb_enable_sriov() has no effect.
+	 */
+	igb_set_interrupt_capability(adapter, true);
+	igb_reset_interrupt_capability(adapter);
+
 	pci_sriov_set_totalvfs(pdev, 7);
 	igb_enable_sriov(pdev, max_vfs);
diff --git a/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index aed8d029b..cd9b284bc 100644
--- a/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -2786,7 +2786,8 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
 	ixgbe_for_each_ring(ring, q_vector->tx)
 		clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring);
 
-	if (!ixgbe_qv_lock_napi(q_vector))
+	/* Exit if we are called by netpoll or busy polling is active */
+	if ((budget <= 0) || !ixgbe_qv_lock_napi(q_vector))
 		return budget;
 
 	/* attempt to distribute budget to each queue fairly, but don't allow
diff --git a/kernel/drivers/net/ethernet/jme.c b/kernel/drivers/net/ethernet/jme.c
index 060dd3922..1257b18e6 100644
--- a/kernel/drivers/net/ethernet/jme.c
+++ b/kernel/drivers/net/ethernet/jme.c
@@ -270,11 +270,17 @@ jme_reset_mac_processor(struct jme_adapter *jme)
 }
 
 static inline void
-jme_clear_pm(struct jme_adapter *jme)
+jme_clear_pm_enable_wol(struct jme_adapter *jme)
 {
 	jwrite32(jme, JME_PMCS, PMCS_STMASK | jme->reg_pmcs);
 }
 
+static inline void
+jme_clear_pm_disable_wol(struct jme_adapter *jme)
+{
+	jwrite32(jme, JME_PMCS, PMCS_STMASK);
+}
+
 static int
 jme_reload_eeprom(struct jme_adapter *jme)
 {
@@ -1853,7 +1859,7 @@ jme_open(struct net_device *netdev)
 	struct jme_adapter *jme = netdev_priv(netdev);
 	int rc;
 
-	jme_clear_pm(jme);
+	jme_clear_pm_disable_wol(jme);
 	JME_NAPI_ENABLE(jme);
 
 	tasklet_init(&jme->linkch_task, jme_link_change_tasklet,
@@ -1925,11 +1931,11 @@ jme_wait_link(struct jme_adapter *jme)
 static void
 jme_powersave_phy(struct jme_adapter *jme)
 {
-	if (jme->reg_pmcs) {
+	if (jme->reg_pmcs && device_may_wakeup(&jme->pdev->dev)) {
 		jme_set_100m_half(jme);
 		if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
 			jme_wait_link(jme);
-		jme_clear_pm(jme);
+		jme_clear_pm_enable_wol(jme);
 	} else {
 		jme_phy_off(jme);
 	}
@@ -2646,9 +2652,6 @@ jme_set_wol(struct net_device *netdev,
 	if (wol->wolopts & WAKE_MAGIC)
 		jme->reg_pmcs |= PMCS_MFEN;
 
-	jwrite32(jme, JME_PMCS, jme->reg_pmcs);
-	device_set_wakeup_enable(&jme->pdev->dev, !!(jme->reg_pmcs));
-
 	return 0;
 }
 
@@ -3172,8 +3175,8 @@ jme_init_one(struct pci_dev *pdev,
 	jme->mii_if.mdio_read = jme_mdio_read;
 	jme->mii_if.mdio_write = jme_mdio_write;
 
-	jme_clear_pm(jme);
-	device_set_wakeup_enable(&pdev->dev, true);
+	jme_clear_pm_disable_wol(jme);
+	device_init_wakeup(&pdev->dev, true);
 
 	jme_set_phyfifo_5level(jme);
 	jme->pcirev = pdev->revision;
@@ -3304,7 +3307,7 @@ jme_resume(struct device *dev)
 	if (!netif_running(netdev))
 		return 0;
 
-	jme_clear_pm(jme);
+	jme_clear_pm_disable_wol(jme);
 	jme_phy_on(jme);
 	if (test_bit(JME_FLAG_SSET, &jme->flags))
 		jme_set_settings(netdev, &jme->old_ecmd);
@@ -3312,13 +3315,14 @@ jme_resume(struct device *dev)
 		jme_reset_phy_processor(jme);
 	jme_phy_calibration(jme);
 	jme_phy_setEA(jme);
-	jme_start_irq(jme);
 	netif_device_attach(netdev);
 
 	atomic_inc(&jme->link_changing);
 
 	jme_reset_link(jme);
 
+	jme_start_irq(jme);
+
 	return 0;
 }
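The jme changes above split the old jme_clear_pm() so that wake events are only armed when both the driver's WoL bits and the device wakeup policy agree; every other path drops them. A compressed illustration with placeholder register values:

#include <stdbool.h>
#include <stdio.h>

#define PMCS_STMASK 0xf0000000u

static unsigned int pmcs;

static void clear_pm_enable_wol(unsigned int reg_pmcs)
{
	pmcs = PMCS_STMASK | reg_pmcs; /* clear status, keep wake enables */
}

static void clear_pm_disable_wol(void)
{
	pmcs = PMCS_STMASK;            /* clear status, drop wake enables */
}

static void powersave_phy(unsigned int reg_pmcs, bool may_wakeup)
{
	if (reg_pmcs && may_wakeup)
		clear_pm_enable_wol(reg_pmcs);
	else
		clear_pm_disable_wol();
	printf("PMCS=%#x\n", pmcs);
}

int main(void)
{
	powersave_phy(0x2, true);  /* wake event armed */
	powersave_phy(0x2, false); /* policy says no wake: bits dropped */
	return 0;
}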
diff --git a/kernel/drivers/net/ethernet/marvell/mvneta.c b/kernel/drivers/net/ethernet/marvell/mvneta.c
index ed622fa29..71ec9cb08 100644
--- a/kernel/drivers/net/ethernet/marvell/mvneta.c
+++ b/kernel/drivers/net/ethernet/marvell/mvneta.c
@@ -226,7 +226,7 @@
 /* Various constants */
 
 /* Coalescing */
-#define MVNETA_TXDONE_COAL_PKTS		1
+#define MVNETA_TXDONE_COAL_PKTS		0	/* interrupt per packet */
 #define MVNETA_RX_COAL_PKTS		32
 #define MVNETA_RX_COAL_USEC		100
 
@@ -3404,7 +3404,7 @@ static int mvneta_probe(struct platform_device *pdev)
 	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
 	dev->hw_features |= dev->features;
 	dev->vlan_features |= dev->features;
-	dev->priv_flags |= IFF_UNICAST_FLT;
+	dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
 	dev->gso_max_segs = MVNETA_MAX_TSO_SEGS;
 
 	err = register_netdev(dev);
diff --git a/kernel/drivers/net/ethernet/marvell/mvpp2.c b/kernel/drivers/net/ethernet/marvell/mvpp2.c
index a4beccf1f..25aba9886 100644
--- a/kernel/drivers/net/ethernet/marvell/mvpp2.c
+++ b/kernel/drivers/net/ethernet/marvell/mvpp2.c
@@ -772,6 +772,17 @@ struct mvpp2_rx_desc {
 	u32 reserved8;
 };
 
+struct mvpp2_txq_pcpu_buf {
+	/* Transmitted SKB */
+	struct sk_buff *skb;
+
+	/* Physical address of transmitted buffer */
+	dma_addr_t phys;
+
+	/* Size transmitted */
+	size_t size;
+};
+
 /* Per-CPU Tx queue control */
 struct mvpp2_txq_pcpu {
 	int cpu;
@@ -787,11 +798,8 @@ struct mvpp2_txq_pcpu {
 	/* Number of Tx DMA descriptors reserved for each CPU */
 	int reserved_num;
 
-	/* Array of transmitted skb */
-	struct sk_buff **tx_skb;
-
-	/* Array of transmitted buffers' physical addresses */
-	dma_addr_t *tx_buffs;
+	/* Infos about transmitted buffers */
+	struct mvpp2_txq_pcpu_buf *buffs;
 
 	/* Index of last TX DMA descriptor that was inserted */
 	int txq_put_index;
@@ -981,10 +989,11 @@ static void mvpp2_txq_inc_put(struct mvpp2_txq_pcpu *txq_pcpu,
 			      struct sk_buff *skb,
 			      struct mvpp2_tx_desc *tx_desc)
 {
-	txq_pcpu->tx_skb[txq_pcpu->txq_put_index] = skb;
-	if (skb)
-		txq_pcpu->tx_buffs[txq_pcpu->txq_put_index] =
-						 tx_desc->buf_phys_addr;
+	struct mvpp2_txq_pcpu_buf *tx_buf =
+		txq_pcpu->buffs + txq_pcpu->txq_put_index;
+	tx_buf->skb = skb;
+	tx_buf->size = tx_desc->data_size;
+	tx_buf->phys = tx_desc->buf_phys_addr;
 	txq_pcpu->txq_put_index++;
 	if (txq_pcpu->txq_put_index == txq_pcpu->size)
 		txq_pcpu->txq_put_index = 0;
@@ -4403,17 +4412,16 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
 	int i;
 
 	for (i = 0; i < num; i++) {
-		dma_addr_t buf_phys_addr =
-				    txq_pcpu->tx_buffs[txq_pcpu->txq_get_index];
-		struct sk_buff *skb = txq_pcpu->tx_skb[txq_pcpu->txq_get_index];
+		struct mvpp2_txq_pcpu_buf *tx_buf =
+			txq_pcpu->buffs + txq_pcpu->txq_get_index;
 
 		mvpp2_txq_inc_get(txq_pcpu);
 
-		dma_unmap_single(port->dev->dev.parent, buf_phys_addr,
-				 skb_headlen(skb), DMA_TO_DEVICE);
-		if (!skb)
+		dma_unmap_single(port->dev->dev.parent, tx_buf->phys,
+				 tx_buf->size, DMA_TO_DEVICE);
+		if (!tx_buf->skb)
 			continue;
-		dev_kfree_skb_any(skb);
+		dev_kfree_skb_any(tx_buf->skb);
 	}
 }
 
@@ -4664,15 +4672,10 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
 	for_each_present_cpu(cpu) {
 		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
 		txq_pcpu->size = txq->size;
-		txq_pcpu->tx_skb = kmalloc(txq_pcpu->size *
-					   sizeof(*txq_pcpu->tx_skb),
-					   GFP_KERNEL);
-		if (!txq_pcpu->tx_skb)
-			goto error;
-
-		txq_pcpu->tx_buffs = kmalloc(txq_pcpu->size *
-					     sizeof(dma_addr_t), GFP_KERNEL);
-		if (!txq_pcpu->tx_buffs)
+		txq_pcpu->buffs = kmalloc(txq_pcpu->size *
+					  sizeof(struct mvpp2_txq_pcpu_buf),
+					  GFP_KERNEL);
+		if (!txq_pcpu->buffs)
 			goto error;
 
 		txq_pcpu->count = 0;
@@ -4686,8 +4689,7 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
 error:
 	for_each_present_cpu(cpu) {
 		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
-		kfree(txq_pcpu->tx_skb);
-		kfree(txq_pcpu->tx_buffs);
+		kfree(txq_pcpu->buffs);
 	}
 
 	dma_free_coherent(port->dev->dev.parent,
@@ -4706,8 +4708,7 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port,
 
 	for_each_present_cpu(cpu) {
 		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
-		kfree(txq_pcpu->tx_skb);
-		kfree(txq_pcpu->tx_buffs);
+		kfree(txq_pcpu->buffs);
 	}
 
 	if (txq->descs)
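The mvpp2 conversion above replaces two parallel arrays with one array of per-buffer records, which halves the allocations and also lets the completion path unmap with the exact mapped size instead of skb_headlen(). A generic illustration of the resulting layout, not the driver's structures:

#include <stdio.h>
#include <stdlib.h>

struct tx_buf {
	void *skb;          /* cookie for the completed buffer */
	unsigned long phys; /* DMA address to unmap */
	size_t size;        /* exact mapped length, not skb_headlen() */
};

int main(void)
{
	size_t n = 256;
	/* one allocation instead of the two kmalloc()s it replaces */
	struct tx_buf *buffs = calloc(n, sizeof(*buffs));

	if (!buffs)
		return 1;
	buffs[0].phys = 0x1000;
	buffs[0].size = 64;
	printf("entry 0: unmap %#lx len %zu\n", buffs[0].phys, buffs[0].size);
	free(buffs);
	return 0;
}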
diff --git a/kernel/drivers/net/ethernet/marvell/sky2.c b/kernel/drivers/net/ethernet/marvell/sky2.c
index 5606a0430..4b62aa1f9 100644
--- a/kernel/drivers/net/ethernet/marvell/sky2.c
+++ b/kernel/drivers/net/ethernet/marvell/sky2.c
@@ -5220,6 +5220,19 @@ static SIMPLE_DEV_PM_OPS(sky2_pm_ops, sky2_suspend, sky2_resume);
 
 static void sky2_shutdown(struct pci_dev *pdev)
 {
+	struct sky2_hw *hw = pci_get_drvdata(pdev);
+	int port;
+
+	for (port = 0; port < hw->ports; port++) {
+		struct net_device *ndev = hw->dev[port];
+
+		rtnl_lock();
+		if (netif_running(ndev)) {
+			dev_close(ndev);
+			netif_device_detach(ndev);
+		}
+		rtnl_unlock();
+	}
 	sky2_suspend(&pdev->dev);
 	pci_wake_from_d3(pdev, device_may_wakeup(&pdev->dev));
 	pci_set_power_state(pdev, PCI_D3hot);
diff --git a/kernel/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/kernel/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 67e9633ea..232191417 100644
--- a/kernel/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/kernel/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2282,7 +2282,7 @@ static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
 	struct mlx4_en_dev *mdev = en_priv->mdev;
 	u64 mac_u64 = mlx4_mac_to_u64(mac);
 
-	if (!is_valid_ether_addr(mac))
+	if (is_multicast_ether_addr(mac))
 		return -EINVAL;
 
 	return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac_u64);
diff --git a/kernel/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/kernel/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index e7a5000aa..28a4b3431 100644
--- a/kernel/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/kernel/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -502,8 +502,11 @@ void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv)
 		return;
 
 	for (ring = 0; ring < priv->rx_ring_num; ring++) {
-		if (mlx4_en_is_ring_empty(priv->rx_ring[ring]))
+		if (mlx4_en_is_ring_empty(priv->rx_ring[ring])) {
+			local_bh_disable();
 			napi_reschedule(&priv->rx_cq[ring]->napi);
+			local_bh_enable();
+		}
 	}
 }
 
@@ -704,7 +707,7 @@ static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
 
 	if (ipv6h->nexthdr == IPPROTO_FRAGMENT || ipv6h->nexthdr == IPPROTO_HOPOPTS)
 		return -1;
-	hw_checksum = csum_add(hw_checksum, (__force __wsum)(ipv6h->nexthdr << 8));
+	hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(ipv6h->nexthdr));
 
 	csum_pseudo_hdr = csum_partial(&ipv6h->saddr,
 				       sizeof(ipv6h->saddr) + sizeof(ipv6h->daddr), 0);
diff --git a/kernel/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/kernel/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 4421bf546..e4019a803 100644
--- a/kernel/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/kernel/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -400,7 +400,6 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
 	u32 packets = 0;
 	u32 bytes = 0;
 	int factor = priv->cqe_factor;
-	u64 timestamp = 0;
 	int done = 0;
 	int budget = priv->tx_work_limit;
 	u32 last_nr_txbb;
@@ -440,9 +439,12 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
 		new_index = be16_to_cpu(cqe->wqe_index) & size_mask;
 
 		do {
+			u64 timestamp = 0;
+
 			txbbs_skipped += last_nr_txbb;
 			ring_index = (ring_index + last_nr_txbb) & size_mask;
-			if (ring->tx_info[ring_index].ts_requested)
+
+			if (unlikely(ring->tx_info[ring_index].ts_requested))
 				timestamp = mlx4_en_get_cqe_ts(cqe);
 
 			/* free next descriptor */
diff --git a/kernel/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/kernel/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index cad6c44df..d314d96dc 100644
--- a/kernel/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/kernel/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -3132,7 +3132,7 @@ static int verify_qp_parameters(struct mlx4_dev *dev,
 	case QP_TRANS_RTS2RTS:
 	case QP_TRANS_SQD2SQD:
 	case QP_TRANS_SQD2RTS:
-		if (slave != mlx4_master_func_num(dev))
+		if (slave != mlx4_master_func_num(dev)) {
 			if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
 				port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
 				if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
@@ -3151,6 +3151,7 @@ static int verify_qp_parameters(struct mlx4_dev *dev,
 				if (qp_ctx->alt_path.mgid_index >= num_gids)
 					return -EINVAL;
 			}
+		}
 		break;
 	default:
 		break;
diff --git a/kernel/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/kernel/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 037fc4cdf..cc1990636 100644
--- a/kernel/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/kernel/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -143,13 +143,14 @@ static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
 	return cmd->cmd_buf + (idx << cmd->log_stride);
 }
 
-static u8 xor8_buf(void *buf, int len)
+static u8 xor8_buf(void *buf, size_t offset, int len)
 {
 	u8 *ptr = buf;
 	u8 sum = 0;
 	int i;
+	int end = len + offset;
 
-	for (i = 0; i < len; i++)
+	for (i = offset; i < end; i++)
 		sum ^= ptr[i];
 
 	return sum;
@@ -157,41 +158,49 @@ static u8 xor8_buf(void *buf, int len)
 
 static int verify_block_sig(struct mlx5_cmd_prot_block *block)
 {
-	if (xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 1) != 0xff)
+	size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);
+	int xor_len = sizeof(*block) - sizeof(block->data) - 1;
+
+	if (xor8_buf(block, rsvd0_off, xor_len) != 0xff)
 		return -EINVAL;
 
-	if (xor8_buf(block, sizeof(*block)) != 0xff)
+	if (xor8_buf(block, 0, sizeof(*block)) != 0xff)
 		return -EINVAL;
 
 	return 0;
 }
 
-static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token,
-			   int csum)
+static void calc_block_sig(struct mlx5_cmd_prot_block *block)
 {
-	block->token = token;
-	if (csum) {
-		block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) -
-					    sizeof(block->data) - 2);
-		block->sig = ~xor8_buf(block, sizeof(*block) - 1);
-	}
+	int ctrl_xor_len = sizeof(*block) - sizeof(block->data) - 2;
+	size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);
+
+	block->ctrl_sig = ~xor8_buf(block, rsvd0_off, ctrl_xor_len);
+	block->sig = ~xor8_buf(block, 0, sizeof(*block) - 1);
 }
 
-static void calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token, int csum)
+static void calc_chain_sig(struct mlx5_cmd_msg *msg)
 {
 	struct mlx5_cmd_mailbox *next = msg->next;
-
-	while (next) {
-		calc_block_sig(next->buf, token, csum);
+	int size = msg->len;
+	int blen = size - min_t(int, sizeof(msg->first.data), size);
+	int n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1)
+		/ MLX5_CMD_DATA_BLOCK_SIZE;
+	int i = 0;
+
+	for (i = 0; i < n && next; i++) {
+		calc_block_sig(next->buf);
 		next = next->next;
 	}
 }
 
 static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
 {
-	ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay));
-	calc_chain_sig(ent->in, ent->token, csum);
-	calc_chain_sig(ent->out, ent->token, csum);
+	ent->lay->sig = ~xor8_buf(ent->lay, 0, sizeof(*ent->lay));
+	if (csum) {
+		calc_chain_sig(ent->in);
+		calc_chain_sig(ent->out);
+	}
 }
 
 static void poll_timeout(struct mlx5_cmd_work_ent *ent)
@@ -222,12 +231,17 @@ static int verify_signature(struct mlx5_cmd_work_ent *ent)
 	struct mlx5_cmd_mailbox *next = ent->out->next;
 	int err;
 	u8 sig;
+	int size = ent->out->len;
+	int blen = size - min_t(int, sizeof(ent->out->first.data), size);
+	int n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1)
+		/ MLX5_CMD_DATA_BLOCK_SIZE;
+	int i = 0;
 
-	sig = xor8_buf(ent->lay, sizeof(*ent->lay));
+	sig = xor8_buf(ent->lay, 0, sizeof(*ent->lay));
 	if (sig != 0xff)
 		return -EINVAL;
 
-	while (next) {
+	for (i = 0; i < n && next; i++) {
 		err = verify_block_sig(next->buf);
 		if (err)
 			return err;
@@ -641,7 +655,6 @@ static void cmd_work_handler(struct work_struct *work)
 		spin_unlock_irqrestore(&cmd->alloc_lock, flags);
 	}
 
-	ent->token = alloc_token(cmd);
 	cmd->ent_arr[ent->idx] = ent;
 	lay = get_inst(cmd, ent->idx);
 	ent->lay = lay;
@@ -755,7 +768,8 @@ static u8 *get_status_ptr(struct mlx5_outbox_hdr *out)
 static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
			   struct mlx5_cmd_msg *out, void *uout, int uout_size,
			   mlx5_cmd_cbk_t callback,
-			   void *context, int page_queue, u8 *status)
+			   void *context, int page_queue, u8 *status,
+			   u8 token)
 {
 	struct mlx5_cmd *cmd = &dev->cmd;
 	struct mlx5_cmd_work_ent *ent;
@@ -772,6 +786,8 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
 	if (IS_ERR(ent))
 		return PTR_ERR(ent);
 
+	ent->token = token;
+
 	if (!callback)
 		init_completion(&ent->done);
 
@@ -844,7 +860,8 @@ static const struct file_operations fops = {
 	.write	= dbg_write,
 };
 
-static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size)
+static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size,
+			    u8 token)
 {
 	struct mlx5_cmd_prot_block *block;
 	struct mlx5_cmd_mailbox *next;
@@ -870,6 +887,7 @@ static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size)
 		memcpy(block->data, from, copy);
 		from += copy;
 		size -= copy;
+		block->token = token;
 		next = next->next;
 	}
 
@@ -939,7 +957,8 @@ static void free_cmd_box(struct mlx5_core_dev *dev,
 }
 
 static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
-					       gfp_t flags, int size)
+					       gfp_t flags, int size,
+					       u8 token)
 {
 	struct mlx5_cmd_mailbox *tmp, *head = NULL;
 	struct mlx5_cmd_prot_block *block;
@@ -968,6 +987,7 @@ static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
 		tmp->next = head;
 		block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0);
 		block->block_num = cpu_to_be32(n - i - 1);
+		block->token = token;
 		head = tmp;
 	}
 	msg->next = head;
@@ -1351,7 +1371,7 @@ static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
 	}
 
 	if (IS_ERR(msg))
-		msg = mlx5_alloc_cmd_msg(dev, gfp, in_size);
+		msg = mlx5_alloc_cmd_msg(dev, gfp, in_size, 0);
 
 	return msg;
 }
@@ -1376,6 +1396,7 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
 	int err;
 	u8 status = 0;
 	u32 drv_synd;
+	u8 token;
 
 	if (pci_channel_offline(dev->pdev) ||
 	    dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
@@ -1394,20 +1415,22 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
 		return err;
 	}
 
-	err = mlx5_copy_to_msg(inb, in, in_size);
+	token = alloc_token(&dev->cmd);
+
+	err = mlx5_copy_to_msg(inb, in, in_size, token);
 	if (err) {
 		mlx5_core_warn(dev, "err %d\n", err);
 		goto out_in;
 	}
 
-	outb = mlx5_alloc_cmd_msg(dev, gfp, out_size);
+	outb = mlx5_alloc_cmd_msg(dev, gfp, out_size, token);
 	if (IS_ERR(outb)) {
 		err = PTR_ERR(outb);
 		goto out_in;
 	}
 
 	err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context,
-			      pages_queue, &status);
+			      pages_queue, &status, token);
 	if (err)
 		goto out_out;
 
@@ -1475,7 +1498,7 @@ static int create_msg_cache(struct mlx5_core_dev *dev)
 	INIT_LIST_HEAD(&cmd->cache.med.head);
 
 	for (i = 0; i < NUM_LONG_LISTS; i++) {
-		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE);
+		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE, 0);
 		if (IS_ERR(msg)) {
 			err = PTR_ERR(msg);
 			goto ex_err;
@@ -1485,7 +1508,7 @@ static int create_msg_cache(struct mlx5_core_dev *dev)
 	}
 
 	for (i = 0; i < NUM_MED_LISTS; i++) {
-		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE);
+		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE, 0);
 		if (IS_ERR(msg)) {
 			err = PTR_ERR(msg);
 			goto ex_err;
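The reworked xor8_buf() above takes an explicit offset so signatures can skip fields they must not cover. The invariant in isolation, as a compilable sketch (offsets and buffer contents arbitrary): a region whose last byte is the complement of the XOR of everything before it folds to 0xff.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint8_t xor8_buf(const void *buf, size_t offset, size_t len)
{
	const uint8_t *p = buf;
	uint8_t sum = 0;
	size_t i;

	for (i = offset; i < offset + len; i++)
		sum ^= p[i];
	return sum;
}

int main(void)
{
	uint8_t block[16];

	memset(block, 0xab, sizeof(block));
	/* sign: last byte = complement of the XOR over the bytes before it */
	block[15] = ~xor8_buf(block, 0, 15);
	/* verify: XOR over the whole block must come out 0xff */
	printf("verify=%#x\n", xor8_buf(block, 0, sizeof(block)));
	return 0;
}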
diff --git a/kernel/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/kernel/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 2e022e900..7cc9df717 100644
--- a/kernel/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/kernel/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -399,6 +399,9 @@ static int mlx5e_get_coalesce(struct net_device *netdev,
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 
+	if (!MLX5_CAP_GEN(priv->mdev, cq_moderation))
+		return -ENOTSUPP;
+
 	coal->rx_coalesce_usecs       = priv->params.rx_cq_moderation_usec;
 	coal->rx_max_coalesced_frames = priv->params.rx_cq_moderation_pkts;
 	coal->tx_coalesce_usecs       = priv->params.tx_cq_moderation_usec;
@@ -416,11 +419,18 @@ static int mlx5e_set_coalesce(struct net_device *netdev,
 	int tc;
 	int i;
 
+	if (!MLX5_CAP_GEN(mdev, cq_moderation))
+		return -ENOTSUPP;
+
+	mutex_lock(&priv->state_lock);
 	priv->params.tx_cq_moderation_usec = coal->tx_coalesce_usecs;
 	priv->params.tx_cq_moderation_pkts = coal->tx_max_coalesced_frames;
 	priv->params.rx_cq_moderation_usec = coal->rx_coalesce_usecs;
 	priv->params.rx_cq_moderation_pkts = coal->rx_max_coalesced_frames;
 
+	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+		goto out;
+
 	for (i = 0; i < priv->params.num_channels; ++i) {
 		c = priv->channel[i];
 
@@ -436,6 +446,8 @@ static int mlx5e_set_coalesce(struct net_device *netdev,
 							coal->rx_max_coalesced_frames);
 	}
 
+out:
+	mutex_unlock(&priv->state_lock);
 	return 0;
 }
 
diff --git a/kernel/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/kernel/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 1203d892e..90e876ecc 100644
--- a/kernel/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/kernel/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -863,12 +863,10 @@ static int mlx5e_open_cq(struct mlx5e_channel *c,
 	if (err)
 		goto err_destroy_cq;
 
-	err = mlx5_core_modify_cq_moderation(mdev, &cq->mcq,
-					     moderation_usecs,
-					     moderation_frames);
-	if (err)
-		goto err_destroy_cq;
-
+	if (MLX5_CAP_GEN(mdev, cq_moderation))
+		mlx5_core_modify_cq_moderation(mdev, &cq->mcq,
+					       moderation_usecs,
+					       moderation_frames);
 	return 0;
 
 err_destroy_cq:
@@ -1372,7 +1370,7 @@ static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 	struct mlx5_core_dev *mdev = priv->mdev;
-	int hw_mtu;
+	u16 hw_mtu;
 	int err;
 
 	err = mlx5_set_port_mtu(mdev, MLX5E_SW2HW_MTU(netdev->mtu), 1);
@@ -1891,22 +1889,27 @@ static int mlx5e_set_features(struct net_device *netdev,
 	return err;
 }
 
+#define MXL5_HW_MIN_MTU 64
+#define MXL5E_MIN_MTU (MXL5_HW_MIN_MTU + ETH_FCS_LEN)
+
 static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 	struct mlx5_core_dev *mdev = priv->mdev;
 	bool was_opened;
-	int max_mtu;
+	u16 max_mtu;
+	u16 min_mtu;
 	int err = 0;
 
 	mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
 
 	max_mtu = MLX5E_HW2SW_MTU(max_mtu);
+	min_mtu = MLX5E_HW2SW_MTU(MXL5E_MIN_MTU);
 
-	if (new_mtu > max_mtu) {
+	if (new_mtu > max_mtu || new_mtu < min_mtu) {
 		netdev_err(netdev,
-			   "%s: Bad MTU (%d) > (%d) Max\n",
-			   __func__, new_mtu, max_mtu);
+			   "%s: Bad MTU (%d), valid range is: [%d..%d]\n",
+			   __func__, new_mtu, min_mtu, max_mtu);
 		return -EINVAL;
 	}
 
@@ -1958,6 +1961,8 @@ static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
 	}
 	if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
 		mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
+	if (!MLX5_CAP_GEN(mdev, cq_moderation))
+		mlx5_core_warn(mdev, "CQ moderation is not supported\n");
 
 	return 0;
 }
diff --git a/kernel/drivers/net/ethernet/mellanox/mlx5/core/main.c b/kernel/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 6cf6d93d8..ba115ec7a 100644
--- a/kernel/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/kernel/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -432,6 +432,13 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
 	MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size,
 		 to_fw_pkey_sz(128));
 
+	/* Check log_max_qp from HCA caps to set in current profile */
+	if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < profile[prof_sel].log_max_qp) {
+		mlx5_core_warn(dev, "log_max_qp value in current profile is %d, changing it to HCA capability limit (%d)\n",
+			       profile[prof_sel].log_max_qp,
+			       MLX5_CAP_GEN_MAX(dev, log_max_qp));
+		profile[prof_sel].log_max_qp = MLX5_CAP_GEN_MAX(dev, log_max_qp);
+	}
 	if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
 		MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp,
 			 prof->log_max_qp);
@@ -505,7 +512,6 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
 	struct mlx5_priv *priv  = &mdev->priv;
 	struct msix_entry *msix = priv->msix_arr;
 	int irq                 = msix[i + MLX5_EQ_VEC_COMP_BASE].vector;
-	int numa_node           = priv->numa_node;
 	int err;
 
 	if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) {
 		return -ENOMEM;
 	}
 
-	cpumask_set_cpu(cpumask_local_spread(i, numa_node),
+	cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node),
 			priv->irq_info[i].mask);
 
 	err = irq_set_affinity_hint(irq, priv->irq_info[i].mask);
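mlx5e_change_mtu() above now rejects values below a minimum as well as above the maximum. The same check as a pure function, with stand-in limits instead of the HW2SW-translated ones:

#include <stdio.h>

static int change_mtu(int new_mtu, int min_mtu, int max_mtu)
{
	if (new_mtu > max_mtu || new_mtu < min_mtu) {
		fprintf(stderr, "Bad MTU (%d), valid range is: [%d..%d]\n",
			new_mtu, min_mtu, max_mtu);
		return -1; /* -EINVAL in the driver */
	}
	return 0;
}

int main(void)
{
	change_mtu(9000, 68, 1500);      /* rejected: above max */
	change_mtu(20, 68, 1500);        /* rejected: below min, the new case */
	return change_mtu(1500, 68, 1500);
}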
a87e773e9..53a793bc2 100644
--- a/kernel/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/kernel/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -246,8 +246,8 @@ int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
 }
 EXPORT_SYMBOL_GPL(mlx5_query_port_admin_status);
 
-static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, int *admin_mtu,
-				int *max_mtu, int *oper_mtu, u8 port)
+static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, u16 *admin_mtu,
+				u16 *max_mtu, u16 *oper_mtu, u8 port)
 {
 	u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
 	u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
@@ -267,7 +267,7 @@ static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, int *admin_mtu,
 		*admin_mtu = MLX5_GET(pmtu_reg, out, admin_mtu);
 }
 
-int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port)
+int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port)
 {
 	u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
 	u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
@@ -282,14 +282,14 @@ int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port)
 }
 EXPORT_SYMBOL_GPL(mlx5_set_port_mtu);
 
-void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu,
+void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu,
 			     u8 port)
 {
 	mlx5_query_port_mtu(dev, NULL, max_mtu, NULL, port);
 }
 EXPORT_SYMBOL_GPL(mlx5_query_port_max_mtu);
 
-void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
+void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu,
 			      u8 port)
 {
 	mlx5_query_port_mtu(dev, NULL, NULL, oper_mtu, port);
diff --git a/kernel/drivers/net/ethernet/mellanox/mlxsw/pci.h b/kernel/drivers/net/ethernet/mellanox/mlxsw/pci.h
index 142f33d97..a0fbe00dd 100644
--- a/kernel/drivers/net/ethernet/mellanox/mlxsw/pci.h
+++ b/kernel/drivers/net/ethernet/mellanox/mlxsw/pci.h
@@ -206,21 +206,21 @@ MLXSW_ITEM32(pci, eqe, owner, 0x0C, 0, 1);
 /* pci_eqe_cmd_token
  * Command completion event - token
  */
-MLXSW_ITEM32(pci, eqe, cmd_token, 0x08, 16, 16);
+MLXSW_ITEM32(pci, eqe, cmd_token, 0x00, 16, 16);
 
 /* pci_eqe_cmd_status
  * Command completion event - status
  */
-MLXSW_ITEM32(pci, eqe, cmd_status, 0x08, 0, 8);
+MLXSW_ITEM32(pci, eqe, cmd_status, 0x00, 0, 8);
 
 /* pci_eqe_cmd_out_param_h
  * Command completion event - output parameter - higher part
 */
-MLXSW_ITEM32(pci, eqe, cmd_out_param_h, 0x0C, 0, 32);
+MLXSW_ITEM32(pci, eqe, cmd_out_param_h, 0x04, 0, 32);
 
 /* pci_eqe_cmd_out_param_l
  * Command completion event - output parameter - lower part
 */
-MLXSW_ITEM32(pci, eqe, cmd_out_param_l, 0x10, 0, 32);
+MLXSW_ITEM32(pci, eqe, cmd_out_param_l, 0x08, 0, 32);
 
 #endif
diff --git a/kernel/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/kernel/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 3be4a2355..cb165c2d4 100644
--- a/kernel/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/kernel/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -390,6 +390,7 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
 			dev_kfree_skb_any(skb_orig);
 			return NETDEV_TX_OK;
 		}
+		dev_consume_skb_any(skb_orig);
 	}
 
 	if (eth_skb_pad(skb)) {
diff --git a/kernel/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/kernel/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 4365c8bcc..605f6410f 100644
--- a/kernel/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/kernel/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -61,6 +61,8 @@ struct mlxsw_sp {
 #define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100
 		unsigned int interval; /* ms */
 	} fdb_notify;
+#define MLXSW_SP_MIN_AGEING_TIME 10
+#define MLXSW_SP_MAX_AGEING_TIME 1000000
 #define MLXSW_SP_DEFAULT_AGEING_TIME 300
 	u32 ageing_time;
 	struct {
diff --git a/kernel/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/kernel/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 7dbeafa65..d4c4c2b51 100644
--- a/kernel/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/kernel/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -232,8 +232,13 @@ static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
 	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
 	u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
 
-	if (switchdev_trans_ph_prepare(trans))
-		return 0;
+	if (switchdev_trans_ph_prepare(trans)) {
+		if (ageing_time < MLXSW_SP_MIN_AGEING_TIME ||
+		    ageing_time > MLXSW_SP_MAX_AGEING_TIME)
+			return -ERANGE;
+		else
+			return 0;
+	}
 
 	return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
 }
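The switchdev change above moves range validation into the prepare phase, where an error can still veto the whole transaction before anything touches hardware. A sketch of that two-phase shape, reusing the same numeric limits:

#include <stdbool.h>
#include <stdio.h>

#define MIN_AGEING 10
#define MAX_AGEING 1000000

static int set_ageing(unsigned int ageing_time, bool prepare)
{
	if (prepare) {
		if (ageing_time < MIN_AGEING || ageing_time > MAX_AGEING)
			return -34; /* -ERANGE: veto before any HW write */
		return 0;
	}
	printf("committing ageing_time=%u\n", ageing_time);
	return 0;
}

int main(void)
{
	if (set_ageing(5, true) == 0)
		set_ageing(5, false);   /* never reached: prepare vetoed it */
	if (set_ageing(300, true) == 0)
		set_ageing(300, false);
	return 0;
}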
diff --git a/kernel/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/kernel/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index d85960cfb..fb2d9a82c 100644
--- a/kernel/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/kernel/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -313,6 +313,7 @@ static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb,
 			dev_kfree_skb_any(skb_orig);
 			return NETDEV_TX_OK;
 		}
+		dev_consume_skb_any(skb_orig);
 	}
 	mlxsw_sx_txhdr_construct(skb, &tx_info);
 	len = skb->len;
diff --git a/kernel/drivers/net/ethernet/neterion/vxge/vxge-main.c b/kernel/drivers/net/ethernet/neterion/vxge/vxge-main.c
index 50d560483..e0993eba5 100644
--- a/kernel/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/kernel/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -2223,8 +2223,6 @@ static irqreturn_t vxge_isr_napi(int irq, void *dev_id)
 	return IRQ_NONE;
 }
 
-#ifdef CONFIG_PCI_MSI
-
 static irqreturn_t vxge_tx_msix_handle(int irq, void *dev_id)
 {
 	struct vxge_fifo *fifo = (struct vxge_fifo *)dev_id;
@@ -2442,16 +2440,13 @@ static void vxge_rem_msix_isr(struct vxgedev *vdev)
 	if (vdev->config.intr_type == MSI_X)
 		pci_disable_msix(vdev->pdev);
 }
-#endif
 
 static void vxge_rem_isr(struct vxgedev *vdev)
 {
-#ifdef CONFIG_PCI_MSI
-	if (vdev->config.intr_type == MSI_X) {
+	if (IS_ENABLED(CONFIG_PCI_MSI) &&
+	    vdev->config.intr_type == MSI_X) {
 		vxge_rem_msix_isr(vdev);
-	} else
-#endif
-	if (vdev->config.intr_type == INTA) {
+	} else if (vdev->config.intr_type == INTA) {
 		synchronize_irq(vdev->pdev->irq);
 		free_irq(vdev->pdev->irq, vdev);
 	}
@@ -2460,11 +2455,10 @@ static void vxge_rem_isr(struct vxgedev *vdev)
 static int vxge_add_isr(struct vxgedev *vdev)
 {
 	int ret = 0;
-#ifdef CONFIG_PCI_MSI
 	int vp_idx = 0, intr_idx = 0, intr_cnt = 0, msix_idx = 0, irq_req = 0;
 	int pci_fun = PCI_FUNC(vdev->pdev->devfn);
 
-	if (vdev->config.intr_type == MSI_X)
+	if (IS_ENABLED(CONFIG_PCI_MSI) && vdev->config.intr_type == MSI_X)
 		ret = vxge_enable_msix(vdev);
 
 	if (ret) {
@@ -2475,7 +2469,7 @@ static int vxge_add_isr(struct vxgedev *vdev)
 		vdev->config.intr_type = INTA;
 	}
 
-	if (vdev->config.intr_type == MSI_X) {
+	if (IS_ENABLED(CONFIG_PCI_MSI) && vdev->config.intr_type == MSI_X) {
 		for (intr_idx = 0;
 		     intr_idx < (vdev->no_of_vpath *
 			VXGE_HW_VPATH_MSIX_ACTIVE); intr_idx++) {
@@ -2576,9 +2570,8 @@ static int vxge_add_isr(struct vxgedev *vdev)
 		vdev->vxge_entries[intr_cnt].in_use = 1;
 		vdev->vxge_entries[intr_cnt].arg = &vdev->vpaths[0];
 	}
-INTA_MODE:
-#endif
 
+INTA_MODE:
 	if (vdev->config.intr_type == INTA) {
 		snprintf(vdev->desc[0], VXGE_INTR_STRLEN,
 			 "%s:vxge:INTA", vdev->ndev->name);
@@ -3889,12 +3882,12 @@ static void vxge_device_config_init(struct vxge_hw_device_config *device_config,
 	if (max_mac_vpath > VXGE_MAX_MAC_ADDR_COUNT)
 		max_mac_vpath = VXGE_MAX_MAC_ADDR_COUNT;
 
-#ifndef CONFIG_PCI_MSI
-	vxge_debug_init(VXGE_ERR,
-		"%s: This Kernel does not support "
-		"MSI-X. Defaulting to INTA", VXGE_DRIVER_NAME);
-	*intr_type = INTA;
-#endif
+	if (!IS_ENABLED(CONFIG_PCI_MSI)) {
+		vxge_debug_init(VXGE_ERR,
+				"%s: This Kernel does not support "
+				"MSI-X. Defaulting to INTA", VXGE_DRIVER_NAME);
+		*intr_type = INTA;
+	}
 
 	/* Configure whether MSI-X or IRQL. */
 	switch (*intr_type) {
diff --git a/kernel/drivers/net/ethernet/qlogic/qed/qed_spq.c b/kernel/drivers/net/ethernet/qlogic/qed/qed_spq.c
index 3dd548ab8..40365cb1a 100644
--- a/kernel/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/kernel/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -794,13 +794,12 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
			 * in a bitmap and increasing the chain consumer only
			 * for the first successive completed entries.
			 */
-			bitmap_set(p_spq->p_comp_bitmap, pos, SPQ_RING_SIZE);
+			__set_bit(pos, p_spq->p_comp_bitmap);
 
			while (test_bit(p_spq->comp_bitmap_idx,
					p_spq->p_comp_bitmap)) {
-				bitmap_clear(p_spq->p_comp_bitmap,
-					     p_spq->comp_bitmap_idx,
-					     SPQ_RING_SIZE);
+				__clear_bit(p_spq->comp_bitmap_idx,
+					    p_spq->p_comp_bitmap);
				p_spq->comp_bitmap_idx++;
				qed_chain_return_produced(&p_spq->chain);
			}
diff --git a/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 46bbea8e0..55007f1e6 100644
--- a/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -566,6 +566,7 @@ struct qlcnic_adapter_stats {
 	u64  tx_dma_map_error;
 	u64  spurious_intr;
 	u64  mac_filter_limit_overrun;
+	u64  mbx_spurious_intr;
 };
 
 /*
@@ -1099,7 +1100,7 @@ struct qlcnic_mailbox {
 	unsigned long		status;
 	spinlock_t		queue_lock;	/* Mailbox queue lock */
 	spinlock_t		aen_lock;	/* Mailbox response/AEN lock */
-	atomic_t		rsp_status;
+	u32			rsp_status;
 	u32			num_cmds;
 };
 
diff --git a/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index 37a731be7..f9640d5ce 100644
--- a/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -491,7 +491,7 @@ irqreturn_t qlcnic_83xx_clear_legacy_intr(struct qlcnic_adapter *adapter)
 
 static inline void qlcnic_83xx_notify_mbx_response(struct qlcnic_mailbox *mbx)
 {
-	atomic_set(&mbx->rsp_status, QLC_83XX_MBX_RESPONSE_ARRIVED);
+	mbx->rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED;
 	complete(&mbx->completion);
 }
 
@@ -510,7 +510,7 @@ static void qlcnic_83xx_poll_process_aen(struct qlcnic_adapter *adapter)
 	if (event & QLCNIC_MBX_ASYNC_EVENT) {
 		__qlcnic_83xx_process_aen(adapter);
 	} else {
-		if (atomic_read(&mbx->rsp_status) != rsp_status)
+		if (mbx->rsp_status != rsp_status)
 			qlcnic_83xx_notify_mbx_response(mbx);
 	}
 out:
@@ -1023,7 +1023,7 @@ static void qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
 		if (event & QLCNIC_MBX_ASYNC_EVENT) {
 			__qlcnic_83xx_process_aen(adapter);
 		} else {
-			if (atomic_read(&mbx->rsp_status) != rsp_status)
+			if (mbx->rsp_status != rsp_status)
 				qlcnic_83xx_notify_mbx_response(mbx);
 		}
 	}
@@ -2338,9 +2338,9 @@ static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter,
 
 static irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data)
 {
+	u32 mask, resp, event, rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED;
 	struct qlcnic_adapter *adapter = data;
 	struct qlcnic_mailbox *mbx;
-	u32 mask, resp, event;
 	unsigned long flags;
 
 	mbx = adapter->ahw->mailbox;
@@ -2350,10 +2350,14 @@ static irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data)
 		goto out;
 
 	event = readl(QLCNIC_MBX_FW(adapter->ahw, 0));
-	if (event & QLCNIC_MBX_ASYNC_EVENT)
+	if (event & QLCNIC_MBX_ASYNC_EVENT) {
 		__qlcnic_83xx_process_aen(adapter);
-	else
-		qlcnic_83xx_notify_mbx_response(mbx);
+	} else {
+		if (mbx->rsp_status != rsp_status)
+			qlcnic_83xx_notify_mbx_response(mbx);
+		else
+			adapter->stats.mbx_spurious_intr++;
+	}
 
 out:
 	mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK);
@@ -4050,10 +4054,10 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
 	struct qlcnic_adapter *adapter = mbx->adapter;
 	const struct qlcnic_mbx_ops *mbx_ops = mbx->ops;
 	struct device *dev = &adapter->pdev->dev;
-	atomic_t *rsp_status = &mbx->rsp_status;
 	struct list_head *head = &mbx->cmd_q;
 	struct qlcnic_hardware_context *ahw;
 	struct qlcnic_cmd_args *cmd = NULL;
+	unsigned long flags;
 
 	ahw = adapter->ahw;
 
@@ -4063,7 +4067,9 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
			return;
		}
 
-		atomic_set(rsp_status, QLC_83XX_MBX_RESPONSE_WAIT);
+		spin_lock_irqsave(&mbx->aen_lock, flags);
+		mbx->rsp_status = QLC_83XX_MBX_RESPONSE_WAIT;
+		spin_unlock_irqrestore(&mbx->aen_lock, flags);
 
		spin_lock(&mbx->queue_lock);
 
diff --git a/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 494e8105a..0a2318cad 100644
--- a/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/kernel/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -59,7 +59,8 @@ static const struct qlcnic_stats qlcnic_gstrings_stats[] = {
 	 QLC_OFF(stats.mac_filter_limit_overrun)},
 	{"spurious intr", QLC_SIZEOF(stats.spurious_intr),
 	 QLC_OFF(stats.spurious_intr)},
-
+	{"mbx spurious intr", QLC_SIZEOF(stats.mbx_spurious_intr),
+	 QLC_OFF(stats.mbx_spurious_intr)},
 };
 
 static const char qlcnic_device_gstrings_stats[][ETH_GSTRING_LEN] = {
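The qlcnic rework above only completes the waiter when a response is actually outstanding and otherwise counts the interrupt as spurious. Stripped to its decision, with the locking elided for brevity:

#include <stdio.h>

enum { RSP_WAIT, RSP_ARRIVED };

static int rsp_status = RSP_ARRIVED; /* idle: nothing outstanding */
static unsigned long mbx_spurious_intr;

static void handle_mbx_irq(void)
{
	if (rsp_status != RSP_ARRIVED)
		rsp_status = RSP_ARRIVED; /* i.e. complete(&mbx->completion) */
	else
		mbx_spurious_intr++;      /* nobody is waiting */
}

int main(void)
{
	handle_mbx_irq();       /* spurious: no command pending */
	rsp_status = RSP_WAIT;  /* worker posted a command */
	handle_mbx_irq();       /* genuine completion */
	printf("spurious=%lu\n", mbx_spurious_intr);
	return 0;
}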
*/
diff --git a/kernel/drivers/net/ethernet/qualcomm/qca_spi.c b/kernel/drivers/net/ethernet/qualcomm/qca_spi.c
index 689a4a5c8..1ef03939d 100644
--- a/kernel/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/kernel/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -811,7 +811,7 @@ qcaspi_netdev_setup(struct net_device *dev)
 	dev->netdev_ops = &qcaspi_netdev_ops;
 	qcaspi_set_ethtool_ops(dev);
 	dev->watchdog_timeo = QCASPI_TX_TIMEOUT;
-	dev->flags = IFF_MULTICAST;
+	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
 	dev->tx_queue_len = 100;
 
 	qca = netdev_priv(dev);
diff --git a/kernel/drivers/net/ethernet/renesas/ravb_main.c b/kernel/drivers/net/ethernet/renesas/ravb_main.c
index 467d41698..549ad2018 100644
--- a/kernel/drivers/net/ethernet/renesas/ravb_main.c
+++ b/kernel/drivers/net/ethernet/renesas/ravb_main.c
@@ -1330,6 +1330,19 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) +
 		 entry / NUM_TX_DESC * DPTR_ALIGN;
 	len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data;
+	/* Zero length DMA descriptors are problematic as they seem to
+	 * terminate DMA transfers. Avoid them by simply using a length of
+	 * DPTR_ALIGN (4) when skb data is aligned to DPTR_ALIGN.
+	 *
+	 * As skb is guaranteed to have at least ETH_ZLEN (60) bytes of
+	 * data by the call to skb_put_padto() above this is safe with
+	 * respect to both the length of the first DMA descriptor (len)
+	 * overflowing the available data and the length of the second DMA
+	 * descriptor (skb->len - len) being negative.
+	 */
+	if (len == 0)
+		len = DPTR_ALIGN;
+
 	memcpy(buffer, skb->data, len);
 	dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE);
 	if (dma_mapping_error(ndev->dev.parent, dma_addr))
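The ravb comment above argues the bump from 0 to DPTR_ALIGN is safe; the arithmetic can be checked directly, using the same constants as the driver:

#include <stdio.h>

#define DPTR_ALIGN 4
#define ETH_ZLEN   60

static void split(unsigned long data, unsigned int skb_len)
{
	/* bytes needed to reach the next DPTR_ALIGN boundary */
	unsigned int len = (DPTR_ALIGN - (data % DPTR_ALIGN)) % DPTR_ALIGN;

	if (len == 0)
		len = DPTR_ALIGN; /* avoid a DMA-terminating 0-length desc */
	printf("desc1=%u desc2=%d\n", len, (int)(skb_len - len));
}

int main(void)
{
	split(0x1000, ETH_ZLEN); /* aligned start: len forced up to 4 */
	split(0x1003, ETH_ZLEN); /* unaligned start: len is 1 */
	return 0;
}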
diff --git a/kernel/drivers/net/ethernet/renesas/sh_eth.c b/kernel/drivers/net/ethernet/renesas/sh_eth.c
index 6a8fc0f34..480f3dae0 100644
--- a/kernel/drivers/net/ethernet/renesas/sh_eth.c
+++ b/kernel/drivers/net/ethernet/renesas/sh_eth.c
@@ -832,7 +832,7 @@ static struct sh_eth_cpu_data r7s72100_data = {
 
 	.ecsr_value	= ECSR_ICD,
 	.ecsipr_value	= ECSIPR_ICDIP,
-	.eesipr_value	= 0xff7f009f,
+	.eesipr_value	= 0xe77f009f,
 
 	.tx_check	= EESR_TC1 | EESR_FTC,
 	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
@@ -1185,11 +1185,8 @@ static void sh_eth_ring_format(struct net_device *ndev)
 			break;
 		sh_eth_set_receive_align(skb);
 
-		/* RX descriptor */
-		rxdesc = &mdp->rx_ring[i];
 		/* The size of the buffer is a multiple of 32 bytes. */
 		buf_len = ALIGN(mdp->rx_buf_sz, 32);
-		rxdesc->len = cpu_to_edmac(mdp, buf_len << 16);
 		dma_addr = dma_map_single(&ndev->dev, skb->data, buf_len,
 					  DMA_FROM_DEVICE);
 		if (dma_mapping_error(&ndev->dev, dma_addr)) {
@@ -1197,6 +1194,10 @@ static void sh_eth_ring_format(struct net_device *ndev)
 			break;
 		}
 		mdp->rx_skbuff[i] = skb;
+
+		/* RX descriptor */
+		rxdesc = &mdp->rx_ring[i];
+		rxdesc->len = cpu_to_edmac(mdp, buf_len << 16);
 		rxdesc->addr = cpu_to_edmac(mdp, dma_addr);
 		rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);
 
@@ -1212,7 +1213,8 @@ static void sh_eth_ring_format(struct net_device *ndev)
 	mdp->dirty_rx = (u32) (i - mdp->num_rx_ring);
 
 	/* Mark the last entry as wrapping the ring. */
-	rxdesc->status |= cpu_to_edmac(mdp, RD_RDLE);
+	if (rxdesc)
+		rxdesc->status |= cpu_to_edmac(mdp, RD_RDLE);
 
 	memset(mdp->tx_ring, 0, tx_ringsize);
 
diff --git a/kernel/drivers/net/ethernet/rocker/rocker.c b/kernel/drivers/net/ethernet/rocker/rocker.c
index 52ec3d6e0..3920c3eb6 100644
--- a/kernel/drivers/net/ethernet/rocker/rocker.c
+++ b/kernel/drivers/net/ethernet/rocker/rocker.c
@@ -239,6 +239,7 @@ struct rocker {
 	struct {
 		u64 id;
 	} hw;
+	unsigned long ageing_time;
 	spinlock_t cmd_ring_lock;		/* for cmd ring accesses */
 	struct rocker_dma_ring_info cmd_ring;
 	struct rocker_dma_ring_info event_ring;
@@ -3704,7 +3705,7 @@ static void rocker_fdb_cleanup(unsigned long data)
 	struct rocker_port *rocker_port;
 	struct rocker_fdb_tbl_entry *entry;
 	struct hlist_node *tmp;
-	unsigned long next_timer = jiffies + BR_MIN_AGEING_TIME;
+	unsigned long next_timer = jiffies + rocker->ageing_time;
 	unsigned long expires;
 	unsigned long lock_flags;
 	int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE |
@@ -4367,8 +4368,12 @@ static int rocker_port_bridge_ageing_time(struct rocker_port *rocker_port,
					  struct switchdev_trans *trans,
					  u32 ageing_time)
 {
+	struct rocker *rocker = rocker_port->rocker;
+
 	if (!switchdev_trans_ph_prepare(trans)) {
 		rocker_port->ageing_time = clock_t_to_jiffies(ageing_time);
+		if (rocker_port->ageing_time < rocker->ageing_time)
+			rocker->ageing_time = rocker_port->ageing_time;
 		mod_timer(&rocker_port->rocker->fdb_cleanup_timer, jiffies);
 	}
 
@@ -4470,7 +4475,7 @@ static int rocker_port_obj_add(struct net_device *dev,
 		fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj);
 		err = rocker_port_fib_ipv4(rocker_port, trans,
 					   htonl(fib4->dst), fib4->dst_len,
-					   &fib4->fi, fib4->tb_id, 0);
+					   fib4->fi, fib4->tb_id, 0);
 		break;
 	case SWITCHDEV_OBJ_ID_PORT_FDB:
 		err = rocker_port_fdb_add(rocker_port, trans,
@@ -4542,7 +4547,7 @@ static int rocker_port_obj_del(struct net_device *dev,
 		fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj);
 		err = rocker_port_fib_ipv4(rocker_port, NULL,
 					   htonl(fib4->dst), fib4->dst_len,
-					   &fib4->fi, fib4->tb_id,
+					   fib4->fi, fib4->tb_id,
 					   ROCKER_OP_FLAG_REMOVE);
 		break;
 	case SWITCHDEV_OBJ_ID_PORT_FDB:
@@ -5206,10 +5211,13 @@ static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		goto err_init_tbls;
 	}
 
+	rocker->ageing_time = BR_DEFAULT_AGEING_TIME;
 	setup_timer(&rocker->fdb_cleanup_timer, rocker_fdb_cleanup,
 		    (unsigned long) rocker);
 	mod_timer(&rocker->fdb_cleanup_timer, jiffies);
 
+	rocker->ageing_time = BR_DEFAULT_AGEING_TIME;
+
 	err = rocker_probe_ports(rocker);
 	if (err) {
 		dev_err(&pdev->dev, "failed to probe ports\n");
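The sh_eth_ring_format() hunk above defers all descriptor writes until dma_map_single() has succeeded, so a mapping failure leaves no half-written ring entry. The ordering as a stub, with a fake mapping call standing in for the DMA API:

#include <stdint.h>
#include <stdio.h>

struct rx_desc { uint32_t len, addr, status; };

static int dma_map(uint32_t *addr)
{
	*addr = 0x8000; /* pretend mapping succeeded; 0 = ok */
	return 0;
}

static int fill_slot(struct rx_desc *d, uint32_t buf_len)
{
	uint32_t dma_addr;

	if (dma_map(&dma_addr))
		return -1;          /* bail before the descriptor is touched */

	d->len = buf_len << 16;     /* only now publish the entry */
	d->addr = dma_addr;
	d->status = 0x80000000;     /* think RD_RACT | RD_RFP */
	return 0;
}

int main(void)
{
	struct rx_desc d = { 0, 0, 0 };

	if (!fill_slot(&d, 1536))
		printf("slot ready: addr=%#x\n", d.addr);
	return 0;
}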
diff --git a/kernel/drivers/net/ethernet/sfc/ef10.c b/kernel/drivers/net/ethernet/sfc/ef10.c
index e6a084a6b..cbe9a3301 100644
--- a/kernel/drivers/net/ethernet/sfc/ef10.c
+++ b/kernel/drivers/net/ethernet/sfc/ef10.c
@@ -619,6 +619,17 @@ fail:
 	return rc;
 }
 
+static void efx_ef10_forget_old_piobufs(struct efx_nic *efx)
+{
+	struct efx_channel *channel;
+	struct efx_tx_queue *tx_queue;
+
+	/* All our existing PIO buffers went away */
+	efx_for_each_channel(channel, efx)
+		efx_for_each_channel_tx_queue(tx_queue, channel)
+			tx_queue->piobuf = NULL;
+}
+
 #else /* !EFX_USE_PIO */
 
 static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
@@ -635,6 +646,10 @@ static void efx_ef10_free_piobufs(struct efx_nic *efx)
 {
 }
 
+static void efx_ef10_forget_old_piobufs(struct efx_nic *efx)
+{
+}
+
 #endif /* EFX_USE_PIO */
 
 static void efx_ef10_remove(struct efx_nic *efx)
@@ -1018,6 +1033,7 @@ static void efx_ef10_reset_mc_allocations(struct efx_nic *efx)
 	nic_data->must_realloc_vis = true;
 	nic_data->must_restore_filters = true;
 	nic_data->must_restore_piobufs = true;
+	efx_ef10_forget_old_piobufs(efx);
 	nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
 
 	/* Driver-created vswitches and vports must be re-created */
diff --git a/kernel/drivers/net/ethernet/smsc/smc91x.c b/kernel/drivers/net/ethernet/smsc/smc91x.c
index 0e2fc1a84..23a038810 100644
--- a/kernel/drivers/net/ethernet/smsc/smc91x.c
+++ b/kernel/drivers/net/ethernet/smsc/smc91x.c
@@ -540,7 +540,7 @@ static inline void  smc_rcv(struct net_device *dev)
 #define smc_special_lock(lock, flags)		spin_lock_irqsave(lock, flags)
 #define smc_special_unlock(lock, flags)		spin_unlock_irqrestore(lock, flags)
 #else
-#define smc_special_trylock(lock, flags)	(flags == flags)
+#define smc_special_trylock(lock, flags)	((void)flags, true)
 #define smc_special_lock(lock, flags)		do { flags = 0; } while (0)
 #define smc_special_unlock(lock, flags)		do { flags = 0; } while (0)
 #endif
@@ -2269,6 +2269,13 @@ static int smc_drv_probe(struct platform_device *pdev)
 	if (pd) {
 		memcpy(&lp->cfg, pd, sizeof(lp->cfg));
 		lp->io_shift = SMC91X_IO_SHIFT(lp->cfg.flags);
+
+		if (!SMC_8BIT(lp) && !SMC_16BIT(lp)) {
+			dev_err(&pdev->dev,
+				"at least one of 8-bit or 16-bit access support is required.\n");
+			ret = -ENXIO;
+			goto out_free_netdev;
+		}
 	}
 
 #if IS_BUILTIN(CONFIG_OF)
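Note: in the smc_special_trylock() hunk above, the old expansion (flags == flags) compares a variable with itself, which newer compilers flag (gcc's -Wtautological-compare), and reads flags before anything has set it. The replacement ((void)flags, true) still references flags, so the macro keeps the same shape as the SMP variant, but uses the comma operator to always yield true without any comparison. A standalone illustration, with a hypothetical fake_trylock() macro:

#include <stdbool.h>
#include <stdio.h>

/* Same shape as the fixed macro: 'flags' is referenced (so the call site
 * compiles identically to the SMP build) but never compared or read for
 * a meaningful value; the comma operator makes the result always true.
 */
#define fake_trylock(lock, flags)	((void)(flags), true)

int main(void)
{
	int lock = 0;
	unsigned long flags = 0;	/* placeholder, as in the non-SMP build */

	(void)lock;	/* the demo macro, like the original, ignores the lock */
	if (fake_trylock(&lock, flags))
		puts("always succeeds; no tautological comparison is emitted");
	return 0;
}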
diff --git a/kernel/drivers/net/ethernet/smsc/smc91x.h b/kernel/drivers/net/ethernet/smsc/smc91x.h
index a3c129e1e..29df0465d 100644
--- a/kernel/drivers/net/ethernet/smsc/smc91x.h
+++ b/kernel/drivers/net/ethernet/smsc/smc91x.h
@@ -37,6 +37,27 @@
 #include <linux/smc91x.h>
 
 /*
+ * Any 16-bit access is performed with two 8-bit accesses if the hardware
+ * can't do it directly. Most registers are 16-bit so those are mandatory.
+ */
+#define SMC_outw_b(x, a, r)						\
+	do {								\
+		unsigned int __val16 = (x);				\
+		unsigned int __reg = (r);				\
+		SMC_outb(__val16, a, __reg);				\
+		SMC_outb(__val16 >> 8, a, __reg + (1 << SMC_IO_SHIFT));	\
+	} while (0)
+
+#define SMC_inw_b(a, r)							\
+	({								\
+		unsigned int __val16;					\
+		unsigned int __reg = r;					\
+		__val16 = SMC_inb(a, __reg);				\
+		__val16 |= SMC_inb(a, __reg + (1 << SMC_IO_SHIFT)) << 8;\
+		__val16;						\
+	})
+
+/*
  * Define your architecture specific bus configuration parameters here.
  */
 
@@ -55,10 +76,30 @@
 #define SMC_IO_SHIFT		(lp->io_shift)
 
 #define SMC_inb(a, r)		readb((a) + (r))
-#define SMC_inw(a, r)		readw((a) + (r))
+#define SMC_inw(a, r)							\
+	({								\
+		unsigned int __smc_r = r;				\
+		SMC_16BIT(lp) ? readw((a) + __smc_r) :			\
+		SMC_8BIT(lp) ? SMC_inw_b(a, __smc_r) :			\
+		({ BUG(); 0; });					\
+	})
+
 #define SMC_inl(a, r)		readl((a) + (r))
 #define SMC_outb(v, a, r)	writeb(v, (a) + (r))
+#define SMC_outw(v, a, r)						\
+	do {								\
+		unsigned int __v = v, __smc_r = r;			\
+		if (SMC_16BIT(lp))					\
+			__SMC_outw(__v, a, __smc_r);			\
+		else if (SMC_8BIT(lp))					\
+			SMC_outw_b(__v, a, __smc_r);			\
+		else							\
+			BUG();						\
+	} while (0)
+
 #define SMC_outl(v, a, r)	writel(v, (a) + (r))
+#define SMC_insb(a, r, p, l)	readsb((a) + (r), p, l)
+#define SMC_outsb(a, r, p, l)	writesb((a) + (r), p, l)
 #define SMC_insw(a, r, p, l)	readsw((a) + (r), p, l)
 #define SMC_outsw(a, r, p, l)	writesw((a) + (r), p, l)
 #define SMC_insl(a, r, p, l)	readsl((a) + (r), p, l)
@@ -66,7 +107,7 @@
 #define SMC_IRQ_FLAGS		(-1)	/* from resource */
 
 /* We actually can't write halfwords properly if not word aligned */
-static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg)
+static inline void __SMC_outw(u16 val, void __iomem *ioaddr, int reg)
 {
 	if ((machine_is_mainstone() || machine_is_stargate2() ||
 	     machine_is_pxa_idp()) && reg & 2) {
@@ -405,24 +446,8 @@ smc_pxa_dma_insw(void __iomem *ioaddr, struct smc_local *lp, int reg, int dma,
 
 #if ! SMC_CAN_USE_16BIT
 
-/*
- * Any 16-bit access is performed with two 8-bit accesses if the hardware
- * can't do it directly. Most registers are 16-bit so those are mandatory.
- */
-#define SMC_outw(x, ioaddr, reg)					\
-	do {								\
-		unsigned int __val16 = (x);				\
-		SMC_outb( __val16, ioaddr, reg );			\
-		SMC_outb( __val16 >> 8, ioaddr, reg + (1 << SMC_IO_SHIFT));\
-	} while (0)
-#define SMC_inw(ioaddr, reg)						\
-	({								\
-		unsigned int __val16;					\
-		__val16 =  SMC_inb( ioaddr, reg );			\
-		__val16 |= SMC_inb( ioaddr, reg + (1 << SMC_IO_SHIFT)) << 8; \
-		__val16;						\
-	})
-
+#define SMC_outw(x, ioaddr, reg)	SMC_outw_b(x, ioaddr, reg)
+#define SMC_inw(ioaddr, reg)		SMC_inw_b(ioaddr, reg)
 #define SMC_insw(a, r, p, l)		BUG()
 #define SMC_outsw(a, r, p, l)		BUG()
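Note: the smc91x.h rework above hoists the 8-bit fallback into shared SMC_outw_b()/SMC_inw_b() helpers so the same code serves both the compile-time case (#if ! SMC_CAN_USE_16BIT) and the new runtime selection (SMC_16BIT(lp) vs SMC_8BIT(lp)). A 16-bit register access becomes two 8-bit accesses: low byte at the register offset, high byte one bus stride (1 << SMC_IO_SHIFT) above it. A self-contained model of that split, with hypothetical reg_write8()/reg_read8() helpers standing in for SMC_outb()/SMC_inb():

#include <assert.h>
#include <stdint.h>

static uint8_t bus[16];	/* stand-in for the chip's I/O window */

static void reg_write8(uint8_t v, unsigned int r)	{ bus[r] = v; }
static uint8_t reg_read8(unsigned int r)		{ return bus[r]; }

/* Mirrors SMC_outw_b(): low byte first, high byte one stride above. */
static void outw_b(uint16_t v, unsigned int r, unsigned int io_shift)
{
	reg_write8(v & 0xff, r);
	reg_write8(v >> 8, r + (1u << io_shift));
}

/* Mirrors SMC_inw_b(): reassemble the halves in the same order. */
static uint16_t inw_b(unsigned int r, unsigned int io_shift)
{
	uint16_t v = reg_read8(r);

	v |= (uint16_t)reg_read8(r + (1u << io_shift)) << 8;
	return v;
}

int main(void)
{
	outw_b(0xbeef, 2, 1);	/* io_shift = 1: registers are 2 bytes apart */
	assert(inw_b(2, 1) == 0xbeef);
	return 0;
}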
diff --git a/kernel/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/kernel/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index a5b869eb4..4b100ef4a 100644
--- a/kernel/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/kernel/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2939,12 +2939,6 @@ int stmmac_dvr_probe(struct device *device,
 	spin_lock_init(&priv->lock);
 	spin_lock_init(&priv->tx_lock);
 
-	ret = register_netdev(ndev);
-	if (ret) {
-		pr_err("%s: ERROR %i registering the device\n", __func__, ret);
-		goto error_netdev_register;
-	}
-
 	/* If a specific clk_csr value is passed from the platform
 	 * this means that the CSR Clock Range selection cannot be
 	 * changed at run-time and it is fixed. Viceversa the driver'll try to
@@ -2969,11 +2963,21 @@ int stmmac_dvr_probe(struct device *device,
 		}
 	}
 
-	return 0;
+	ret = register_netdev(ndev);
+	if (ret) {
+		netdev_err(priv->dev, "%s: ERROR %i registering the device\n",
+			   __func__, ret);
+		goto error_netdev_register;
+	}
+
+	return ret;
 
-error_mdio_register:
-	unregister_netdev(ndev);
 error_netdev_register:
+	if (priv->pcs != STMMAC_PCS_RGMII &&
+	    priv->pcs != STMMAC_PCS_TBI &&
+	    priv->pcs != STMMAC_PCS_RTBI)
+		stmmac_mdio_unregister(ndev);
+error_mdio_register:
 	netif_napi_del(&priv->napi);
 error_hw_init:
 	clk_disable_unprepare(priv->pclk);
diff --git a/kernel/drivers/net/ethernet/ti/cpmac.c b/kernel/drivers/net/ethernet/ti/cpmac.c
index 77d26fe28..d52ea3008 100644
--- a/kernel/drivers/net/ethernet/ti/cpmac.c
+++ b/kernel/drivers/net/ethernet/ti/cpmac.c
@@ -549,7 +549,8 @@ fatal_error:
 
 static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
-	int queue, len;
+	int queue;
+	unsigned int len;
 	struct cpmac_desc *desc;
 	struct cpmac_priv *priv = netdev_priv(dev);
 
@@ -559,7 +560,7 @@ static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (unlikely(skb_padto(skb, ETH_ZLEN)))
 		return NETDEV_TX_OK;
 
-	len = max(skb->len, ETH_ZLEN);
+	len = max_t(unsigned int, skb->len, ETH_ZLEN);
 	queue = skb_get_queue_mapping(skb);
 
 	netif_stop_subqueue(dev, queue);
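Note: the cpmac hunks above fix a signedness mismatch rather than a logic bug: skb->len is unsigned int while ETH_ZLEN expands to the signed constant 60, and the kernel's type-checked max() warns about mixed types, so len is declared unsigned int and the comparison goes through max_t(unsigned int, ...). A sketch of why the type check exists; my_max()/my_max_t() re-implement the kernel macros for illustration only:

#include <stdio.h>

/* Kernel-style max(): the dummy pointer comparison makes the compiler
 * warn when x and y have different types (e.g. int vs unsigned int).
 */
#define my_max(x, y) ({				\
	__typeof__(x) _x = (x);			\
	__typeof__(y) _y = (y);			\
	(void)(&_x == &_y);			\
	_x > _y ? _x : _y; })

/* max_t() sidesteps the check by forcing both operands to one type. */
#define my_max_t(type, x, y) ({			\
	type _x = (x);				\
	type _y = (y);				\
	_x > _y ? _x : _y; })

int main(void)
{
	unsigned int skb_len = 14;	/* runt frame, shorter than ETH_ZLEN */
	int eth_zlen = 60;

	/* my_max(skb_len, eth_zlen) would warn about comparing distinct
	 * pointer types; forcing a common type compiles cleanly. */
	printf("padded len = %u\n", my_max_t(unsigned int, skb_len, eth_zlen));
	return 0;
}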