From e09b41010ba33a20a87472ee821fa407a5b8da36 Mon Sep 17 00:00:00 2001
From: José Pekkarinen
Date: Mon, 11 Apr 2016 10:41:07 +0300
Subject: These changes are the raw update to linux-4.4.6-rt14.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Kernel sources are taken from kernel.org, and the rt patch from the rt
wiki download page.

During the rebase, the following patch collided: Force tick interrupt
and get rid of softirq magic (I70131fb85). The conflicting hunks were
dropped, because their logic was already present in the source.

Change-Id: I7f57a4081d9deaa0d9ccfc41a6c8daccdee3b769
Signed-off-by: José Pekkarinen
---
 kernel/drivers/net/ethernet/freescale/Kconfig      |  11 +-
 kernel/drivers/net/ethernet/freescale/fec.h        |   3 +
 kernel/drivers/net/ethernet/freescale/fec_main.c   | 265 +++++--
 kernel/drivers/net/ethernet/freescale/fec_ptp.c    |  23 +-
 .../net/ethernet/freescale/fs_enet/fs_enet-main.c  |  36 +-
 .../net/ethernet/freescale/fs_enet/mac-fcc.c       |   2 +-
 .../net/ethernet/freescale/fs_enet/mac-fec.c       |   2 +-
 .../drivers/net/ethernet/freescale/fsl_pq_mdio.c   |  34 +-
 kernel/drivers/net/ethernet/freescale/gianfar.c    | 832 ++++++++++++---------
 kernel/drivers/net/ethernet/freescale/gianfar.h    |  93 ++-
 .../net/ethernet/freescale/gianfar_ethtool.c       | 401 +---------
 .../drivers/net/ethernet/freescale/gianfar_ptp.c   |   3 +-
 kernel/drivers/net/ethernet/freescale/ucc_geth.c   |   8 +-
 .../net/ethernet/freescale/ucc_geth_ethtool.c      |   2 -
 14 files changed, 901 insertions(+), 814 deletions(-)

diff --git a/kernel/drivers/net/ethernet/freescale/Kconfig b/kernel/drivers/net/ethernet/freescale/Kconfig
index 25e342572..bee32a9d9 100644
--- a/kernel/drivers/net/ethernet/freescale/Kconfig
+++ b/kernel/drivers/net/ethernet/freescale/Kconfig
@@ -7,11 +7,10 @@ config NET_VENDOR_FREESCALE
 	default y
 	depends on FSL_SOC || QUICC_ENGINE || CPM1 || CPM2 || PPC_MPC512x || \
 		   M523x || M527x || M5272 || M528x || M520x || M532x || \
-		   ARCH_MXC || ARCH_MXS || (PPC_MPC52xx && PPC_BESTCOMM)
+		   ARCH_MXC || ARCH_MXS || (PPC_MPC52xx && PPC_BESTCOMM) || \
+		   ARCH_LAYERSCAPE
 	---help---
-	  If you have a network (Ethernet) card belonging to this class, say Y
-	  and read the Ethernet-HOWTO, available from
-	  .
+	  If you have a network (Ethernet) card belonging to this class, say Y.
 
 	  Note that the answer to this question doesn't directly affect the
 	  kernel: saying N will just cause the configurator to skip all
@@ -85,12 +84,12 @@ config UGETH_TX_ON_DEMAND
 
 config GIANFAR
 	tristate "Gianfar Ethernet"
-	depends on FSL_SOC
 	select FSL_PQ_MDIO
 	select PHYLIB
 	select CRC32
 	---help---
 	  This driver supports the Gigabit TSEC on the MPC83xx, MPC85xx,
-	  and MPC86xx family of chips, and the FEC on the 8540.
+	  and MPC86xx family of chips, the eTSEC on LS1021A and the FEC
+	  on the 8540.
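As an aside on the mechanism driving most of the fec_main.c changes below:
per-variant capability bits (such as the new FEC_QUIRK_HAS_RACC) travel in
the platform driver_data, and code paths test them at runtime instead of
using compile-time #ifdefs. A minimal user-space sketch of that pattern,
with hypothetical names rather than the driver's real structures:

/* Illustrative only: quirk bits advertised per controller variant,
 * checked at runtime. Not the fec driver's actual API.
 */
#include <stdio.h>

#define QUIRK_HAS_RACC	(1 << 0)	/* variant has the RACC register */
#define QUIRK_HAS_GBIT	(1 << 1)	/* variant supports gigabit */

struct fake_priv {
	unsigned int quirks;		/* copied from driver_data at probe */
};

static void restart(const struct fake_priv *p)
{
	if (p->quirks & QUIRK_HAS_RACC)
		printf("program RX checksum offload via RACC\n");
	else
		printf("no RACC on this variant, skip it\n");
}

int main(void)
{
	struct fake_priv imx6q_like = { .quirks = QUIRK_HAS_RACC | QUIRK_HAS_GBIT };
	struct fake_priv older_core = { .quirks = 0 };

	restart(&imx6q_like);
	restart(&older_core);
	return 0;
}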
endif # NET_VENDOR_FREESCALE diff --git a/kernel/drivers/net/ethernet/freescale/fec.h b/kernel/drivers/net/ethernet/freescale/fec.h index a86af8a74..99d33e2d3 100644 --- a/kernel/drivers/net/ethernet/freescale/fec.h +++ b/kernel/drivers/net/ethernet/freescale/fec.h @@ -428,6 +428,8 @@ struct bufdesc_ex { #define FEC_QUIRK_BUG_CAPTURE (1 << 10) /* Controller has only one MDIO bus */ #define FEC_QUIRK_SINGLE_MDIO (1 << 11) +/* Controller supports RACC register */ +#define FEC_QUIRK_HAS_RACC (1 << 12) struct fec_enet_priv_tx_q { int index; @@ -560,6 +562,7 @@ struct fec_enet_private { }; void fec_ptp_init(struct platform_device *pdev); +void fec_ptp_stop(struct platform_device *pdev); void fec_ptp_start_cyclecounter(struct net_device *ndev); int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr); int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr); diff --git a/kernel/drivers/net/ethernet/freescale/fec_main.c b/kernel/drivers/net/ethernet/freescale/fec_main.c index 570390b5c..b2a32209f 100644 --- a/kernel/drivers/net/ethernet/freescale/fec_main.c +++ b/kernel/drivers/net/ethernet/freescale/fec_main.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -77,6 +78,7 @@ static void fec_enet_itr_coal_init(struct net_device *ndev); #define FEC_ENET_RAEM_V 0x8 #define FEC_ENET_RAFL_V 0x8 #define FEC_ENET_OPD_V 0xFFF0 +#define FEC_MDIO_PM_TIMEOUT 100 /* ms */ static struct platform_device_id fec_devtype[] = { { @@ -85,28 +87,30 @@ static struct platform_device_id fec_devtype[] = { .driver_data = 0, }, { .name = "imx25-fec", - .driver_data = FEC_QUIRK_USE_GASKET, + .driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_HAS_RACC, }, { .name = "imx27-fec", - .driver_data = 0, + .driver_data = FEC_QUIRK_HAS_RACC, }, { .name = "imx28-fec", .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME | - FEC_QUIRK_SINGLE_MDIO, + FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC, }, { .name = "imx6q-fec", .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | - FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358, + FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 | + FEC_QUIRK_HAS_RACC, }, { .name = "mvf600-fec", - .driver_data = FEC_QUIRK_ENET_MAC, + .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_RACC, }, { .name = "imx6sx-fec", .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB | - FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE, + FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE | + FEC_QUIRK_HAS_RACC, }, { /* sentinel */ } @@ -360,7 +364,7 @@ fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev) return 0; } -static int +static struct bufdesc * fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb, struct net_device *ndev) @@ -435,10 +439,7 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq, bdp->cbd_sc = status; } - txq->cur_tx = bdp; - - return 0; - + return bdp; dma_mapping_error: bdp = txq->cur_tx; for (i = 0; i < frag; i++) { @@ -446,7 +447,7 @@ dma_mapping_error: dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, bdp->cbd_datlen, DMA_TO_DEVICE); } - return NETDEV_TX_OK; + return ERR_PTR(-ENOMEM); } static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, @@ -463,7 +464,6 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, unsigned int estatus = 0; unsigned int index; int entries_free; - int ret; entries_free = fec_enet_get_free_txdesc_num(fep, txq); if (entries_free < 
MAX_SKB_FRAGS + 1) { @@ -481,6 +481,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, /* Fill in a Tx ring entry */ bdp = txq->cur_tx; + last_bdp = bdp; status = bdp->cbd_sc; status &= ~BD_ENET_TX_STATS; @@ -509,9 +510,9 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, } if (nr_frags) { - ret = fec_enet_txq_submit_frag_skb(txq, skb, ndev); - if (ret) - return ret; + last_bdp = fec_enet_txq_submit_frag_skb(txq, skb, ndev); + if (IS_ERR(last_bdp)) + return NETDEV_TX_OK; } else { status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST); if (fep->bufdesc_ex) { @@ -540,7 +541,6 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, ebdp->cbd_esc = estatus; } - last_bdp = txq->cur_tx; index = fec_enet_get_bd_index(txq->tx_bd_base, last_bdp, fep); /* Save skb pointer */ txq->tx_skbuff[index] = skb; @@ -559,6 +559,10 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, skb_tx_timestamp(skb); + /* Make sure the update to bdp and tx_skbuff are performed before + * cur_tx. + */ + wmb(); txq->cur_tx = bdp; /* Trigger transmission start */ @@ -970,13 +974,15 @@ fec_restart(struct net_device *ndev) writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); #if !defined(CONFIG_M5272) - /* set RX checksum */ - val = readl(fep->hwp + FEC_RACC); - if (fep->csum_flags & FLAG_RX_CSUM_ENABLED) - val |= FEC_RACC_OPTIONS; - else - val &= ~FEC_RACC_OPTIONS; - writel(val, fep->hwp + FEC_RACC); + if (fep->quirks & FEC_QUIRK_HAS_RACC) { + /* set RX checksum */ + val = readl(fep->hwp + FEC_RACC); + if (fep->csum_flags & FLAG_RX_CSUM_ENABLED) + val |= FEC_RACC_OPTIONS; + else + val &= ~FEC_RACC_OPTIONS; + writel(val, fep->hwp + FEC_RACC); + } #endif /* @@ -1212,10 +1218,11 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) /* get next bdp of dirty_tx */ bdp = fec_enet_get_nextdesc(bdp, fep, queue_id); - while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) { - - /* current queue is empty */ - if (bdp == txq->cur_tx) + while (bdp != READ_ONCE(txq->cur_tx)) { + /* Order the load of cur_tx and cbd_sc */ + rmb(); + status = READ_ONCE(bdp->cbd_sc); + if (status & BD_ENET_TX_READY) break; index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); @@ -1269,6 +1276,10 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) /* Free the sk buffer associated with this last transmit */ dev_kfree_skb_any(skb); + /* Make sure the update to bdp and tx_skbuff are performed + * before dirty_tx + */ + wmb(); txq->dirty_tx = bdp; /* Update pointer to next buffer descriptor to be transmitted */ @@ -1764,10 +1775,16 @@ static void fec_enet_adjust_link(struct net_device *ndev) static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum) { struct fec_enet_private *fep = bus->priv; + struct device *dev = &fep->pdev->dev; unsigned long time_left; + int ret = 0; + + ret = pm_runtime_get_sync(dev); + if (ret < 0) + return ret; fep->mii_timeout = 0; - init_completion(&fep->mdio_done); + reinit_completion(&fep->mdio_done); /* start a read op */ writel(FEC_MMFR_ST | FEC_MMFR_OP_READ | @@ -1780,21 +1797,35 @@ static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum) if (time_left == 0) { fep->mii_timeout = 1; netdev_err(fep->netdev, "MDIO read timeout\n"); - return -ETIMEDOUT; + ret = -ETIMEDOUT; + goto out; } - /* return value */ - return FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA)); + ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA)); + +out: + pm_runtime_mark_last_busy(dev); + pm_runtime_put_autosuspend(dev); + + return ret; } static int 
fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value) { struct fec_enet_private *fep = bus->priv; + struct device *dev = &fep->pdev->dev; unsigned long time_left; + int ret; + + ret = pm_runtime_get_sync(dev); + if (ret < 0) + return ret; + else + ret = 0; fep->mii_timeout = 0; - init_completion(&fep->mdio_done); + reinit_completion(&fep->mdio_done); /* start a write op */ writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE | @@ -1808,10 +1839,13 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum, if (time_left == 0) { fep->mii_timeout = 1; netdev_err(fep->netdev, "MDIO write timeout\n"); - return -ETIMEDOUT; + ret = -ETIMEDOUT; } - return 0; + pm_runtime_mark_last_busy(dev); + pm_runtime_put_autosuspend(dev); + + return ret; } static int fec_enet_clk_enable(struct net_device *ndev, bool enable) @@ -1823,9 +1857,6 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable) ret = clk_prepare_enable(fep->clk_ahb); if (ret) return ret; - ret = clk_prepare_enable(fep->clk_ipg); - if (ret) - goto failed_clk_ipg; if (fep->clk_enet_out) { ret = clk_prepare_enable(fep->clk_enet_out); if (ret) @@ -1849,7 +1880,6 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable) } } else { clk_disable_unprepare(fep->clk_ahb); - clk_disable_unprepare(fep->clk_ipg); if (fep->clk_enet_out) clk_disable_unprepare(fep->clk_enet_out); if (fep->clk_ptp) { @@ -1871,8 +1901,6 @@ failed_clk_ptp: if (fep->clk_enet_out) clk_disable_unprepare(fep->clk_enet_out); failed_clk_enet_out: - clk_disable_unprepare(fep->clk_ipg); -failed_clk_ipg: clk_disable_unprepare(fep->clk_ahb); return ret; @@ -2119,6 +2147,82 @@ static void fec_enet_get_drvinfo(struct net_device *ndev, strlcpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info)); } +static int fec_enet_get_regs_len(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + struct resource *r; + int s = 0; + + r = platform_get_resource(fep->pdev, IORESOURCE_MEM, 0); + if (r) + s = resource_size(r); + + return s; +} + +/* List of registers that can be safety be read to dump them with ethtool */ +#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ + defined(CONFIG_M520x) || defined(CONFIG_M532x) || \ + defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28) +static u32 fec_enet_register_offset[] = { + FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0, + FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL, + FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, FEC_OPD, FEC_TXIC0, FEC_TXIC1, + FEC_TXIC2, FEC_RXIC0, FEC_RXIC1, FEC_RXIC2, FEC_HASH_TABLE_HIGH, + FEC_HASH_TABLE_LOW, FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW, + FEC_X_WMRK, FEC_R_BOUND, FEC_R_FSTART, FEC_R_DES_START_1, + FEC_X_DES_START_1, FEC_R_BUFF_SIZE_1, FEC_R_DES_START_2, + FEC_X_DES_START_2, FEC_R_BUFF_SIZE_2, FEC_R_DES_START_0, + FEC_X_DES_START_0, FEC_R_BUFF_SIZE_0, FEC_R_FIFO_RSFL, FEC_R_FIFO_RSEM, + FEC_R_FIFO_RAEM, FEC_R_FIFO_RAFL, FEC_RACC, FEC_RCMR_1, FEC_RCMR_2, + FEC_DMA_CFG_1, FEC_DMA_CFG_2, FEC_R_DES_ACTIVE_1, FEC_X_DES_ACTIVE_1, + FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_2, FEC_QOS_SCHEME, + RMON_T_DROP, RMON_T_PACKETS, RMON_T_BC_PKT, RMON_T_MC_PKT, + RMON_T_CRC_ALIGN, RMON_T_UNDERSIZE, RMON_T_OVERSIZE, RMON_T_FRAG, + RMON_T_JAB, RMON_T_COL, RMON_T_P64, RMON_T_P65TO127, RMON_T_P128TO255, + RMON_T_P256TO511, RMON_T_P512TO1023, RMON_T_P1024TO2047, + RMON_T_P_GTE2048, RMON_T_OCTETS, + IEEE_T_DROP, IEEE_T_FRAME_OK, IEEE_T_1COL, IEEE_T_MCOL, IEEE_T_DEF, + 
IEEE_T_LCOL, IEEE_T_EXCOL, IEEE_T_MACERR, IEEE_T_CSERR, IEEE_T_SQE, + IEEE_T_FDXFC, IEEE_T_OCTETS_OK, + RMON_R_PACKETS, RMON_R_BC_PKT, RMON_R_MC_PKT, RMON_R_CRC_ALIGN, + RMON_R_UNDERSIZE, RMON_R_OVERSIZE, RMON_R_FRAG, RMON_R_JAB, + RMON_R_RESVD_O, RMON_R_P64, RMON_R_P65TO127, RMON_R_P128TO255, + RMON_R_P256TO511, RMON_R_P512TO1023, RMON_R_P1024TO2047, + RMON_R_P_GTE2048, RMON_R_OCTETS, + IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR, + IEEE_R_FDXFC, IEEE_R_OCTETS_OK +}; +#else +static u32 fec_enet_register_offset[] = { + FEC_ECNTRL, FEC_IEVENT, FEC_IMASK, FEC_IVEC, FEC_R_DES_ACTIVE_0, + FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_0, + FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2, FEC_MII_DATA, FEC_MII_SPEED, + FEC_R_BOUND, FEC_R_FSTART, FEC_X_WMRK, FEC_X_FSTART, FEC_R_CNTRL, + FEC_MAX_FRM_LEN, FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, + FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW, FEC_R_DES_START_0, + FEC_R_DES_START_1, FEC_R_DES_START_2, FEC_X_DES_START_0, + FEC_X_DES_START_1, FEC_X_DES_START_2, FEC_R_BUFF_SIZE_0, + FEC_R_BUFF_SIZE_1, FEC_R_BUFF_SIZE_2 +}; +#endif + +static void fec_enet_get_regs(struct net_device *ndev, + struct ethtool_regs *regs, void *regbuf) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + u32 __iomem *theregs = (u32 __iomem *)fep->hwp; + u32 *buf = (u32 *)regbuf; + u32 i, off; + + memset(buf, 0, regs->len); + + for (i = 0; i < ARRAY_SIZE(fec_enet_register_offset); i++) { + off = fec_enet_register_offset[i] / 4; + buf[off] = readl(&theregs[off]); + } +} + static int fec_enet_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info) { @@ -2516,6 +2620,8 @@ static const struct ethtool_ops fec_enet_ethtool_ops = { .get_settings = fec_enet_get_settings, .set_settings = fec_enet_set_settings, .get_drvinfo = fec_enet_get_drvinfo, + .get_regs_len = fec_enet_get_regs_len, + .get_regs = fec_enet_get_regs, .nway_reset = fec_enet_nway_reset, .get_link = ethtool_op_get_link, .get_coalesce = fec_enet_get_coalesce, @@ -2766,10 +2872,14 @@ fec_enet_open(struct net_device *ndev) struct fec_enet_private *fep = netdev_priv(ndev); int ret; + ret = pm_runtime_get_sync(&fep->pdev->dev); + if (ret < 0) + return ret; + pinctrl_pm_select_default_state(&fep->pdev->dev); ret = fec_enet_clk_enable(ndev, true); if (ret) - return ret; + goto clk_enable; /* I should reset the ring buffers here, but I don't yet know * a simple way to do that. 
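Stepping back to the lockless tx-ring changes earlier in this fec_main.c
diff: the producer fills the descriptor and tx_skbuff first, issues wmb(),
and only then publishes cur_tx; the cleanup path reads cur_tx, issues
rmb(), and only then trusts the descriptor contents. A rough stand-alone
model of that ordering contract, using C11 release/acquire in place of the
kernel barriers (all names hypothetical, not the driver's code):

/* Illustrative only: publish/consume a ring index with the same
 * ordering guarantees the wmb()/rmb() pairs above provide.
 */
#include <stdatomic.h>
#include <stdio.h>

struct ring {
	int desc[8];		/* stand-in for the buffer descriptors */
	_Atomic int cur_tx;	/* producer's publish index */
};

static void produce(struct ring *r, int i, int val)
{
	r->desc[i] = val;	/* 1: fill the descriptor */
	atomic_store_explicit(&r->cur_tx, i + 1,
			      memory_order_release);	/* 2: wmb() + publish */
}

static int consume(struct ring *r, int i)
{
	while (atomic_load_explicit(&r->cur_tx,
				    memory_order_acquire) <= i)
		;		/* not yet published */
	return r->desc[i];	/* load + rmb(): descriptor now safe to read */
}

int main(void)
{
	struct ring r = { .cur_tx = 0 };

	produce(&r, 0, 42);
	printf("consumed %d\n", consume(&r, 0));
	return 0;
}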
@@ -2779,12 +2889,14 @@ fec_enet_open(struct net_device *ndev) if (ret) goto err_enet_alloc; + /* Init MAC prior to mii bus probe */ + fec_restart(ndev); + /* Probe and connect to PHY when open the interface */ ret = fec_enet_mii_probe(ndev); if (ret) goto err_enet_mii_probe; - fec_restart(ndev); napi_enable(&fep->napi); phy_start(fep->phy_dev); netif_tx_start_all_queues(ndev); @@ -2798,6 +2910,9 @@ err_enet_mii_probe: fec_enet_free_buffers(ndev); err_enet_alloc: fec_enet_clk_enable(ndev, false); +clk_enable: + pm_runtime_mark_last_busy(&fep->pdev->dev); + pm_runtime_put_autosuspend(&fep->pdev->dev); pinctrl_pm_select_sleep_state(&fep->pdev->dev); return ret; } @@ -2820,6 +2935,9 @@ fec_enet_close(struct net_device *ndev) fec_enet_clk_enable(ndev, false); pinctrl_pm_select_sleep_state(&fep->pdev->dev); + pm_runtime_mark_last_busy(&fep->pdev->dev); + pm_runtime_put_autosuspend(&fep->pdev->dev); + fec_enet_free_buffers(ndev); return 0; @@ -2913,6 +3031,14 @@ fec_set_mac_address(struct net_device *ndev, void *p) memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); } + /* Add netif status check here to avoid system hang in below case: + * ifconfig ethx down; ifconfig ethx hw ether xx:xx:xx:xx:xx:xx; + * After ethx down, fec all clocks are gated off and then register + * access causes system hang. + */ + if (!netif_running(ndev)) + return 0; + writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) | (ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24), fep->hwp + FEC_ADDR_LOW); @@ -2944,7 +3070,6 @@ static void fec_poll_controller(struct net_device *dev) } #endif -#define FEATURES_NEED_QUIESCE NETIF_F_RXCSUM static inline void fec_enet_set_netdev_features(struct net_device *netdev, netdev_features_t features) { @@ -2968,7 +3093,7 @@ static int fec_set_features(struct net_device *netdev, struct fec_enet_private *fep = netdev_priv(netdev); netdev_features_t changed = features ^ netdev->features; - if (netif_running(netdev) && changed & FEATURES_NEED_QUIESCE) { + if (netif_running(netdev) && changed & NETIF_F_RXCSUM) { napi_disable(&fep->napi); netif_tx_lock_bh(netdev); fec_stop(netdev); @@ -3032,8 +3157,8 @@ static int fec_enet_init(struct net_device *ndev) fep->bufdesc_size; /* Allocate memory for buffer descriptors. 
*/ - cbd_base = dma_alloc_coherent(NULL, bd_size, &bd_dma, - GFP_KERNEL); + cbd_base = dmam_alloc_coherent(&fep->pdev->dev, bd_size, &bd_dma, + GFP_KERNEL); if (!cbd_base) { return -ENOMEM; } @@ -3136,7 +3261,7 @@ static void fec_reset_phy(struct platform_device *pdev) return; } msleep(msec); - gpio_set_value(phy_reset, 1); + gpio_set_value_cansleep(phy_reset, 1); } #else /* CONFIG_OF */ static void fec_reset_phy(struct platform_device *pdev) @@ -3305,6 +3430,10 @@ fec_probe(struct platform_device *pdev) if (ret) goto failed_clk; + ret = clk_prepare_enable(fep->clk_ipg); + if (ret) + goto failed_clk_ipg; + fep->reg_phy = devm_regulator_get(&pdev->dev, "phy"); if (!IS_ERR(fep->reg_phy)) { ret = regulator_enable(fep->reg_phy); @@ -3317,6 +3446,12 @@ fec_probe(struct platform_device *pdev) fep->reg_phy = NULL; } + pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT); + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_get_noresume(&pdev->dev); + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); + fec_reset_phy(pdev); if (fep->bufdesc_ex) @@ -3364,6 +3499,10 @@ fec_probe(struct platform_device *pdev) fep->rx_copybreak = COPYBREAK_DEFAULT; INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work); + + pm_runtime_mark_last_busy(&pdev->dev); + pm_runtime_put_autosuspend(&pdev->dev); + return 0; failed_register: @@ -3371,9 +3510,12 @@ failed_register: failed_mii_init: failed_irq: failed_init: + fec_ptp_stop(pdev); if (fep->reg_phy) regulator_disable(fep->reg_phy); failed_regulator: + clk_disable_unprepare(fep->clk_ipg); +failed_clk_ipg: fec_enet_clk_enable(ndev, false); failed_clk: failed_phy: @@ -3390,14 +3532,12 @@ fec_drv_remove(struct platform_device *pdev) struct net_device *ndev = platform_get_drvdata(pdev); struct fec_enet_private *fep = netdev_priv(ndev); - cancel_delayed_work_sync(&fep->time_keep); cancel_work_sync(&fep->tx_timeout_work); + fec_ptp_stop(pdev); unregister_netdev(ndev); fec_enet_mii_remove(fep); if (fep->reg_phy) regulator_disable(fep->reg_phy); - if (fep->ptp_clock) - ptp_clock_unregister(fep->ptp_clock); of_node_put(fep->phy_node); free_netdev(ndev); @@ -3485,7 +3625,28 @@ failed_clk: return ret; } -static SIMPLE_DEV_PM_OPS(fec_pm_ops, fec_suspend, fec_resume); +static int __maybe_unused fec_runtime_suspend(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct fec_enet_private *fep = netdev_priv(ndev); + + clk_disable_unprepare(fep->clk_ipg); + + return 0; +} + +static int __maybe_unused fec_runtime_resume(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct fec_enet_private *fep = netdev_priv(ndev); + + return clk_prepare_enable(fep->clk_ipg); +} + +static const struct dev_pm_ops fec_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume) + SET_RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL) +}; static struct platform_driver fec_driver = { .driver = { diff --git a/kernel/drivers/net/ethernet/freescale/fec_ptp.c b/kernel/drivers/net/ethernet/freescale/fec_ptp.c index a583d89b1..f9e74461b 100644 --- a/kernel/drivers/net/ethernet/freescale/fec_ptp.c +++ b/kernel/drivers/net/ethernet/freescale/fec_ptp.c @@ -112,9 +112,8 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable) unsigned long flags; u32 val, tempval; int inc; - struct timespec ts; + struct timespec64 ts; u64 ns; - u32 remainder; val = 0; if (!(fep->hwts_tx_en || fep->hwts_rx_en)) { @@ -163,8 +162,7 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable) tempval = 
readl(fep->hwp + FEC_ATIME); /* Convert the ptp local counter to 1588 timestamp */ ns = timecounter_cyc2time(&fep->tc, tempval); - ts.tv_sec = div_u64_rem(ns, 1000000000ULL, &remainder); - ts.tv_nsec = remainder; + ts = ns_to_timespec64(ns); /* The tempval is less than 3 seconds, and so val is less than * 4 seconds. No overflow for 32bit calculation. @@ -353,6 +351,7 @@ static int fec_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) tmp = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_MASK; tmp |= corr_ns << FEC_T_INC_CORR_OFFSET; writel(tmp, fep->hwp + FEC_ATIME_INC); + corr_period = corr_period > 1 ? corr_period - 1 : corr_period; writel(corr_period, fep->hwp + FEC_ATIME_CORR); /* dummy read to update the timer. */ timecounter_read(&fep->tc); @@ -505,12 +504,6 @@ int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr) break; default: - /* - * register RXMTRL must be set in order to do V1 packets, - * therefore it is not possible to time stamp both V1 Sync and - * Delay_Req messages and hardware does not support - * timestamping all packets => return error - */ fep->hwts_rx_en = 1; config.rx_filter = HWTSTAMP_FILTER_ALL; break; @@ -603,6 +596,16 @@ void fec_ptp_init(struct platform_device *pdev) schedule_delayed_work(&fep->time_keep, HZ); } +void fec_ptp_stop(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct fec_enet_private *fep = netdev_priv(ndev); + + cancel_delayed_work_sync(&fep->time_keep); + if (fep->ptp_clock) + ptp_clock_unregister(fep->ptp_clock); +} + /** * fec_ptp_check_pps_event * @fep: the fec_enet_private structure handle diff --git a/kernel/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/kernel/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c index 9b3639eae..cf8e54652 100644 --- a/kernel/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c +++ b/kernel/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c @@ -86,7 +86,7 @@ static int fs_enet_rx_napi(struct napi_struct *napi, int budget) struct net_device *dev = fep->ndev; const struct fs_platform_info *fpi = fep->fpi; cbd_t __iomem *bdp; - struct sk_buff *skb, *skbn, *skbt; + struct sk_buff *skb, *skbn; int received = 0; u16 pkt_len, sc; int curidx; @@ -161,10 +161,7 @@ static int fs_enet_rx_napi(struct napi_struct *napi, int budget) skb_reserve(skbn, 2); /* align IP header */ skb_copy_from_linear_data(skb, skbn->data, pkt_len); - /* swap */ - skbt = skb; - skb = skbn; - skbn = skbt; + swap(skb, skbn); } } else { skbn = netdev_alloc_skb(dev, ENET_RX_FRSIZE); @@ -490,6 +487,9 @@ static struct sk_buff *tx_skb_align_workaround(struct net_device *dev, { struct sk_buff *new_skb; + if (skb_linearize(skb)) + return NULL; + /* Alloc new skb */ new_skb = netdev_alloc_skb(dev, skb->len + 4); if (!new_skb) @@ -515,12 +515,27 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) cbd_t __iomem *bdp; int curidx; u16 sc; - int nr_frags = skb_shinfo(skb)->nr_frags; + int nr_frags; skb_frag_t *frag; int len; - #ifdef CONFIG_FS_ENET_MPC5121_FEC - if (((unsigned long)skb->data) & 0x3) { + int is_aligned = 1; + int i; + + if (!IS_ALIGNED((unsigned long)skb->data, 4)) { + is_aligned = 0; + } else { + nr_frags = skb_shinfo(skb)->nr_frags; + frag = skb_shinfo(skb)->frags; + for (i = 0; i < nr_frags; i++, frag++) { + if (!IS_ALIGNED(frag->page_offset, 4)) { + is_aligned = 0; + break; + } + } + } + + if (!is_aligned) { skb = tx_skb_align_workaround(dev, skb); if (!skb) { /* @@ -532,6 +547,7 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device 
*dev) } } #endif + spin_lock(&fep->tx_lock); /* @@ -539,6 +555,7 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) */ bdp = fep->cur_tx; + nr_frags = skb_shinfo(skb)->nr_frags; if (fep->tx_free <= nr_frags || (CBDR_SC(bdp) & BD_ENET_TX_READY)) { netif_stop_queue(dev); spin_unlock(&fep->tx_lock); @@ -569,7 +586,8 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) frag = skb_shinfo(skb)->frags; while (nr_frags) { CBDC_SC(bdp, - BD_ENET_TX_STATS | BD_ENET_TX_LAST | BD_ENET_TX_TC); + BD_ENET_TX_STATS | BD_ENET_TX_INTR | BD_ENET_TX_LAST | + BD_ENET_TX_TC); CBDS_SC(bdp, BD_ENET_TX_READY); if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0) diff --git a/kernel/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c b/kernel/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c index 08f5b911d..52e0091b4 100644 --- a/kernel/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c +++ b/kernel/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c @@ -552,7 +552,7 @@ static void tx_restart(struct net_device *dev) cbd_t __iomem *prev_bd; cbd_t __iomem *last_tx_bd; - last_tx_bd = fep->tx_bd_base + (fpi->tx_ring * sizeof(cbd_t)); + last_tx_bd = fep->tx_bd_base + ((fpi->tx_ring - 1) * sizeof(cbd_t)); /* get the current bd held in TBPTR and scan back from this point */ recheck_bd = curr_tbptr = (cbd_t __iomem *) diff --git a/kernel/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/kernel/drivers/net/ethernet/freescale/fs_enet/mac-fec.c index b34214e2d..016743e35 100644 --- a/kernel/drivers/net/ethernet/freescale/fs_enet/mac-fec.c +++ b/kernel/drivers/net/ethernet/freescale/fs_enet/mac-fec.c @@ -110,7 +110,7 @@ static int do_pd_setup(struct fs_enet_private *fep) } #define FEC_NAPI_RX_EVENT_MSK (FEC_ENET_RXF | FEC_ENET_RXB) -#define FEC_NAPI_TX_EVENT_MSK (FEC_ENET_TXF | FEC_ENET_TXB) +#define FEC_NAPI_TX_EVENT_MSK (FEC_ENET_TXF) #define FEC_RX_EVENT (FEC_ENET_RXF) #define FEC_TX_EVENT (FEC_ENET_TXF) #define FEC_ERR_EVENT_MSK (FEC_ENET_HBERR | FEC_ENET_BABR | \ diff --git a/kernel/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/kernel/drivers/net/ethernet/freescale/fsl_pq_mdio.c index 3c40f6b99..40071dad1 100644 --- a/kernel/drivers/net/ethernet/freescale/fsl_pq_mdio.c +++ b/kernel/drivers/net/ethernet/freescale/fsl_pq_mdio.c @@ -198,17 +198,28 @@ static int fsl_pq_mdio_reset(struct mii_bus *bus) #if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE) /* + * Return the TBIPA address, starting from the address + * of the mapped GFAR MDIO registers (struct gfar) * This is mildly evil, but so is our hardware for doing this. * Also, we have to cast back to struct gfar because of * definition weirdness done in gianfar.h. 
*/ -static uint32_t __iomem *get_gfar_tbipa(void __iomem *p) +static uint32_t __iomem *get_gfar_tbipa_from_mdio(void __iomem *p) { struct gfar __iomem *enet_regs = p; return &enet_regs->tbipa; } +/* + * Return the TBIPA address, starting from the address + * of the mapped GFAR MII registers (gfar_mii_regs[] within struct gfar) + */ +static uint32_t __iomem *get_gfar_tbipa_from_mii(void __iomem *p) +{ + return get_gfar_tbipa_from_mdio(container_of(p, struct gfar, gfar_mii_regs)); +} + /* * Return the TBIPAR address for an eTSEC2 node */ @@ -220,11 +231,12 @@ static uint32_t __iomem *get_etsec_tbipa(void __iomem *p) #if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE) /* - * Return the TBIPAR address for a QE MDIO node + * Return the TBIPAR address for a QE MDIO node, starting from the address + * of the mapped MII registers (struct fsl_pq_mii) */ static uint32_t __iomem *get_ucc_tbipa(void __iomem *p) { - struct fsl_pq_mdio __iomem *mdio = p; + struct fsl_pq_mdio __iomem *mdio = container_of(p, struct fsl_pq_mdio, mii); return &mdio->utbipar; } @@ -300,14 +312,14 @@ static const struct of_device_id fsl_pq_mdio_match[] = { .compatible = "fsl,gianfar-tbi", .data = &(struct fsl_pq_mdio_data) { .mii_offset = 0, - .get_tbipa = get_gfar_tbipa, + .get_tbipa = get_gfar_tbipa_from_mii, }, }, { .compatible = "fsl,gianfar-mdio", .data = &(struct fsl_pq_mdio_data) { .mii_offset = 0, - .get_tbipa = get_gfar_tbipa, + .get_tbipa = get_gfar_tbipa_from_mii, }, }, { @@ -315,7 +327,7 @@ static const struct of_device_id fsl_pq_mdio_match[] = { .compatible = "gianfar", .data = &(struct fsl_pq_mdio_data) { .mii_offset = offsetof(struct fsl_pq_mdio, mii), - .get_tbipa = get_gfar_tbipa, + .get_tbipa = get_gfar_tbipa_from_mdio, }, }, { @@ -445,6 +457,16 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev) tbipa = data->get_tbipa(priv->map); + /* + * Add consistency check to make sure TBI is contained + * within the mapped range (not because we would get a + * segfault, rather to catch bugs in computing TBI + * address). Print error message but continue anyway. 
+ */ + if ((void *)tbipa > priv->map + resource_size(&res) - 4) + dev_err(&pdev->dev, "invalid register map (should be at least 0x%04zx to contain TBI address)\n", + ((void *)tbipa - priv->map) + 4); + iowrite32be(be32_to_cpup(prop), tbipa); } } diff --git a/kernel/drivers/net/ethernet/freescale/gianfar.c b/kernel/drivers/net/ethernet/freescale/gianfar.c index e616b71d5..3e233d924 100644 --- a/kernel/drivers/net/ethernet/freescale/gianfar.c +++ b/kernel/drivers/net/ethernet/freescale/gianfar.c @@ -107,17 +107,17 @@ #include "gianfar.h" -#define TX_TIMEOUT (1*HZ) +#define TX_TIMEOUT (5*HZ) -const char gfar_driver_version[] = "1.3"; +const char gfar_driver_version[] = "2.0"; static int gfar_enet_open(struct net_device *dev); static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev); static void gfar_reset_task(struct work_struct *work); static void gfar_timeout(struct net_device *dev); static int gfar_close(struct net_device *dev); -static struct sk_buff *gfar_new_skb(struct net_device *dev, - dma_addr_t *bufaddr); +static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue, + int alloc_cnt); static int gfar_set_mac_address(struct net_device *dev); static int gfar_change_mtu(struct net_device *dev, int new_mtu); static irqreturn_t gfar_error(int irq, void *dev_id); @@ -141,8 +141,7 @@ static void gfar_netpoll(struct net_device *dev); #endif int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit); static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue); -static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb, - int amount_pull, struct napi_struct *napi); +static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb); static void gfar_halt_nodisable(struct gfar_private *priv); static void gfar_clear_exact_match(struct net_device *dev); static void gfar_set_mac_for_addr(struct net_device *dev, int num, @@ -169,17 +168,15 @@ static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, bdp->lstatus = cpu_to_be32(lstatus); } -static int gfar_init_bds(struct net_device *ndev) +static void gfar_init_bds(struct net_device *ndev) { struct gfar_private *priv = netdev_priv(ndev); struct gfar __iomem *regs = priv->gfargrp[0].regs; struct gfar_priv_tx_q *tx_queue = NULL; struct gfar_priv_rx_q *rx_queue = NULL; struct txbd8 *txbdp; - struct rxbd8 *rxbdp; u32 __iomem *rfbptr; int i, j; - dma_addr_t bufaddr; for (i = 0; i < priv->num_tx_queues; i++) { tx_queue = priv->tx_queue[i]; @@ -207,40 +204,26 @@ static int gfar_init_bds(struct net_device *ndev) rfbptr = ®s->rfbptr0; for (i = 0; i < priv->num_rx_queues; i++) { rx_queue = priv->rx_queue[i]; - rx_queue->cur_rx = rx_queue->rx_bd_base; - rx_queue->skb_currx = 0; - rxbdp = rx_queue->rx_bd_base; - for (j = 0; j < rx_queue->rx_ring_size; j++) { - struct sk_buff *skb = rx_queue->rx_skbuff[j]; + rx_queue->next_to_clean = 0; + rx_queue->next_to_use = 0; + rx_queue->next_to_alloc = 0; - if (skb) { - bufaddr = be32_to_cpu(rxbdp->bufPtr); - } else { - skb = gfar_new_skb(ndev, &bufaddr); - if (!skb) { - netdev_err(ndev, "Can't allocate RX buffers\n"); - return -ENOMEM; - } - rx_queue->rx_skbuff[j] = skb; - } - - gfar_init_rxbdp(rx_queue, rxbdp, bufaddr); - rxbdp++; - } + /* make sure next_to_clean != next_to_use after this + * by leaving at least 1 unused descriptor + */ + gfar_alloc_rx_buffs(rx_queue, gfar_rxbd_unused(rx_queue)); rx_queue->rfbptr = rfbptr; rfbptr += 2; } - - return 0; } static int gfar_alloc_skb_resources(struct net_device *ndev) { void *vaddr; 
dma_addr_t addr; - int i, j, k; + int i, j; struct gfar_private *priv = netdev_priv(ndev); struct device *dev = priv->dev; struct gfar_priv_tx_q *tx_queue = NULL; @@ -279,7 +262,8 @@ static int gfar_alloc_skb_resources(struct net_device *ndev) rx_queue = priv->rx_queue[i]; rx_queue->rx_bd_base = vaddr; rx_queue->rx_bd_dma_base = addr; - rx_queue->dev = ndev; + rx_queue->ndev = ndev; + rx_queue->dev = dev; addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size; vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size; } @@ -294,25 +278,20 @@ static int gfar_alloc_skb_resources(struct net_device *ndev) if (!tx_queue->tx_skbuff) goto cleanup; - for (k = 0; k < tx_queue->tx_ring_size; k++) - tx_queue->tx_skbuff[k] = NULL; + for (j = 0; j < tx_queue->tx_ring_size; j++) + tx_queue->tx_skbuff[j] = NULL; } for (i = 0; i < priv->num_rx_queues; i++) { rx_queue = priv->rx_queue[i]; - rx_queue->rx_skbuff = - kmalloc_array(rx_queue->rx_ring_size, - sizeof(*rx_queue->rx_skbuff), - GFP_KERNEL); - if (!rx_queue->rx_skbuff) + rx_queue->rx_buff = kcalloc(rx_queue->rx_ring_size, + sizeof(*rx_queue->rx_buff), + GFP_KERNEL); + if (!rx_queue->rx_buff) goto cleanup; - - for (j = 0; j < rx_queue->rx_ring_size; j++) - rx_queue->rx_skbuff[j] = NULL; } - if (gfar_init_bds(ndev)) - goto cleanup; + gfar_init_bds(ndev); return 0; @@ -354,28 +333,16 @@ static void gfar_init_rqprm(struct gfar_private *priv) } } -static void gfar_rx_buff_size_config(struct gfar_private *priv) +static void gfar_rx_offload_en(struct gfar_private *priv) { - int frame_size = priv->ndev->mtu + ETH_HLEN + ETH_FCS_LEN; - /* set this when rx hw offload (TOE) functions are being used */ priv->uses_rxfcb = 0; if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX)) priv->uses_rxfcb = 1; - if (priv->hwts_rx_en) + if (priv->hwts_rx_en || priv->rx_filer_enable) priv->uses_rxfcb = 1; - - if (priv->uses_rxfcb) - frame_size += GMAC_FCB_LEN; - - frame_size += priv->padding; - - frame_size = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) + - INCREMENTAL_BUFFER_SIZE; - - priv->rx_buffer_size = frame_size; } static void gfar_mac_rx_config(struct gfar_private *priv) @@ -384,7 +351,7 @@ static void gfar_mac_rx_config(struct gfar_private *priv) u32 rctrl = 0; if (priv->rx_filer_enable) { - rctrl |= RCTRL_FILREN; + rctrl |= RCTRL_FILREN | RCTRL_PRSDEP_INIT; /* Program the RIR0 reg with the required distribution */ if (priv->poll_mode == GFAR_SQ_POLLING) gfar_write(®s->rir0, DEFAULT_2RXQ_RIR0); @@ -516,6 +483,15 @@ static struct net_device_stats *gfar_get_stats(struct net_device *dev) return &dev->stats; } +static int gfar_set_mac_addr(struct net_device *dev, void *p) +{ + eth_mac_addr(dev, p); + + gfar_set_mac_for_addr(dev, 0, dev->dev_addr); + + return 0; +} + static const struct net_device_ops gfar_netdev_ops = { .ndo_open = gfar_enet_open, .ndo_start_xmit = gfar_start_xmit, @@ -526,7 +502,7 @@ static const struct net_device_ops gfar_netdev_ops = { .ndo_tx_timeout = gfar_timeout, .ndo_do_ioctl = gfar_ioctl, .ndo_get_stats = gfar_get_stats, - .ndo_set_mac_address = eth_mac_addr, + .ndo_set_mac_address = gfar_set_mac_addr, .ndo_validate_addr = eth_validate_addr, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = gfar_netpoll, @@ -556,22 +532,6 @@ static void gfar_ints_enable(struct gfar_private *priv) } } -static void lock_tx_qs(struct gfar_private *priv) -{ - int i; - - for (i = 0; i < priv->num_tx_queues; i++) - spin_lock(&priv->tx_queue[i]->txlock); -} - -static void unlock_tx_qs(struct gfar_private *priv) -{ - int i; - - for (i = 0; i < 
priv->num_tx_queues; i++) - spin_unlock(&priv->tx_queue[i]->txlock); -} - static int gfar_alloc_tx_queues(struct gfar_private *priv) { int i; @@ -600,9 +560,8 @@ static int gfar_alloc_rx_queues(struct gfar_private *priv) if (!priv->rx_queue[i]) return -ENOMEM; - priv->rx_queue[i]->rx_skbuff = NULL; priv->rx_queue[i]->qindex = i; - priv->rx_queue[i]->dev = priv->ndev; + priv->rx_queue[i]->ndev = priv->ndev; } return 0; } @@ -688,9 +647,9 @@ static int gfar_parse_group(struct device_node *np, if (model && strcasecmp(model, "FEC")) { gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1); gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2); - if (gfar_irq(grp, TX)->irq == NO_IRQ || - gfar_irq(grp, RX)->irq == NO_IRQ || - gfar_irq(grp, ER)->irq == NO_IRQ) + if (!gfar_irq(grp, TX)->irq || + !gfar_irq(grp, RX)->irq || + !gfar_irq(grp, ER)->irq) return -EINVAL; } @@ -935,7 +894,8 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) FSL_GIANFAR_DEV_HAS_VLAN | FSL_GIANFAR_DEV_HAS_MAGIC_PACKET | FSL_GIANFAR_DEV_HAS_EXTENDED_HASH | - FSL_GIANFAR_DEV_HAS_TIMER; + FSL_GIANFAR_DEV_HAS_TIMER | + FSL_GIANFAR_DEV_HAS_RX_FILER; err = of_property_read_string(np, "phy-connection-type", &ctype); @@ -948,6 +908,9 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) if (of_find_property(np, "fsl,magic-packet", NULL)) priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET; + if (of_get_property(np, "fsl,wake-on-filer", NULL)) + priv->device_flags |= FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER; + priv->phy_node = of_parse_phandle(np, "phy-handle", 0); /* In the case of a fixed PHY, the DT node associated @@ -1194,12 +1157,11 @@ void gfar_mac_reset(struct gfar_private *priv) udelay(3); - /* Compute rx_buff_size based on config flags */ - gfar_rx_buff_size_config(priv); + gfar_rx_offload_en(priv); /* Initialize the max receive frame/buffer lengths */ - gfar_write(®s->maxfrm, priv->rx_buffer_size); - gfar_write(®s->mrblr, priv->rx_buffer_size); + gfar_write(®s->maxfrm, GFAR_JUMBO_FRAME_SIZE); + gfar_write(®s->mrblr, GFAR_RXB_SIZE); /* Initialize the Minimum Frame Length Register */ gfar_write(®s->minflr, MINFLR_INIT_SETTINGS); @@ -1207,12 +1169,11 @@ void gfar_mac_reset(struct gfar_private *priv) /* Initialize MACCFG2. */ tempval = MACCFG2_INIT_SETTINGS; - /* If the mtu is larger than the max size for standard - * ethernet frames (ie, a jumbo frame), then set maccfg2 - * to allow huge frames, and to check the length + /* eTSEC74 erratum: Rx frames of length MAXFRM or MAXFRM-1 + * are marked as truncated. Avoid this by MACCFG2[Huge Frame]=1, + * and by checking RxBD[LG] and discarding larger than MAXFRM. 
*/ - if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE || - gfar_has_errata(priv, GFAR_ERRATA_74)) + if (gfar_has_errata(priv, GFAR_ERRATA_74)) tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK; gfar_write(®s->maccfg2, tempval); @@ -1367,7 +1328,6 @@ static int gfar_probe(struct platform_device *ofdev) priv->dev = &ofdev->dev; SET_NETDEV_DEV(dev, &ofdev->dev); - spin_lock_init(&priv->bflock); INIT_WORK(&priv->reset_task, gfar_reset_task); platform_set_drvdata(ofdev, priv); @@ -1411,6 +1371,8 @@ static int gfar_probe(struct platform_device *ofdev) dev->features |= NETIF_F_HW_VLAN_CTAG_RX; } + dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; + gfar_init_addr_hash_table(priv); /* Insert receive time stamps into padding alignment bytes */ @@ -1421,8 +1383,6 @@ static int gfar_probe(struct platform_device *ofdev) priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) dev->needed_headroom = GMAC_FCB_LEN; - priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE; - /* Initializing some of the rx/tx queue level parameters */ for (i = 0; i < priv->num_tx_queues; i++) { priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE; @@ -1437,8 +1397,9 @@ static int gfar_probe(struct platform_device *ofdev) priv->rx_queue[i]->rxic = DEFAULT_RXIC; } - /* always enable rx filer */ - priv->rx_filer_enable = 1; + /* Always enable rx filer if available */ + priv->rx_filer_enable = + (priv->device_flags & FSL_GIANFAR_DEV_HAS_RX_FILER) ? 1 : 0; /* Enable most messages by default */ priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1; /* use pritority h/w tx queue scheduling for single queue devices */ @@ -1459,9 +1420,14 @@ static int gfar_probe(struct platform_device *ofdev) goto register_fail; } - device_init_wakeup(&dev->dev, - priv->device_flags & - FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); + if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) + priv->wol_supported |= GFAR_WOL_MAGIC; + + if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER) && + priv->rx_filer_enable) + priv->wol_supported |= GFAR_WOL_FILER_UCAST; + + device_set_wakeup_capable(&ofdev->dev, priv->wol_supported); /* fill out IRQ number and name fields */ for (i = 0; i < priv->num_grps; i++) { @@ -1524,53 +1490,153 @@ static int gfar_remove(struct platform_device *ofdev) #ifdef CONFIG_PM +static void __gfar_filer_disable(struct gfar_private *priv) +{ + struct gfar __iomem *regs = priv->gfargrp[0].regs; + u32 temp; + + temp = gfar_read(®s->rctrl); + temp &= ~(RCTRL_FILREN | RCTRL_PRSDEP_INIT); + gfar_write(®s->rctrl, temp); +} + +static void __gfar_filer_enable(struct gfar_private *priv) +{ + struct gfar __iomem *regs = priv->gfargrp[0].regs; + u32 temp; + + temp = gfar_read(®s->rctrl); + temp |= RCTRL_FILREN | RCTRL_PRSDEP_INIT; + gfar_write(®s->rctrl, temp); +} + +/* Filer rules implementing wol capabilities */ +static void gfar_filer_config_wol(struct gfar_private *priv) +{ + unsigned int i; + u32 rqfcr; + + __gfar_filer_disable(priv); + + /* clear the filer table, reject any packet by default */ + rqfcr = RQFCR_RJE | RQFCR_CMP_MATCH; + for (i = 0; i <= MAX_FILER_IDX; i++) + gfar_write_filer(priv, i, rqfcr, 0); + + i = 0; + if (priv->wol_opts & GFAR_WOL_FILER_UCAST) { + /* unicast packet, accept it */ + struct net_device *ndev = priv->ndev; + /* get the default rx queue index */ + u8 qindex = (u8)priv->gfargrp[0].rx_queue->qindex; + u32 dest_mac_addr = (ndev->dev_addr[0] << 16) | + (ndev->dev_addr[1] << 8) | + ndev->dev_addr[2]; + + rqfcr = (qindex << 10) | RQFCR_AND | + RQFCR_CMP_EXACT | RQFCR_PID_DAH; + + gfar_write_filer(priv, i++, rqfcr, dest_mac_addr); + + 
dest_mac_addr = (ndev->dev_addr[3] << 16) | + (ndev->dev_addr[4] << 8) | + ndev->dev_addr[5]; + rqfcr = (qindex << 10) | RQFCR_GPI | + RQFCR_CMP_EXACT | RQFCR_PID_DAL; + gfar_write_filer(priv, i++, rqfcr, dest_mac_addr); + } + + __gfar_filer_enable(priv); +} + +static void gfar_filer_restore_table(struct gfar_private *priv) +{ + u32 rqfcr, rqfpr; + unsigned int i; + + __gfar_filer_disable(priv); + + for (i = 0; i <= MAX_FILER_IDX; i++) { + rqfcr = priv->ftp_rqfcr[i]; + rqfpr = priv->ftp_rqfpr[i]; + gfar_write_filer(priv, i, rqfcr, rqfpr); + } + + __gfar_filer_enable(priv); +} + +/* gfar_start() for Rx only and with the FGPI filer interrupt enabled */ +static void gfar_start_wol_filer(struct gfar_private *priv) +{ + struct gfar __iomem *regs = priv->gfargrp[0].regs; + u32 tempval; + int i = 0; + + /* Enable Rx hw queues */ + gfar_write(®s->rqueue, priv->rqueue); + + /* Initialize DMACTRL to have WWR and WOP */ + tempval = gfar_read(®s->dmactrl); + tempval |= DMACTRL_INIT_SETTINGS; + gfar_write(®s->dmactrl, tempval); + + /* Make sure we aren't stopped */ + tempval = gfar_read(®s->dmactrl); + tempval &= ~DMACTRL_GRS; + gfar_write(®s->dmactrl, tempval); + + for (i = 0; i < priv->num_grps; i++) { + regs = priv->gfargrp[i].regs; + /* Clear RHLT, so that the DMA starts polling now */ + gfar_write(®s->rstat, priv->gfargrp[i].rstat); + /* enable the Filer General Purpose Interrupt */ + gfar_write(®s->imask, IMASK_FGPI); + } + + /* Enable Rx DMA */ + tempval = gfar_read(®s->maccfg1); + tempval |= MACCFG1_RX_EN; + gfar_write(®s->maccfg1, tempval); +} + static int gfar_suspend(struct device *dev) { struct gfar_private *priv = dev_get_drvdata(dev); struct net_device *ndev = priv->ndev; struct gfar __iomem *regs = priv->gfargrp[0].regs; - unsigned long flags; u32 tempval; + u16 wol = priv->wol_opts; - int magic_packet = priv->wol_en && - (priv->device_flags & - FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); + if (!netif_running(ndev)) + return 0; + disable_napi(priv); + netif_tx_lock(ndev); netif_device_detach(ndev); + netif_tx_unlock(ndev); - if (netif_running(ndev)) { + gfar_halt(priv); - local_irq_save_nort(flags); - lock_tx_qs(priv); + if (wol & GFAR_WOL_MAGIC) { + /* Enable interrupt on Magic Packet */ + gfar_write(®s->imask, IMASK_MAG); - gfar_halt_nodisable(priv); + /* Enable Magic Packet mode */ + tempval = gfar_read(®s->maccfg2); + tempval |= MACCFG2_MPEN; + gfar_write(®s->maccfg2, tempval); - /* Disable Tx, and Rx if wake-on-LAN is disabled. 
*/ + /* re-enable the Rx block */ tempval = gfar_read(®s->maccfg1); - - tempval &= ~MACCFG1_TX_EN; - - if (!magic_packet) - tempval &= ~MACCFG1_RX_EN; - + tempval |= MACCFG1_RX_EN; gfar_write(®s->maccfg1, tempval); - unlock_tx_qs(priv); - local_irq_restore_nort(flags); - - disable_napi(priv); - - if (magic_packet) { - /* Enable interrupt on Magic Packet */ - gfar_write(®s->imask, IMASK_MAG); + } else if (wol & GFAR_WOL_FILER_UCAST) { + gfar_filer_config_wol(priv); + gfar_start_wol_filer(priv); - /* Enable Magic Packet mode */ - tempval = gfar_read(®s->maccfg2); - tempval |= MACCFG2_MPEN; - gfar_write(®s->maccfg2, tempval); - } else { - phy_stop(priv->phydev); - } + } else { + phy_stop(priv->phydev); } return 0; @@ -1581,37 +1647,30 @@ static int gfar_resume(struct device *dev) struct gfar_private *priv = dev_get_drvdata(dev); struct net_device *ndev = priv->ndev; struct gfar __iomem *regs = priv->gfargrp[0].regs; - unsigned long flags; u32 tempval; - int magic_packet = priv->wol_en && - (priv->device_flags & - FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); + u16 wol = priv->wol_opts; - if (!netif_running(ndev)) { - netif_device_attach(ndev); + if (!netif_running(ndev)) return 0; - } - if (!magic_packet && priv->phydev) - phy_start(priv->phydev); + if (wol & GFAR_WOL_MAGIC) { + /* Disable Magic Packet mode */ + tempval = gfar_read(®s->maccfg2); + tempval &= ~MACCFG2_MPEN; + gfar_write(®s->maccfg2, tempval); - /* Disable Magic Packet mode, in case something - * else woke us up. - */ - local_irq_save_nort(flags); - lock_tx_qs(priv); + } else if (wol & GFAR_WOL_FILER_UCAST) { + /* need to stop rx only, tx is already down */ + gfar_halt(priv); + gfar_filer_restore_table(priv); - tempval = gfar_read(®s->maccfg2); - tempval &= ~MACCFG2_MPEN; - gfar_write(®s->maccfg2, tempval); + } else { + phy_start(priv->phydev); + } gfar_start(priv); - unlock_tx_qs(priv); - local_irq_restore_nort(flags); - netif_device_attach(ndev); - enable_napi(priv); return 0; @@ -1628,10 +1687,7 @@ static int gfar_restore(struct device *dev) return 0; } - if (gfar_init_bds(ndev)) { - free_skb_resources(priv); - return -ENOMEM; - } + gfar_init_bds(ndev); gfar_mac_reset(priv); @@ -1780,8 +1836,10 @@ static void gfar_configure_serdes(struct net_device *dev) * everything for us? Resetting it takes the link down and requires * several seconds for it to come back. 
*/ - if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) + if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) { + put_device(&tbiphy->dev); return; + } /* Single clk mode, mii mode off(for serdes communication) */ phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT); @@ -1793,6 +1851,8 @@ static void gfar_configure_serdes(struct net_device *dev) phy_write(tbiphy, MII_BMCR, BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000); + + put_device(&tbiphy->dev); } static int __gfar_is_rx_idle(struct gfar_private *priv) @@ -1922,26 +1982,32 @@ static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue) static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue) { - struct rxbd8 *rxbdp; - struct gfar_private *priv = netdev_priv(rx_queue->dev); int i; - rxbdp = rx_queue->rx_bd_base; + struct rxbd8 *rxbdp = rx_queue->rx_bd_base; + + if (rx_queue->skb) + dev_kfree_skb(rx_queue->skb); for (i = 0; i < rx_queue->rx_ring_size; i++) { - if (rx_queue->rx_skbuff[i]) { - dma_unmap_single(priv->dev, be32_to_cpu(rxbdp->bufPtr), - priv->rx_buffer_size, - DMA_FROM_DEVICE); - dev_kfree_skb_any(rx_queue->rx_skbuff[i]); - rx_queue->rx_skbuff[i] = NULL; - } + struct gfar_rx_buff *rxb = &rx_queue->rx_buff[i]; + rxbdp->lstatus = 0; rxbdp->bufPtr = 0; rxbdp++; + + if (!rxb->page) + continue; + + dma_unmap_single(rx_queue->dev, rxb->dma, + PAGE_SIZE, DMA_FROM_DEVICE); + __free_page(rxb->page); + + rxb->page = NULL; } - kfree(rx_queue->rx_skbuff); - rx_queue->rx_skbuff = NULL; + + kfree(rx_queue->rx_buff); + rx_queue->rx_buff = NULL; } /* If there are any tx skbs or rx skbs still around, free them. @@ -1966,7 +2032,7 @@ static void free_skb_resources(struct gfar_private *priv) for (i = 0; i < priv->num_rx_queues; i++) { rx_queue = priv->rx_queue[i]; - if (rx_queue->rx_skbuff) + if (rx_queue->rx_buff) free_skb_rx_queue(rx_queue); } @@ -2042,6 +2108,8 @@ static int register_grp_irqs(struct gfar_priv_grp *grp) goto err_irq_fail; } + enable_irq_wake(gfar_irq(grp, ER)->irq); + err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0, gfar_irq(grp, TX)->name, grp); if (err < 0) { @@ -2056,6 +2124,8 @@ static int register_grp_irqs(struct gfar_priv_grp *grp) gfar_irq(grp, RX)->irq); goto rx_irq_fail; } + enable_irq_wake(gfar_irq(grp, RX)->irq); + } else { err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0, gfar_irq(grp, TX)->name, grp); @@ -2064,6 +2134,7 @@ static int register_grp_irqs(struct gfar_priv_grp *grp) gfar_irq(grp, TX)->irq); goto err_irq_fail; } + enable_irq_wake(gfar_irq(grp, TX)->irq); } return 0; @@ -2129,6 +2200,11 @@ int startup_gfar(struct net_device *ndev) /* Start Rx/Tx DMA and enable the interrupts */ gfar_start(priv); + /* force link state update after mac reset */ + priv->oldlink = 0; + priv->oldspeed = 0; + priv->oldduplex = -1; + phy_start(priv->phydev); enable_napi(priv); @@ -2158,8 +2234,6 @@ static int gfar_enet_open(struct net_device *dev) if (err) return err; - device_set_wakeup_enable(&dev->dev, priv->wol_en); - return err; } @@ -2254,7 +2328,6 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) int i, rq = 0; int do_tstamp, do_csum, do_vlan; u32 bufaddr; - unsigned long flags; unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0; rq = skb->queue_mapping; @@ -2434,19 +2507,6 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) netdev_tx_sent_queue(txq, bytes_sent); - /* We can work in parallel with gfar_clean_tx_ring(), except - * when modifying num_txbdfree. 
Note that we didn't grab the lock - * when we were reading the num_txbdfree and checking for available - * space, that's because outside of this function it can only grow, - * and once we've got needed space, it cannot suddenly disappear. - * - * The lock also protects us from gfar_error(), which can modify - * regs->tstat and thus retrigger the transfers, which is why we - * also must grab the lock before setting ready bit for the first - * to be transmitted BD. - */ - spin_lock_irqsave(&tx_queue->txlock, flags); - gfar_wmb(); txbdp_start->lstatus = cpu_to_be32(lstatus); @@ -2463,8 +2523,15 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size); + /* We can work in parallel with gfar_clean_tx_ring(), except + * when modifying num_txbdfree. Note that we didn't grab the lock + * when we were reading the num_txbdfree and checking for available + * space, that's because outside of this function it can only grow. + */ + spin_lock_bh(&tx_queue->txlock); /* reduce TxBD free count */ tx_queue->num_txbdfree -= (nr_txbds); + spin_unlock_bh(&tx_queue->txlock); /* If the next BD still needs to be cleaned up, then the bds * are full. We need to tell the kernel to stop sending us stuff. @@ -2478,9 +2545,6 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) /* Tell the DMA to go go go */ gfar_write(®s->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex); - /* Unlock priv */ - spin_unlock_irqrestore(&tx_queue->txlock, flags); - return NETDEV_TX_OK; dma_map_err: @@ -2534,7 +2598,7 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu) struct gfar_private *priv = netdev_priv(dev); int frame_size = new_mtu + ETH_HLEN; - if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) { + if ((frame_size < 64) || (frame_size > GFAR_JUMBO_FRAME_SIZE)) { netif_err(priv, drv, dev, "Invalid MTU setting\n"); return -EINVAL; } @@ -2588,15 +2652,6 @@ static void gfar_timeout(struct net_device *dev) schedule_work(&priv->reset_task); } -static void gfar_align_skb(struct sk_buff *skb) -{ - /* We need the data buffer to be aligned properly. 
We will reserve - * as many bytes as needed to align the data properly - */ - skb_reserve(skb, RXBUF_ALIGNMENT - - (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1))); -} - /* Interrupt Handler for Transmit complete */ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) { @@ -2622,7 +2677,6 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) skb_dirtytx = tx_queue->skb_dirtytx; while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) { - unsigned long flags; frags = skb_shinfo(skb)->nr_frags; @@ -2655,7 +2709,8 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) { struct skb_shared_hwtstamps shhwtstamps; - u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7); + u64 *ns = (u64 *)(((uintptr_t)skb->data + 0x10) & + ~0x7UL); memset(&shhwtstamps, 0, sizeof(shhwtstamps)); shhwtstamps.hwtstamp = ns_to_ktime(*ns); @@ -2686,9 +2741,9 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) TX_RING_MOD_MASK(tx_ring_size); howmany++; - spin_lock_irqsave(&tx_queue->txlock, flags); + spin_lock(&tx_queue->txlock); tx_queue->num_txbdfree += nr_txbds; - spin_unlock_irqrestore(&tx_queue->txlock, flags); + spin_unlock(&tx_queue->txlock); } /* If we freed a buffer, we can restart transmission, if necessary */ @@ -2704,49 +2759,85 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) netdev_tx_completed_queue(txq, howmany, bytes_sent); } -static struct sk_buff *gfar_alloc_skb(struct net_device *dev) +static bool gfar_new_page(struct gfar_priv_rx_q *rxq, struct gfar_rx_buff *rxb) { - struct gfar_private *priv = netdev_priv(dev); - struct sk_buff *skb; + struct page *page; + dma_addr_t addr; - skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT); - if (!skb) - return NULL; + page = dev_alloc_page(); + if (unlikely(!page)) + return false; - gfar_align_skb(skb); + addr = dma_map_page(rxq->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(rxq->dev, addr))) { + __free_page(page); - return skb; + return false; + } + + rxb->dma = addr; + rxb->page = page; + rxb->page_offset = 0; + + return true; } -static struct sk_buff *gfar_new_skb(struct net_device *dev, dma_addr_t *bufaddr) +static void gfar_rx_alloc_err(struct gfar_priv_rx_q *rx_queue) { - struct gfar_private *priv = netdev_priv(dev); - struct sk_buff *skb; - dma_addr_t addr; + struct gfar_private *priv = netdev_priv(rx_queue->ndev); + struct gfar_extra_stats *estats = &priv->extra_stats; - skb = gfar_alloc_skb(dev); - if (!skb) - return NULL; + netdev_err(rx_queue->ndev, "Can't alloc RX buffers\n"); + atomic64_inc(&estats->rx_alloc_err); +} - addr = dma_map_single(priv->dev, skb->data, - priv->rx_buffer_size, DMA_FROM_DEVICE); - if (unlikely(dma_mapping_error(priv->dev, addr))) { - dev_kfree_skb_any(skb); - return NULL; +static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue, + int alloc_cnt) +{ + struct rxbd8 *bdp; + struct gfar_rx_buff *rxb; + int i; + + i = rx_queue->next_to_use; + bdp = &rx_queue->rx_bd_base[i]; + rxb = &rx_queue->rx_buff[i]; + + while (alloc_cnt--) { + /* try reuse page */ + if (unlikely(!rxb->page)) { + if (unlikely(!gfar_new_page(rx_queue, rxb))) { + gfar_rx_alloc_err(rx_queue); + break; + } + } + + /* Setup the new RxBD */ + gfar_init_rxbdp(rx_queue, bdp, + rxb->dma + rxb->page_offset + RXBUF_ALIGNMENT); + + /* Update to the next pointer */ + bdp++; + rxb++; + + if (unlikely(++i == rx_queue->rx_ring_size)) { + i = 0; + bdp = rx_queue->rx_bd_base; + rxb = 
rx_queue->rx_buff; + } } - *bufaddr = addr; - return skb; + rx_queue->next_to_use = i; + rx_queue->next_to_alloc = i; } -static inline void count_errors(unsigned short status, struct net_device *dev) +static void count_errors(u32 lstatus, struct net_device *ndev) { - struct gfar_private *priv = netdev_priv(dev); - struct net_device_stats *stats = &dev->stats; + struct gfar_private *priv = netdev_priv(ndev); + struct net_device_stats *stats = &ndev->stats; struct gfar_extra_stats *estats = &priv->extra_stats; /* If the packet was truncated, none of the other errors matter */ - if (status & RXBD_TRUNCATED) { + if (lstatus & BD_LFLAG(RXBD_TRUNCATED)) { stats->rx_length_errors++; atomic64_inc(&estats->rx_trunc); @@ -2754,25 +2845,25 @@ static inline void count_errors(unsigned short status, struct net_device *dev) return; } /* Count the errors, if there were any */ - if (status & (RXBD_LARGE | RXBD_SHORT)) { + if (lstatus & BD_LFLAG(RXBD_LARGE | RXBD_SHORT)) { stats->rx_length_errors++; - if (status & RXBD_LARGE) + if (lstatus & BD_LFLAG(RXBD_LARGE)) atomic64_inc(&estats->rx_large); else atomic64_inc(&estats->rx_short); } - if (status & RXBD_NONOCTET) { + if (lstatus & BD_LFLAG(RXBD_NONOCTET)) { stats->rx_frame_errors++; atomic64_inc(&estats->rx_nonoctet); } - if (status & RXBD_CRCERR) { + if (lstatus & BD_LFLAG(RXBD_CRCERR)) { atomic64_inc(&estats->rx_crcerr); stats->rx_crc_errors++; } - if (status & RXBD_OVERRUN) { + if (lstatus & BD_LFLAG(RXBD_OVERRUN)) { atomic64_inc(&estats->rx_overrun); - stats->rx_crc_errors++; + stats->rx_over_errors++; } } @@ -2780,7 +2871,14 @@ irqreturn_t gfar_receive(int irq, void *grp_id) { struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id; unsigned long flags; - u32 imask; + u32 imask, ievent; + + ievent = gfar_read(&grp->regs->ievent); + + if (unlikely(ievent & IEVENT_FGPI)) { + gfar_write(&grp->regs->ievent, IEVENT_FGPI); + return IRQ_HANDLED; + } if (likely(napi_schedule_prep(&grp->napi_rx))) { spin_lock_irqsave(&grp->grplock, flags); @@ -2823,6 +2921,93 @@ static irqreturn_t gfar_transmit(int irq, void *grp_id) return IRQ_HANDLED; } +static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus, + struct sk_buff *skb, bool first) +{ + unsigned int size = lstatus & BD_LENGTH_MASK; + struct page *page = rxb->page; + + /* Remove the FCS from the packet length */ + if (likely(lstatus & BD_LFLAG(RXBD_LAST))) + size -= ETH_FCS_LEN; + + if (likely(first)) + skb_put(skb, size); + else + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, + rxb->page_offset + RXBUF_ALIGNMENT, + size, GFAR_RXB_TRUESIZE); + + /* try reuse page */ + if (unlikely(page_count(page) != 1)) + return false; + + /* change offset to the other half */ + rxb->page_offset ^= GFAR_RXB_TRUESIZE; + + atomic_inc(&page->_count); + + return true; +} + +static void gfar_reuse_rx_page(struct gfar_priv_rx_q *rxq, + struct gfar_rx_buff *old_rxb) +{ + struct gfar_rx_buff *new_rxb; + u16 nta = rxq->next_to_alloc; + + new_rxb = &rxq->rx_buff[nta]; + + /* find next buf that can reuse a page */ + nta++; + rxq->next_to_alloc = (nta < rxq->rx_ring_size) ? 
nta : 0; + + /* copy page reference */ + *new_rxb = *old_rxb; + + /* sync for use by the device */ + dma_sync_single_range_for_device(rxq->dev, old_rxb->dma, + old_rxb->page_offset, + GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE); +} + +static struct sk_buff *gfar_get_next_rxbuff(struct gfar_priv_rx_q *rx_queue, + u32 lstatus, struct sk_buff *skb) +{ + struct gfar_rx_buff *rxb = &rx_queue->rx_buff[rx_queue->next_to_clean]; + struct page *page = rxb->page; + bool first = false; + + if (likely(!skb)) { + void *buff_addr = page_address(page) + rxb->page_offset; + + skb = build_skb(buff_addr, GFAR_SKBFRAG_SIZE); + if (unlikely(!skb)) { + gfar_rx_alloc_err(rx_queue); + return NULL; + } + skb_reserve(skb, RXBUF_ALIGNMENT); + first = true; + } + + dma_sync_single_range_for_cpu(rx_queue->dev, rxb->dma, rxb->page_offset, + GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE); + + if (gfar_add_rx_frag(rxb, lstatus, skb, first)) { + /* reuse the free half of the page */ + gfar_reuse_rx_page(rx_queue, rxb); + } else { + /* page cannot be reused, unmap it */ + dma_unmap_page(rx_queue->dev, rxb->dma, + PAGE_SIZE, DMA_FROM_DEVICE); + } + + /* clear rxb content */ + rxb->page = NULL; + + return skb; +} + static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb) { /* If valid headers were found, and valid sums @@ -2837,10 +3022,9 @@ static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb) } /* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */ -static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb, - int amount_pull, struct napi_struct *napi) +static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb) { - struct gfar_private *priv = netdev_priv(dev); + struct gfar_private *priv = netdev_priv(ndev); struct rxfcb *fcb = NULL; /* fcb is at the beginning if exists */ @@ -2849,10 +3033,8 @@ static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb, /* Remove the FCB from the skb * Remove the padded bytes, if there are any */ - if (amount_pull) { - skb_record_rx_queue(skb, fcb->rq); - skb_pull(skb, amount_pull); - } + if (priv->uses_rxfcb) + skb_pull(skb, GMAC_FCB_LEN); /* Get receive timestamp from the skb */ if (priv->hwts_rx_en) { @@ -2866,24 +3048,20 @@ static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb, if (priv->padding) skb_pull(skb, priv->padding); - if (dev->features & NETIF_F_RXCSUM) + if (ndev->features & NETIF_F_RXCSUM) gfar_rx_checksum(skb, fcb); /* Tell the skb what kind of packet this is */ - skb->protocol = eth_type_trans(skb, dev); + skb->protocol = eth_type_trans(skb, ndev); /* There's need to check for NETIF_F_HW_VLAN_CTAG_RX here. * Even if vlan rx accel is disabled, on some chips * RXFCB_VLN is pseudo randomly set. 
*/ - if (dev->features & NETIF_F_HW_VLAN_CTAG_RX && + if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX && be16_to_cpu(fcb->flags) & RXFCB_VLN) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(fcb->vlctl)); - - /* Send the packet up the stack */ - napi_gro_receive(napi, skb); - } /* gfar_clean_rx_ring() -- Processes each frame in the rx ring @@ -2892,91 +3070,89 @@ static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb, */ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) { - struct net_device *dev = rx_queue->dev; - struct rxbd8 *bdp, *base; - struct sk_buff *skb; - int pkt_len; - int amount_pull; - int howmany = 0; - struct gfar_private *priv = netdev_priv(dev); + struct net_device *ndev = rx_queue->ndev; + struct gfar_private *priv = netdev_priv(ndev); + struct rxbd8 *bdp; + int i, howmany = 0; + struct sk_buff *skb = rx_queue->skb; + int cleaned_cnt = gfar_rxbd_unused(rx_queue); + unsigned int total_bytes = 0, total_pkts = 0; /* Get the first full descriptor */ - bdp = rx_queue->cur_rx; - base = rx_queue->rx_bd_base; + i = rx_queue->next_to_clean; + + while (rx_work_limit--) { + u32 lstatus; - amount_pull = priv->uses_rxfcb ? GMAC_FCB_LEN : 0; + if (cleaned_cnt >= GFAR_RX_BUFF_ALLOC) { + gfar_alloc_rx_buffs(rx_queue, cleaned_cnt); + cleaned_cnt = 0; + } - while (!(be16_to_cpu(bdp->status) & RXBD_EMPTY) && rx_work_limit--) { - struct sk_buff *newskb; - dma_addr_t bufaddr; + bdp = &rx_queue->rx_bd_base[i]; + lstatus = be32_to_cpu(bdp->lstatus); + if (lstatus & BD_LFLAG(RXBD_EMPTY)) + break; + /* order rx buffer descriptor reads */ rmb(); - /* Add another skb for the future */ - newskb = gfar_new_skb(dev, &bufaddr); + /* fetch next to clean buffer from the ring */ + skb = gfar_get_next_rxbuff(rx_queue, lstatus, skb); + if (unlikely(!skb)) + break; - skb = rx_queue->rx_skbuff[rx_queue->skb_currx]; + cleaned_cnt++; + howmany++; - dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr), - priv->rx_buffer_size, DMA_FROM_DEVICE); - - if (unlikely(!(be16_to_cpu(bdp->status) & RXBD_ERR) && - be16_to_cpu(bdp->length) > priv->rx_buffer_size)) - bdp->status = cpu_to_be16(RXBD_LARGE); - - /* We drop the frame if we failed to allocate a new buffer */ - if (unlikely(!newskb || - !(be16_to_cpu(bdp->status) & RXBD_LAST) || - be16_to_cpu(bdp->status) & RXBD_ERR)) { - count_errors(be16_to_cpu(bdp->status), dev); - - if (unlikely(!newskb)) { - newskb = skb; - bufaddr = be32_to_cpu(bdp->bufPtr); - } else if (skb) - dev_kfree_skb(skb); - } else { - /* Increment the number of packets */ - rx_queue->stats.rx_packets++; - howmany++; - - if (likely(skb)) { - pkt_len = be16_to_cpu(bdp->length) - - ETH_FCS_LEN; - /* Remove the FCS from the packet length */ - skb_put(skb, pkt_len); - rx_queue->stats.rx_bytes += pkt_len; - skb_record_rx_queue(skb, rx_queue->qindex); - gfar_process_frame(dev, skb, amount_pull, - &rx_queue->grp->napi_rx); + if (unlikely(++i == rx_queue->rx_ring_size)) + i = 0; - } else { - netif_warn(priv, rx_err, dev, "Missing skb!\n"); - rx_queue->stats.rx_dropped++; - atomic64_inc(&priv->extra_stats.rx_skbmissing); - } + rx_queue->next_to_clean = i; + + /* fetch next buffer if not the last in frame */ + if (!(lstatus & BD_LFLAG(RXBD_LAST))) + continue; + if (unlikely(lstatus & BD_LFLAG(RXBD_ERR))) { + count_errors(lstatus, ndev); + + /* discard faulty buffer */ + dev_kfree_skb(skb); + skb = NULL; + rx_queue->stats.rx_dropped++; + continue; } - rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb; + /* Increment the number of packets */ + total_pkts++; + 
total_bytes += skb->len; - /* Setup the new bdp */ - gfar_init_rxbdp(rx_queue, bdp, bufaddr); + skb_record_rx_queue(skb, rx_queue->qindex); - /* Update Last Free RxBD pointer for LFC */ - if (unlikely(rx_queue->rfbptr && priv->tx_actual_en)) - gfar_write(rx_queue->rfbptr, (u32)bdp); + gfar_process_frame(ndev, skb); - /* Update to the next pointer */ - bdp = next_bd(bdp, base, rx_queue->rx_ring_size); + /* Send the packet up the stack */ + napi_gro_receive(&rx_queue->grp->napi_rx, skb); - /* update to point at the next skb */ - rx_queue->skb_currx = (rx_queue->skb_currx + 1) & - RX_RING_MOD_MASK(rx_queue->rx_ring_size); + skb = NULL; } - /* Update the current rxbd pointer to be the next one */ - rx_queue->cur_rx = bdp; + /* Store incomplete frames for completion */ + rx_queue->skb = skb; + + rx_queue->stats.rx_packets += total_pkts; + rx_queue->stats.rx_bytes += total_bytes; + + if (cleaned_cnt) + gfar_alloc_rx_buffs(rx_queue, cleaned_cnt); + + /* Update Last Free RxBD pointer for LFC */ + if (unlikely(priv->tx_actual_en)) { + u32 bdp_dma = gfar_rxbd_dma_lastfree(rx_queue); + + gfar_write(rx_queue->rfbptr, bdp_dma); + } return howmany; } @@ -3411,30 +3587,19 @@ static irqreturn_t gfar_error(int irq, void *grp_id) if (events & IEVENT_CRL) dev->stats.tx_aborted_errors++; if (events & IEVENT_XFUN) { - unsigned long flags; - netif_dbg(priv, tx_err, dev, "TX FIFO underrun, packet dropped\n"); dev->stats.tx_dropped++; atomic64_inc(&priv->extra_stats.tx_underrun); - local_irq_save_nort(flags); - lock_tx_qs(priv); - - /* Reactivate the Tx Queues */ - gfar_write(®s->tstat, gfargrp->tstat); - - unlock_tx_qs(priv); - local_irq_restore_nort(flags); + schedule_work(&priv->reset_task); } netif_dbg(priv, tx_err, dev, "Transmit Error\n"); } if (events & IEVENT_BSY) { - dev->stats.rx_errors++; + dev->stats.rx_over_errors++; atomic64_inc(&priv->extra_stats.rx_bsy); - gfar_receive(irq, grp_id); - netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n", gfar_read(®s->rstat)); } @@ -3503,7 +3668,6 @@ static noinline void gfar_update_link_state(struct gfar_private *priv) struct phy_device *phydev = priv->phydev; struct gfar_priv_rx_q *rx_queue = NULL; int i; - struct rxbd8 *bdp; if (unlikely(test_bit(GFAR_RESETTING, &priv->state))) return; @@ -3560,15 +3724,11 @@ static noinline void gfar_update_link_state(struct gfar_private *priv) /* Turn last free buffer recording on */ if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) { for (i = 0; i < priv->num_rx_queues; i++) { + u32 bdp_dma; + rx_queue = priv->rx_queue[i]; - bdp = rx_queue->cur_rx; - /* skip to previous bd */ - bdp = skip_bd(bdp, rx_queue->rx_ring_size - 1, - rx_queue->rx_bd_base, - rx_queue->rx_ring_size); - - if (rx_queue->rfbptr) - gfar_write(rx_queue->rfbptr, (u32)bdp); + bdp_dma = gfar_rxbd_dma_lastfree(rx_queue); + gfar_write(rx_queue->rfbptr, bdp_dma); } priv->tx_actual_en = 1; diff --git a/kernel/drivers/net/ethernet/freescale/gianfar.h b/kernel/drivers/net/ethernet/freescale/gianfar.h index daa1d37de..cb7766797 100644 --- a/kernel/drivers/net/ethernet/freescale/gianfar.h +++ b/kernel/drivers/net/ethernet/freescale/gianfar.h @@ -71,11 +71,6 @@ struct ethtool_rx_list { /* Number of bytes to align the rx bufs to */ #define RXBUF_ALIGNMENT 64 -/* The number of bytes which composes a unit for the purpose of - * allocating data buffers. ie-for any given MTU, the data buffer - * will be the next highest multiple of 512 bytes. 
*/ -#define INCREMENTAL_BUFFER_SIZE 512 - #define PHY_INIT_TIMEOUT 100000 #define DRV_NAME "gfar-enet" @@ -92,6 +87,8 @@ extern const char gfar_driver_version[]; #define DEFAULT_TX_RING_SIZE 256 #define DEFAULT_RX_RING_SIZE 256 +#define GFAR_RX_BUFF_ALLOC 16 + #define GFAR_RX_MAX_RING_SIZE 256 #define GFAR_TX_MAX_RING_SIZE 256 @@ -103,11 +100,14 @@ extern const char gfar_driver_version[]; #define DEFAULT_RX_LFC_THR 16 #define DEFAULT_LFC_PTVVAL 4 -#define DEFAULT_RX_BUFFER_SIZE 1536 +#define GFAR_RXB_SIZE 1536 +#define GFAR_SKBFRAG_SIZE (RXBUF_ALIGNMENT + GFAR_RXB_SIZE \ + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) +#define GFAR_RXB_TRUESIZE 2048 + #define TX_RING_MOD_MASK(size) (size-1) #define RX_RING_MOD_MASK(size) (size-1) -#define JUMBO_BUFFER_SIZE 9728 -#define JUMBO_FRAME_SIZE 9600 +#define GFAR_JUMBO_FRAME_SIZE 9600 #define DEFAULT_FIFO_TX_THR 0x100 #define DEFAULT_FIFO_TX_STARVE 0x40 @@ -340,6 +340,7 @@ extern const char gfar_driver_version[]; #define IEVENT_MAG 0x00000800 #define IEVENT_GRSC 0x00000100 #define IEVENT_RXF0 0x00000080 +#define IEVENT_FGPI 0x00000010 #define IEVENT_FIR 0x00000008 #define IEVENT_FIQ 0x00000004 #define IEVENT_DPE 0x00000002 @@ -372,6 +373,7 @@ extern const char gfar_driver_version[]; #define IMASK_MAG 0x00000800 #define IMASK_GRSC 0x00000100 #define IMASK_RXFEN0 0x00000080 +#define IMASK_FGPI 0x00000010 #define IMASK_FIR 0x00000008 #define IMASK_FIQ 0x00000004 #define IMASK_DPE 0x00000002 @@ -540,6 +542,9 @@ extern const char gfar_driver_version[]; #define GFAR_INT_NAME_MAX (IFNAMSIZ + 6) /* '_g#_xx' */ +#define GFAR_WOL_MAGIC 0x00000001 +#define GFAR_WOL_FILER_UCAST 0x00000002 + struct txbd8 { union { @@ -640,6 +645,7 @@ struct rmon_mib }; struct gfar_extra_stats { + atomic64_t rx_alloc_err; atomic64_t rx_large; atomic64_t rx_short; atomic64_t rx_nonoctet; @@ -651,7 +657,6 @@ struct gfar_extra_stats { atomic64_t eberr; atomic64_t tx_babt; atomic64_t tx_underrun; - atomic64_t rx_skbmissing; atomic64_t tx_timeout; }; @@ -917,6 +922,8 @@ struct gfar { #define FSL_GIANFAR_DEV_HAS_BD_STASHING 0x00000200 #define FSL_GIANFAR_DEV_HAS_BUF_STASHING 0x00000400 #define FSL_GIANFAR_DEV_HAS_TIMER 0x00000800 +#define FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER 0x00001000 +#define FSL_GIANFAR_DEV_HAS_RX_FILER 0x00002000 #if (MAXGROUPS == 2) #define DEFAULT_MAPPING 0xAA @@ -1012,34 +1019,42 @@ struct rx_q_stats { unsigned long rx_dropped; }; +struct gfar_rx_buff { + dma_addr_t dma; + struct page *page; + unsigned int page_offset; +}; + /** * struct gfar_priv_rx_q - per rx queue structure - * @rx_skbuff: skb pointers - * @skb_currx: currently use skb pointer + * @rx_buff: Array of buffer info metadata structs * @rx_bd_base: First rx buffer descriptor - * @cur_rx: Next free rx ring entry + * @next_to_use: index of the next buffer to be alloc'd + * @next_to_clean: index of the next buffer to be cleaned * @qindex: index of this queue - * @dev: back pointer to the dev structure + * @ndev: back pointer to net_device * @rx_ring_size: Rx ring size * @rxcoalescing: enable/disable rx-coalescing * @rxic: receive interrupt coalescing value */ struct gfar_priv_rx_q { - struct sk_buff **rx_skbuff __aligned(SMP_CACHE_BYTES); - dma_addr_t rx_bd_dma_base; + struct gfar_rx_buff *rx_buff __aligned(SMP_CACHE_BYTES); struct rxbd8 *rx_bd_base; - struct rxbd8 *cur_rx; - struct net_device *dev; - struct gfar_priv_grp *grp; + struct net_device *ndev; + struct device *dev; + u16 rx_ring_size; + u16 qindex; + struct gfar_priv_grp *grp; + u16 next_to_clean; + u16 next_to_use; + u16 next_to_alloc; 
+ struct sk_buff *skb; struct rx_q_stats stats; - u16 skb_currx; - u16 qindex; - unsigned int rx_ring_size; - /* RX Coalescing values */ + u32 __iomem *rfbptr; unsigned char rxcoalescing; unsigned long rxic; - u32 __iomem *rfbptr; + dma_addr_t rx_bd_dma_base; }; enum gfar_irqinfo_id { @@ -1109,7 +1124,6 @@ struct gfar_private { struct device *dev; struct net_device *ndev; enum gfar_errata errata; - unsigned int rx_buffer_size; u16 uses_rxfcb; u16 padding; @@ -1145,9 +1159,6 @@ struct gfar_private { int oldduplex; int oldlink; - /* Bitfield update lock */ - spinlock_t bflock; - uint32_t msg_enable; struct work_struct reset_task; @@ -1157,8 +1168,6 @@ struct gfar_private { extended_hash:1, bd_stash_en:1, rx_filer_enable:1, - /* Wake-on-LAN enabled */ - wol_en:1, /* Enable priority based Tx scheduling in Hw */ prio_sched_en:1, /* Flow control flags */ @@ -1187,6 +1196,10 @@ struct gfar_private { u32 __iomem *hash_regs[16]; int hash_width; + /* wake-on-lan settings */ + u16 wol_opts; + u16 wol_supported; + /*Filer table*/ unsigned int ftp_rqfpr[MAX_FILER_IDX + 1]; unsigned int ftp_rqfcr[MAX_FILER_IDX + 1]; @@ -1295,6 +1308,28 @@ static inline void gfar_clear_txbd_status(struct txbd8 *bdp) bdp->lstatus = cpu_to_be32(lstatus); } +static inline int gfar_rxbd_unused(struct gfar_priv_rx_q *rxq) +{ + if (rxq->next_to_clean > rxq->next_to_use) + return rxq->next_to_clean - rxq->next_to_use - 1; + + return rxq->rx_ring_size + rxq->next_to_clean - rxq->next_to_use - 1; +} + +static inline u32 gfar_rxbd_dma_lastfree(struct gfar_priv_rx_q *rxq) +{ + struct rxbd8 *bdp; + u32 bdp_dma; + int i; + + i = rxq->next_to_use ? rxq->next_to_use - 1 : rxq->rx_ring_size - 1; + bdp = &rxq->rx_bd_base[i]; + bdp_dma = lower_32_bits(rxq->rx_bd_dma_base); + bdp_dma += (uintptr_t)bdp - (uintptr_t)rxq->rx_bd_base; + + return bdp_dma; +} + irqreturn_t gfar_receive(int irq, void *dev_id); int startup_gfar(struct net_device *dev); void stop_gfar(struct net_device *dev); diff --git a/kernel/drivers/net/ethernet/freescale/gianfar_ethtool.c b/kernel/drivers/net/ethernet/freescale/gianfar_ethtool.c index fda12fb32..4b0ee855e 100644 --- a/kernel/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/kernel/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -61,6 +61,8 @@ static void gfar_gdrvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo); static const char stat_gstrings[][ETH_GSTRING_LEN] = { + /* extra stats */ + "rx-allocation-errors", "rx-large-frame-errors", "rx-short-frame-errors", "rx-non-octet-errors", @@ -72,8 +74,8 @@ static const char stat_gstrings[][ETH_GSTRING_LEN] = { "ethernet-bus-error", "tx-babbling-errors", "tx-underrun-errors", - "rx-skb-missing-errors", "tx-timeout-errors", + /* rmon stats */ "tx-rx-64-frames", "tx-rx-65-127-frames", "tx-rx-128-255-frames", @@ -180,8 +182,6 @@ static void gfar_gdrvinfo(struct net_device *dev, sizeof(drvinfo->version)); strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version)); strlcpy(drvinfo->bus_info, "N/A", sizeof(drvinfo->bus_info)); - drvinfo->regdump_len = 0; - drvinfo->eedump_len = 0; } @@ -642,31 +642,49 @@ static void gfar_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct gfar_private *priv = netdev_priv(dev); - if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) { - wol->supported = WAKE_MAGIC; - wol->wolopts = priv->wol_en ? 
WAKE_MAGIC : 0; - } else { - wol->supported = wol->wolopts = 0; - } + wol->supported = 0; + wol->wolopts = 0; + + if (priv->wol_supported & GFAR_WOL_MAGIC) + wol->supported |= WAKE_MAGIC; + + if (priv->wol_supported & GFAR_WOL_FILER_UCAST) + wol->supported |= WAKE_UCAST; + + if (priv->wol_opts & GFAR_WOL_MAGIC) + wol->wolopts |= WAKE_MAGIC; + + if (priv->wol_opts & GFAR_WOL_FILER_UCAST) + wol->wolopts |= WAKE_UCAST; } static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct gfar_private *priv = netdev_priv(dev); - unsigned long flags; + u16 wol_opts = 0; + int err; - if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) && - wol->wolopts != 0) + if (!priv->wol_supported && wol->wolopts) return -EINVAL; - if (wol->wolopts & ~WAKE_MAGIC) + if (wol->wolopts & ~(WAKE_MAGIC | WAKE_UCAST)) return -EINVAL; - device_set_wakeup_enable(&dev->dev, wol->wolopts & WAKE_MAGIC); + if (wol->wolopts & WAKE_MAGIC) { + wol_opts |= GFAR_WOL_MAGIC; + } else { + if (wol->wolopts & WAKE_UCAST) + wol_opts |= GFAR_WOL_FILER_UCAST; + } - spin_lock_irqsave(&priv->bflock, flags); - priv->wol_en = !!device_may_wakeup(&dev->dev); - spin_unlock_irqrestore(&priv->bflock, flags); + wol_opts &= priv->wol_supported; + priv->wol_opts = 0; + + err = device_set_wakeup_enable(priv->dev, wol_opts); + if (err) + return err; + + priv->wol_opts = wol_opts; return 0; } @@ -677,14 +695,14 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow) u32 fcr = 0x0, fpr = FPR_FILER_MASK; if (ethflow & RXH_L2DA) { - fcr = RQFCR_PID_DAH |RQFCR_CMP_NOMATCH | + fcr = RQFCR_PID_DAH | RQFCR_CMP_NOMATCH | RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0; priv->ftp_rqfpr[priv->cur_filer_idx] = fpr; priv->ftp_rqfcr[priv->cur_filer_idx] = fcr; gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr); priv->cur_filer_idx = priv->cur_filer_idx - 1; - fcr = RQFCR_PID_DAL | RQFCR_AND | RQFCR_CMP_NOMATCH | + fcr = RQFCR_PID_DAL | RQFCR_CMP_NOMATCH | RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0; priv->ftp_rqfpr[priv->cur_filer_idx] = fpr; priv->ftp_rqfcr[priv->cur_filer_idx] = fcr; @@ -903,27 +921,6 @@ static int gfar_check_filer_hardware(struct gfar_private *priv) return 0; } -static int gfar_comp_asc(const void *a, const void *b) -{ - return memcmp(a, b, 4); -} - -static int gfar_comp_desc(const void *a, const void *b) -{ - return -memcmp(a, b, 4); -} - -static void gfar_swap(void *a, void *b, int size) -{ - u32 *_a = a; - u32 *_b = b; - - swap(_a[0], _b[0]); - swap(_a[1], _b[1]); - swap(_a[2], _b[2]); - swap(_a[3], _b[3]); -} - /* Write a mask to filer cache */ static void gfar_set_mask(u32 mask, struct filer_table *tab) { @@ -1273,310 +1270,6 @@ static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule, return 0; } -/* Copy size filer entries */ -static void gfar_copy_filer_entries(struct gfar_filer_entry dst[0], - struct gfar_filer_entry src[0], s32 size) -{ - while (size > 0) { - size--; - dst[size].ctrl = src[size].ctrl; - dst[size].prop = src[size].prop; - } -} - -/* Delete the contents of the filer-table between start and end - * and collapse them - */ -static int gfar_trim_filer_entries(u32 begin, u32 end, struct filer_table *tab) -{ - int length; - - if (end > MAX_FILER_CACHE_IDX || end < begin) - return -EINVAL; - - end++; - length = end - begin; - - /* Copy */ - while (end < tab->index) { - tab->fe[begin].ctrl = tab->fe[end].ctrl; - tab->fe[begin++].prop = tab->fe[end++].prop; - - } - /* Fill up with don't cares */ - while (begin < tab->index) { - tab->fe[begin].ctrl = 0x60; - 
tab->fe[begin].prop = 0xFFFFFFFF; - begin++; - } - - tab->index -= length; - return 0; -} - -/* Make space on the wanted location */ -static int gfar_expand_filer_entries(u32 begin, u32 length, - struct filer_table *tab) -{ - if (length == 0 || length + tab->index > MAX_FILER_CACHE_IDX || - begin > MAX_FILER_CACHE_IDX) - return -EINVAL; - - gfar_copy_filer_entries(&(tab->fe[begin + length]), &(tab->fe[begin]), - tab->index - length + 1); - - tab->index += length; - return 0; -} - -static int gfar_get_next_cluster_start(int start, struct filer_table *tab) -{ - for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1); - start++) { - if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) == - (RQFCR_AND | RQFCR_CLE)) - return start; - } - return -1; -} - -static int gfar_get_next_cluster_end(int start, struct filer_table *tab) -{ - for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1); - start++) { - if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) == - (RQFCR_CLE)) - return start; - } - return -1; -} - -/* Uses hardwares clustering option to reduce - * the number of filer table entries - */ -static void gfar_cluster_filer(struct filer_table *tab) -{ - s32 i = -1, j, iend, jend; - - while ((i = gfar_get_next_cluster_start(++i, tab)) != -1) { - j = i; - while ((j = gfar_get_next_cluster_start(++j, tab)) != -1) { - /* The cluster entries self and the previous one - * (a mask) must be identical! - */ - if (tab->fe[i].ctrl != tab->fe[j].ctrl) - break; - if (tab->fe[i].prop != tab->fe[j].prop) - break; - if (tab->fe[i - 1].ctrl != tab->fe[j - 1].ctrl) - break; - if (tab->fe[i - 1].prop != tab->fe[j - 1].prop) - break; - iend = gfar_get_next_cluster_end(i, tab); - jend = gfar_get_next_cluster_end(j, tab); - if (jend == -1 || iend == -1) - break; - - /* First we make some free space, where our cluster - * element should be. Then we copy it there and finally - * delete in from its old location. 
- */ - if (gfar_expand_filer_entries(iend, (jend - j), tab) == - -EINVAL) - break; - - gfar_copy_filer_entries(&(tab->fe[iend + 1]), - &(tab->fe[jend + 1]), jend - j); - - if (gfar_trim_filer_entries(jend - 1, - jend + (jend - j), - tab) == -EINVAL) - return; - - /* Mask out cluster bit */ - tab->fe[iend].ctrl &= ~(RQFCR_CLE); - } - } -} - -/* Swaps the masked bits of a1<>a2 and b1<>b2 */ -static void gfar_swap_bits(struct gfar_filer_entry *a1, - struct gfar_filer_entry *a2, - struct gfar_filer_entry *b1, - struct gfar_filer_entry *b2, u32 mask) -{ - u32 temp[4]; - temp[0] = a1->ctrl & mask; - temp[1] = a2->ctrl & mask; - temp[2] = b1->ctrl & mask; - temp[3] = b2->ctrl & mask; - - a1->ctrl &= ~mask; - a2->ctrl &= ~mask; - b1->ctrl &= ~mask; - b2->ctrl &= ~mask; - - a1->ctrl |= temp[1]; - a2->ctrl |= temp[0]; - b1->ctrl |= temp[3]; - b2->ctrl |= temp[2]; -} - -/* Generate a list consisting of masks values with their start and - * end of validity and block as indicator for parts belonging - * together (glued by ANDs) in mask_table - */ -static u32 gfar_generate_mask_table(struct gfar_mask_entry *mask_table, - struct filer_table *tab) -{ - u32 i, and_index = 0, block_index = 1; - - for (i = 0; i < tab->index; i++) { - - /* LSByte of control = 0 sets a mask */ - if (!(tab->fe[i].ctrl & 0xF)) { - mask_table[and_index].mask = tab->fe[i].prop; - mask_table[and_index].start = i; - mask_table[and_index].block = block_index; - if (and_index >= 1) - mask_table[and_index - 1].end = i - 1; - and_index++; - } - /* cluster starts and ends will be separated because they should - * hold their position - */ - if (tab->fe[i].ctrl & RQFCR_CLE) - block_index++; - /* A not set AND indicates the end of a depended block */ - if (!(tab->fe[i].ctrl & RQFCR_AND)) - block_index++; - } - - mask_table[and_index - 1].end = i - 1; - - return and_index; -} - -/* Sorts the entries of mask_table by the values of the masks. - * Important: The 0xFF80 flags of the first and last entry of a - * block must hold their position (which queue, CLusterEnable, ReJEct, - * AND) - */ -static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table, - struct filer_table *temp_table, u32 and_index) -{ - /* Pointer to compare function (_asc or _desc) */ - int (*gfar_comp)(const void *, const void *); - - u32 i, size = 0, start = 0, prev = 1; - u32 old_first, old_last, new_first, new_last; - - gfar_comp = &gfar_comp_desc; - - for (i = 0; i < and_index; i++) { - if (prev != mask_table[i].block) { - old_first = mask_table[start].start + 1; - old_last = mask_table[i - 1].end; - sort(mask_table + start, size, - sizeof(struct gfar_mask_entry), - gfar_comp, &gfar_swap); - - /* Toggle order for every block. This makes the - * thing more efficient! - */ - if (gfar_comp == gfar_comp_desc) - gfar_comp = &gfar_comp_asc; - else - gfar_comp = &gfar_comp_desc; - - new_first = mask_table[start].start + 1; - new_last = mask_table[i - 1].end; - - gfar_swap_bits(&temp_table->fe[new_first], - &temp_table->fe[old_first], - &temp_table->fe[new_last], - &temp_table->fe[old_last], - RQFCR_QUEUE | RQFCR_CLE | - RQFCR_RJE | RQFCR_AND); - - start = i; - size = 0; - } - size++; - prev = mask_table[i].block; - } -} - -/* Reduces the number of masks needed in the filer table to save entries - * This is done by sorting the masks of a depended block. A depended block is - * identified by gluing ANDs or CLE. The sorting order toggles after every - * block. Of course entries in scope of a mask must change their location with - * it. 
- */ -static int gfar_optimize_filer_masks(struct filer_table *tab) -{ - struct filer_table *temp_table; - struct gfar_mask_entry *mask_table; - - u32 and_index = 0, previous_mask = 0, i = 0, j = 0, size = 0; - s32 ret = 0; - - /* We need a copy of the filer table because - * we want to change its order - */ - temp_table = kmemdup(tab, sizeof(*temp_table), GFP_KERNEL); - if (temp_table == NULL) - return -ENOMEM; - - mask_table = kcalloc(MAX_FILER_CACHE_IDX / 2 + 1, - sizeof(struct gfar_mask_entry), GFP_KERNEL); - - if (mask_table == NULL) { - ret = -ENOMEM; - goto end; - } - - and_index = gfar_generate_mask_table(mask_table, tab); - - gfar_sort_mask_table(mask_table, temp_table, and_index); - - /* Now we can copy the data from our duplicated filer table to - * the real one in the order the mask table says - */ - for (i = 0; i < and_index; i++) { - size = mask_table[i].end - mask_table[i].start + 1; - gfar_copy_filer_entries(&(tab->fe[j]), - &(temp_table->fe[mask_table[i].start]), size); - j += size; - } - - /* And finally we just have to check for duplicated masks and drop the - * second ones - */ - for (i = 0; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) { - if (tab->fe[i].ctrl == 0x80) { - previous_mask = i++; - break; - } - } - for (; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) { - if (tab->fe[i].ctrl == 0x80) { - if (tab->fe[i].prop == tab->fe[previous_mask].prop) { - /* Two identical ones found! - * So drop the second one! - */ - gfar_trim_filer_entries(i, i, tab); - } else - /* Not identical! */ - previous_mask = i; - } - } - - kfree(mask_table); -end: kfree(temp_table); - return ret; -} - /* Write the bit-pattern from software's buffer to hardware registers */ static int gfar_write_filer_table(struct gfar_private *priv, struct filer_table *tab) @@ -1586,11 +1279,10 @@ static int gfar_write_filer_table(struct gfar_private *priv, return -EBUSY; /* Fill regular entries */ - for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].prop); - i++) + for (; i < MAX_FILER_IDX && (tab->fe[i].ctrl | tab->fe[i].prop); i++) gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop); /* Fill the rest with fall-throughs */ - for (; i < MAX_FILER_IDX - 1; i++) + for (; i < MAX_FILER_IDX; i++) gfar_write_filer(priv, i, 0x60, 0xFFFFFFFF); /* Last entry must be default accept * because that's what people expect @@ -1624,7 +1316,6 @@ static int gfar_process_filer_changes(struct gfar_private *priv) { struct ethtool_flow_spec_container *j; struct filer_table *tab; - s32 i = 0; s32 ret = 0; /* So index is set to zero, too! 
*/ @@ -1649,17 +1340,6 @@ static int gfar_process_filer_changes(struct gfar_private *priv) } } - i = tab->index; - - /* Optimizations to save entries */ - gfar_cluster_filer(tab); - gfar_optimize_filer_masks(tab); - - pr_debug("\tSummary:\n" - "\tData on hardware: %d\n" - "\tCompression rate: %d%%\n", - tab->index, 100 - (100 * tab->index) / i); - /* Write everything to hardware */ ret = gfar_write_filer_table(priv, tab); if (ret == -EBUSY) { @@ -1725,13 +1405,14 @@ static int gfar_add_cls(struct gfar_private *priv, } process: + priv->rx_list.count++; ret = gfar_process_filer_changes(priv); if (ret) goto clean_list; - priv->rx_list.count++; return ret; clean_list: + priv->rx_list.count--; list_del(&temp->list); clean_mem: kfree(temp); diff --git a/kernel/drivers/net/ethernet/freescale/gianfar_ptp.c b/kernel/drivers/net/ethernet/freescale/gianfar_ptp.c index 8e3cd77aa..b40fba929 100644 --- a/kernel/drivers/net/ethernet/freescale/gianfar_ptp.c +++ b/kernel/drivers/net/ethernet/freescale/gianfar_ptp.c @@ -467,7 +467,7 @@ static int gianfar_ptp_probe(struct platform_device *dev) etsects->irq = platform_get_irq(dev, 0); - if (etsects->irq == NO_IRQ) { + if (etsects->irq < 0) { pr_err("irq not in device tree\n"); goto no_node; } @@ -557,6 +557,7 @@ static const struct of_device_id match_table[] = { { .compatible = "fsl,etsec-ptp" }, {}, }; +MODULE_DEVICE_TABLE(of, match_table); static struct platform_driver gianfar_ptp_driver = { .driver = { diff --git a/kernel/drivers/net/ethernet/freescale/ucc_geth.c b/kernel/drivers/net/ethernet/freescale/ucc_geth.c index 4dd40e057..650f7888e 100644 --- a/kernel/drivers/net/ethernet/freescale/ucc_geth.c +++ b/kernel/drivers/net/ethernet/freescale/ucc_geth.c @@ -1384,6 +1384,8 @@ static int adjust_enet_interface(struct ucc_geth_private *ugeth) value = phy_read(tbiphy, ENET_TBI_MII_CR); value &= ~0x1000; /* Turn off autonegotiation */ phy_write(tbiphy, ENET_TBI_MII_CR, value); + + put_device(&tbiphy->dev); } init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2); @@ -1702,8 +1704,10 @@ static void uec_configure_serdes(struct net_device *dev) * everything for us? Resetting it takes the link down and requires * several seconds for it to come back. */ - if (phy_read(tbiphy, ENET_TBI_MII_SR) & TBISR_LSTATUS) + if (phy_read(tbiphy, ENET_TBI_MII_SR) & TBISR_LSTATUS) { + put_device(&tbiphy->dev); return; + } /* Single clk mode, mii mode off(for serdes communication) */ phy_write(tbiphy, ENET_TBI_MII_ANA, TBIANA_SETTINGS); @@ -1711,6 +1715,8 @@ static void uec_configure_serdes(struct net_device *dev) phy_write(tbiphy, ENET_TBI_MII_TBICON, TBICON_CLK_SELECT); phy_write(tbiphy, ENET_TBI_MII_CR, TBICR_SETTINGS); + + put_device(&tbiphy->dev); } /* Configure the PHY for dev. diff --git a/kernel/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/kernel/drivers/net/ethernet/freescale/ucc_geth_ethtool.c index cc83350d5..89714f5e0 100644 --- a/kernel/drivers/net/ethernet/freescale/ucc_geth_ethtool.c +++ b/kernel/drivers/net/ethernet/freescale/ucc_geth_ethtool.c @@ -351,8 +351,6 @@ uec_get_drvinfo(struct net_device *netdev, strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version)); strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version)); strlcpy(drvinfo->bus_info, "QUICC ENGINE", sizeof(drvinfo->bus_info)); - drvinfo->eedump_len = 0; - drvinfo->regdump_len = uec_get_regs_len(netdev); } #ifdef CONFIG_PM -- cgit 1.2.3-korg
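The gfar_start_xmit()/gfar_clean_tx_ring() hunks shrink the txlock so it only guards modifications of num_txbdfree, on the argument spelled out in the new comment: outside the xmit path the count can only grow, so an unlocked read can at worst under-report free descriptors and stop the queue early, never oversubscribe it. A minimal userspace model of that scheme, using a pthread mutex and a C11 atomic in place of the kernel spinlock and plain field; the names mirror the driver's but nothing here is kernel API:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* Userspace model of the txlock narrowing: the free-descriptor count is
 * read locklessly in xmit (it can only grow underneath us, so a stale
 * value is merely pessimistic), while every modification still happens
 * under the lock.
 */
static pthread_mutex_t txlock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int num_txbdfree = 256;

static int xmit(int nr_txbds)
{
	if (atomic_load(&num_txbdfree) < nr_txbds)
		return -1;			/* queue full, stop the stack */

	pthread_mutex_lock(&txlock);
	atomic_fetch_sub(&num_txbdfree, nr_txbds);
	pthread_mutex_unlock(&txlock);
	return 0;
}

static void clean_tx_ring(int nr_txbds)
{
	pthread_mutex_lock(&txlock);
	atomic_fetch_add(&num_txbdfree, nr_txbds);
	pthread_mutex_unlock(&txlock);
}

int main(void)
{
	printf("xmit: %d\n", xmit(3));		/* 0: descriptors reserved */
	clean_tx_ring(3);
	printf("free: %d\n", atomic_load(&num_txbdfree));	/* 256 */
	return 0;
}

The atomic type is what makes the unlocked read well-defined in C11 terms; in the kernel the same effect comes from the word-sized field plus the memory barriers already present in the ring handling.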
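The new RX path drops per-packet skb allocation in favor of page halves: gfar_new_page() maps a whole page, gfar_add_rx_frag() hands one GFAR_RXB_TRUESIZE half to the stack, and when the driver still holds the only page reference it flips page_offset to the other half and re-arms the descriptor without touching the allocator. A loose userspace sketch of that flip-and-reuse test, with a plain counter standing in for page_count()/_count and all values illustrative:

#include <stdbool.h>
#include <stdio.h>

#define RXB_TRUESIZE 2048	/* half a 4K page per RX buffer, as in the patch */

struct rx_buff_model {
	int refcount;		/* stands in for page_count(page) */
	unsigned int page_offset;
};

/* Mirror of the reuse decision in gfar_add_rx_frag(): the page can be
 * re-armed only if the driver holds the sole reference; on success the
 * buffer flips to the other half and takes an extra reference for it.
 */
static bool try_reuse_half(struct rx_buff_model *rxb)
{
	if (rxb->refcount != 1)
		return false;	/* stack still owns the other half */

	rxb->page_offset ^= RXB_TRUESIZE;
	rxb->refcount++;	/* reference for the half just given to hw */
	return true;
}

int main(void)
{
	struct rx_buff_model rxb = { .refcount = 1, .page_offset = 0 };

	rxb.refcount++;		/* half 0 handed to the stack... */
	rxb.refcount--;		/* ...and released again promptly */
	printf("reuse: %d, offset now %u\n", try_reuse_half(&rxb),
	       rxb.page_offset);	/* reuse: 1, offset now 2048 */
	return 0;
}

When the stack sits on the buffer longer (refcount above 1), gfar_get_next_rxbuff() instead unmaps the page and lets gfar_alloc_rx_buffs() allocate a fresh one on the next fill.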
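gfar_rxbd_unused() always leaves one descriptor of slack, so next_to_use can never run into next_to_clean and an empty ring stays distinguishable from a full one; gfar_alloc_rx_buffs() is then only invoked once at least GFAR_RX_BUFF_ALLOC (16) slots are free, batching allocations per NAPI poll. A self-contained check of the same arithmetic, with arbitrary example indices:

#include <assert.h>
#include <stdio.h>

/* Same arithmetic as gfar_rxbd_unused(): slots that may be re-armed,
 * always keeping one slot back as the full/empty sentinel.
 */
static int rxbd_unused(int next_to_clean, int next_to_use, int ring_size)
{
	if (next_to_clean > next_to_use)
		return next_to_clean - next_to_use - 1;

	return ring_size + next_to_clean - next_to_use - 1;
}

int main(void)
{
	assert(rxbd_unused(0, 0, 256) == 255);	/* nothing armed yet */
	assert(rxbd_unused(10, 9, 256) == 0);	/* fully armed ring */
	assert(rxbd_unused(0, 250, 256) == 5);	/* wrapped case */
	printf("ring accounting holds\n");
	return 0;
}

The same one-behind convention is what lets gfar_rxbd_dma_lastfree() derive the "last free" descriptor address for the LFC rfbptr register from next_to_use alone.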
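count_errors() and gfar_clean_rx_ring() now consume a single 32-bit lstatus word instead of separate 16-bit status and length fields; judging from BD_LENGTH_MASK and the BD_LFLAG() wrapping in the hunks, status bits sit in the upper half-word and frame length in the lower. The macro and flag values below are reconstructions for illustration, not copied from gianfar.h:

#include <stdint.h>
#include <stdio.h>

/* Assumed layout, inferred from the usage in gfar_clean_rx_ring():
 * lstatus = status (upper 16 bits) | buffer length (lower 16 bits).
 */
#define BD_LENGTH_MASK	0x0000ffffu
#define BD_LFLAG(flags)	((uint32_t)(flags) << 16)

#define RXBD_EMPTY	0x8000	/* per-descriptor status bits (16-bit space) */
#define RXBD_LAST	0x0800

int main(void)
{
	uint32_t lstatus = BD_LFLAG(RXBD_LAST) | 1514;	/* 1514-byte final fragment */

	printf("len=%u last=%d empty=%d\n",
	       lstatus & BD_LENGTH_MASK,
	       !!(lstatus & BD_LFLAG(RXBD_LAST)),
	       !!(lstatus & BD_LFLAG(RXBD_EMPTY)));
	return 0;
}

Reading the word once also pairs naturally with the rmb() in the RX loop: a single be32 load is ordered against the buffer contents, where two be16 loads would need more care.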
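The reworked gfar_set_wol() reduces the ethtool request to the driver's GFAR_WOL_* capability bits before touching wakeup state, with WAKE_MAGIC taking precedence over WAKE_UCAST (the filer-based unicast wake). A hedged sketch of that translation as a pure function; the WAKE_* values here are local stand-ins rather than the real ethtool.h constants:

#include <stdint.h>
#include <stdio.h>

/* Driver-private WoL bits, as introduced in gianfar.h by this patch */
#define GFAR_WOL_MAGIC       0x0001
#define GFAR_WOL_FILER_UCAST 0x0002

/* Stand-ins for the ethtool WAKE_* flags (values illustrative only) */
#define WAKE_UCAST 0x0001
#define WAKE_MAGIC 0x0002

/* Mirrors gfar_set_wol(): magic-packet wake wins over unicast filer
 * wake, and anything the hardware can't do is masked off.
 */
static uint16_t wol_opts_from_ethtool(uint32_t wolopts, uint16_t supported)
{
	uint16_t opts = 0;

	if (wolopts & WAKE_MAGIC)
		opts |= GFAR_WOL_MAGIC;
	else if (wolopts & WAKE_UCAST)
		opts |= GFAR_WOL_FILER_UCAST;

	return opts & supported;
}

int main(void)
{
	uint16_t supported = GFAR_WOL_MAGIC;	/* eTSEC with magic-packet only */

	printf("%#x\n", wol_opts_from_ethtool(WAKE_MAGIC | WAKE_UCAST, supported));
	/* -> 0x1: unicast request dropped, magic packet kept */
	return 0;
}

Keeping the translation separate from device_set_wakeup_enable() is what lets the driver clear priv->wol_opts first and only commit the new options once the wakeup source was accepted.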