author      Yunhong Jiang <yunhong.jiang@intel.com>    2015-08-04 12:17:53 -0700
committer   Yunhong Jiang <yunhong.jiang@intel.com>    2015-08-04 15:44:42 -0700
commit      9ca8dbcc65cfc63d6f5ef3312a33184e1d726e00 (patch)
tree        1c9cafbcd35f783a87880a10f85d1a060db1a563 /kernel/drivers/net/ethernet/freescale
parent      98260f3884f4a202f9ca5eabed40b1354c489b29 (diff)
Add the rt linux 4.1.3-rt3 as base
Import the rt linux 4.1.3-rt3 as OPNFV kvm base. It's from
git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git linux-4.1.y-rt
and the base is:

    commit 0917f823c59692d751951bf5ea699a2d1e2f26a2
    Author: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
    Date:   Sat Jul 25 12:13:34 2015 +0200

        Prepare v4.1.3-rt3

        Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>

We lose all the git history this way and it's not good. We should apply
another opnfv project repo in future.

Change-Id: I87543d81c9df70d99c5001fbdf646b202c19f423
Signed-off-by: Yunhong Jiang <yunhong.jiang@intel.com>
Diffstat (limited to 'kernel/drivers/net/ethernet/freescale')
-rw-r--r-- kernel/drivers/net/ethernet/freescale/Kconfig | 96
-rw-r--r-- kernel/drivers/net/ethernet/freescale/Makefile | 19
-rw-r--r-- kernel/drivers/net/ethernet/freescale/fec.h | 569
-rw-r--r-- kernel/drivers/net/ethernet/freescale/fec_main.c | 3503
-rw-r--r-- kernel/drivers/net/ethernet/freescale/fec_mpc52xx.c | 1116
-rw-r--r-- kernel/drivers/net/ethernet/freescale/fec_mpc52xx.h | 294
-rw-r--r-- kernel/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c | 158
-rw-r--r-- kernel/drivers/net/ethernet/freescale/fec_ptp.c | 637
-rw-r--r-- kernel/drivers/net/ethernet/freescale/fs_enet/Kconfig | 34
-rw-r--r-- kernel/drivers/net/ethernet/freescale/fs_enet/Makefile | 14
-rw-r--r-- kernel/drivers/net/ethernet/freescale/fs_enet/fec.h | 44
-rw-r--r-- kernel/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c | 1141
-rw-r--r-- kernel/drivers/net/ethernet/freescale/fs_enet/fs_enet.h | 250
-rw-r--r-- kernel/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c | 614
-rw-r--r-- kernel/drivers/net/ethernet/freescale/fs_enet/mac-fec.c | 533
-rw-r--r-- kernel/drivers/net/ethernet/freescale/fs_enet/mac-scc.c | 516
-rw-r--r-- kernel/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c | 233
-rw-r--r-- kernel/drivers/net/ethernet/freescale/fs_enet/mii-fec.c | 234
-rw-r--r-- kernel/drivers/net/ethernet/freescale/fsl_pq_mdio.c | 499
-rw-r--r-- kernel/drivers/net/ethernet/freescale/gianfar.c | 3621
-rw-r--r-- kernel/drivers/net/ethernet/freescale/gianfar.h | 1343
-rw-r--r-- kernel/drivers/net/ethernet/freescale/gianfar_ethtool.c | 1911
-rw-r--r-- kernel/drivers/net/ethernet/freescale/gianfar_ptp.c | 574
-rw-r--r-- kernel/drivers/net/ethernet/freescale/ucc_geth.c | 3983
-rw-r--r-- kernel/drivers/net/ethernet/freescale/ucc_geth.h | 1238
-rw-r--r-- kernel/drivers/net/ethernet/freescale/ucc_geth_ethtool.c | 421
-rw-r--r-- kernel/drivers/net/ethernet/freescale/xgmac_mdio.c | 333
27 files changed, 23928 insertions, 0 deletions
diff --git a/kernel/drivers/net/ethernet/freescale/Kconfig b/kernel/drivers/net/ethernet/freescale/Kconfig
new file mode 100644
index 000000000..25e342572
--- /dev/null
+++ b/kernel/drivers/net/ethernet/freescale/Kconfig
@@ -0,0 +1,96 @@
+#
+# Freescale device configuration
+#
+
+config NET_VENDOR_FREESCALE
+ bool "Freescale devices"
+ default y
+ depends on FSL_SOC || QUICC_ENGINE || CPM1 || CPM2 || PPC_MPC512x || \
+ M523x || M527x || M5272 || M528x || M520x || M532x || \
+ ARCH_MXC || ARCH_MXS || (PPC_MPC52xx && PPC_BESTCOMM)
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Freescale devices. If you say Y, you will be
+ asked for your specific card in the following questions.
+
+if NET_VENDOR_FREESCALE
+
+config FEC
+ tristate "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
+ depends on (M523x || M527x || M5272 || M528x || M520x || M532x || \
+ ARCH_MXC || SOC_IMX28)
+ default ARCH_MXC || SOC_IMX28 if ARM
+ select PHYLIB
+ select PTP_1588_CLOCK
+ ---help---
+ Say Y here if you want to use the built-in 10/100 Fast ethernet
+ controller on some Motorola ColdFire and Freescale i.MX processors.
+
+config FEC_MPC52xx
+ tristate "FEC MPC52xx driver"
+ depends on PPC_MPC52xx && PPC_BESTCOMM
+ select CRC32
+ select PHYLIB
+ select PPC_BESTCOMM_FEC
+ ---help---
+ This option enables support for the MPC5200's on-chip
+ Fast Ethernet Controller.
+ If compiled as module, it will be called fec_mpc52xx.
+
+config FEC_MPC52xx_MDIO
+ bool "FEC MPC52xx MDIO bus driver"
+ depends on FEC_MPC52xx
+ default y
+ ---help---
+ The MPC5200's FEC can connect to the Ethernet either with
+ an external MII PHY chip or 10 Mbps 7-wire interface
+ (Motorola? industry standard).
+ If your board uses an external PHY connected to FEC, enable this.
+ If not sure, enable.
+ If compiled as module, it will be called fec_mpc52xx_phy.
+
+source "drivers/net/ethernet/freescale/fs_enet/Kconfig"
+
+config FSL_PQ_MDIO
+ tristate "Freescale PQ MDIO"
+ select PHYLIB
+ ---help---
+ This driver supports the MDIO bus used by the gianfar and UCC drivers.
+
+config FSL_XGMAC_MDIO
+ tristate "Freescale XGMAC MDIO"
+ select PHYLIB
+ select OF_MDIO
+ ---help---
+ This driver supports the MDIO bus on the Fman 10G Ethernet MACs, and
+ on the FMan mEMAC (which supports both Clauses 22 and 45).
+
+config UCC_GETH
+ tristate "Freescale QE Gigabit Ethernet"
+ depends on QUICC_ENGINE
+ select FSL_PQ_MDIO
+ select PHYLIB
+ ---help---
+ This driver supports the Gigabit Ethernet mode of the QUICC Engine,
+ which is available on some Freescale SOCs.
+
+config UGETH_TX_ON_DEMAND
+ bool "Transmit on Demand support"
+ depends on UCC_GETH
+
+config GIANFAR
+ tristate "Gianfar Ethernet"
+ depends on FSL_SOC
+ select FSL_PQ_MDIO
+ select PHYLIB
+ select CRC32
+ ---help---
+ This driver supports the Gigabit TSEC on the MPC83xx, MPC85xx,
+ and MPC86xx family of chips, and the FEC on the 8540.
+
+endif # NET_VENDOR_FREESCALE
diff --git a/kernel/drivers/net/ethernet/freescale/Makefile b/kernel/drivers/net/ethernet/freescale/Makefile
new file mode 100644
index 000000000..71debd1c1
--- /dev/null
+++ b/kernel/drivers/net/ethernet/freescale/Makefile
@@ -0,0 +1,19 @@
+#
+# Makefile for the Freescale network device drivers.
+#
+
+obj-$(CONFIG_FEC) += fec.o
+fec-objs := fec_main.o fec_ptp.o
+obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx.o
+ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y)
+ obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx_phy.o
+endif
+obj-$(CONFIG_FS_ENET) += fs_enet/
+obj-$(CONFIG_FSL_PQ_MDIO) += fsl_pq_mdio.o
+obj-$(CONFIG_FSL_XGMAC_MDIO) += xgmac_mdio.o
+obj-$(CONFIG_GIANFAR) += gianfar_driver.o
+obj-$(CONFIG_PTP_1588_CLOCK_GIANFAR) += gianfar_ptp.o
+gianfar_driver-objs := gianfar.o \
+ gianfar_ethtool.o
+obj-$(CONFIG_UCC_GETH) += ucc_geth_driver.o
+ucc_geth_driver-objs := ucc_geth.o ucc_geth_ethtool.o
diff --git a/kernel/drivers/net/ethernet/freescale/fec.h b/kernel/drivers/net/ethernet/freescale/fec.h
new file mode 100644
index 000000000..a86af8a74
--- /dev/null
+++ b/kernel/drivers/net/ethernet/freescale/fec.h
@@ -0,0 +1,569 @@
+/****************************************************************************/
+
+/*
+ * fec.h -- Fast Ethernet Controller for Motorola ColdFire SoC
+ * processors.
+ *
+ * (C) Copyright 2000-2005, Greg Ungerer (gerg@snapgear.com)
+ * (C) Copyright 2000-2001, Lineo (www.lineo.com)
+ */
+
+/****************************************************************************/
+#ifndef FEC_H
+#define FEC_H
+/****************************************************************************/
+
+#include <linux/clocksource.h>
+#include <linux/net_tstamp.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/timecounter.h>
+
+#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
+ defined(CONFIG_M520x) || defined(CONFIG_M532x) || \
+ defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
+/*
+ * Just figures, Motorola would have to change the offsets for
+ * registers in the same peripheral device on different models
+ * of the ColdFire!
+ */
+#define FEC_IEVENT 0x004 /* Interrupt event reg */
+#define FEC_IMASK 0x008 /* Interrupt mask reg */
+#define FEC_R_DES_ACTIVE_0 0x010 /* Receive descriptor reg */
+#define FEC_X_DES_ACTIVE_0 0x014 /* Transmit descriptor reg */
+#define FEC_ECNTRL 0x024 /* Ethernet control reg */
+#define FEC_MII_DATA 0x040 /* MII manage frame reg */
+#define FEC_MII_SPEED 0x044 /* MII speed control reg */
+#define FEC_MIB_CTRLSTAT 0x064 /* MIB control/status reg */
+#define FEC_R_CNTRL 0x084 /* Receive control reg */
+#define FEC_X_CNTRL 0x0c4 /* Transmit Control reg */
+#define FEC_ADDR_LOW 0x0e4 /* Low 32bits MAC address */
+#define FEC_ADDR_HIGH 0x0e8 /* High 16bits MAC address */
+#define FEC_OPD 0x0ec /* Opcode + Pause duration */
+#define FEC_TXIC0 0x0f0 /* Tx Interrupt Coalescing for ring 0 */
+#define FEC_TXIC1 0x0f4 /* Tx Interrupt Coalescing for ring 1 */
+#define FEC_TXIC2 0x0f8 /* Tx Interrupt Coalescing for ring 2 */
+#define FEC_RXIC0 0x100 /* Rx Interrupt Coalescing for ring 0 */
+#define FEC_RXIC1 0x104 /* Rx Interrupt Coalescing for ring 1 */
+#define FEC_RXIC2 0x108 /* Rx Interrupt Coalescing for ring 2 */
+#define FEC_HASH_TABLE_HIGH 0x118 /* High 32bits hash table */
+#define FEC_HASH_TABLE_LOW 0x11c /* Low 32bits hash table */
+#define FEC_GRP_HASH_TABLE_HIGH 0x120 /* High 32bits hash table */
+#define FEC_GRP_HASH_TABLE_LOW 0x124 /* Low 32bits hash table */
+#define FEC_X_WMRK 0x144 /* FIFO transmit water mark */
+#define FEC_R_BOUND 0x14c /* FIFO receive bound reg */
+#define FEC_R_FSTART 0x150 /* FIFO receive start reg */
+#define FEC_R_DES_START_1 0x160 /* Receive descriptor ring 1 */
+#define FEC_X_DES_START_1 0x164 /* Transmit descriptor ring 1 */
+#define FEC_R_BUFF_SIZE_1 0x168 /* Maximum receive buff ring1 size */
+#define FEC_R_DES_START_2 0x16c /* Receive descriptor ring 2 */
+#define FEC_X_DES_START_2 0x170 /* Transmit descriptor ring 2 */
+#define FEC_R_BUFF_SIZE_2 0x174 /* Maximum receive buff ring2 size */
+#define FEC_R_DES_START_0 0x180 /* Receive descriptor ring */
+#define FEC_X_DES_START_0 0x184 /* Transmit descriptor ring */
+#define FEC_R_BUFF_SIZE_0 0x188 /* Maximum receive buff size */
+#define FEC_R_FIFO_RSFL 0x190 /* Receive FIFO section full threshold */
+#define FEC_R_FIFO_RSEM 0x194 /* Receive FIFO section empty threshold */
+#define FEC_R_FIFO_RAEM 0x198 /* Receive FIFO almost empty threshold */
+#define FEC_R_FIFO_RAFL 0x19c /* Receive FIFO almost full threshold */
+#define FEC_RACC 0x1c4 /* Receive Accelerator function */
+#define FEC_RCMR_1 0x1c8 /* Receive classification match ring 1 */
+#define FEC_RCMR_2 0x1cc /* Receive classification match ring 2 */
+#define FEC_DMA_CFG_1 0x1d8 /* DMA class configuration for ring 1 */
+#define FEC_DMA_CFG_2 0x1dc /* DMA class Configuration for ring 2 */
+#define FEC_R_DES_ACTIVE_1 0x1e0 /* Rx descriptor active for ring 1 */
+#define FEC_X_DES_ACTIVE_1 0x1e4 /* Tx descriptor active for ring 1 */
+#define FEC_R_DES_ACTIVE_2 0x1e8 /* Rx descriptor active for ring 2 */
+#define FEC_X_DES_ACTIVE_2 0x1ec /* Tx descriptor active for ring 2 */
+#define FEC_QOS_SCHEME 0x1f0 /* Set multi queues Qos scheme */
+#define FEC_MIIGSK_CFGR 0x300 /* MIIGSK Configuration reg */
+#define FEC_MIIGSK_ENR 0x308 /* MIIGSK Enable reg */
+
+#define BM_MIIGSK_CFGR_MII 0x00
+#define BM_MIIGSK_CFGR_RMII 0x01
+#define BM_MIIGSK_CFGR_FRCONT_10M 0x40
+
+#define RMON_T_DROP 0x200 /* Count of frames not cntd correctly */
+#define RMON_T_PACKETS 0x204 /* RMON TX packet count */
+#define RMON_T_BC_PKT 0x208 /* RMON TX broadcast pkts */
+#define RMON_T_MC_PKT 0x20c /* RMON TX multicast pkts */
+#define RMON_T_CRC_ALIGN 0x210 /* RMON TX pkts with CRC align err */
+#define RMON_T_UNDERSIZE 0x214 /* RMON TX pkts < 64 bytes, good CRC */
+#define RMON_T_OVERSIZE 0x218 /* RMON TX pkts > MAX_FL bytes good CRC */
+#define RMON_T_FRAG 0x21c /* RMON TX pkts < 64 bytes, bad CRC */
+#define RMON_T_JAB 0x220 /* RMON TX pkts > MAX_FL bytes, bad CRC */
+#define RMON_T_COL 0x224 /* RMON TX collision count */
+#define RMON_T_P64 0x228 /* RMON TX 64 byte pkts */
+#define RMON_T_P65TO127 0x22c /* RMON TX 65 to 127 byte pkts */
+#define RMON_T_P128TO255 0x230 /* RMON TX 128 to 255 byte pkts */
+#define RMON_T_P256TO511 0x234 /* RMON TX 256 to 511 byte pkts */
+#define RMON_T_P512TO1023 0x238 /* RMON TX 512 to 1023 byte pkts */
+#define RMON_T_P1024TO2047 0x23c /* RMON TX 1024 to 2047 byte pkts */
+#define RMON_T_P_GTE2048 0x240 /* RMON TX pkts > 2048 bytes */
+#define RMON_T_OCTETS 0x244 /* RMON TX octets */
+#define IEEE_T_DROP 0x248 /* Count of frames not counted crtly */
+#define IEEE_T_FRAME_OK 0x24c /* Frames tx'd OK */
+#define IEEE_T_1COL 0x250 /* Frames tx'd with single collision */
+#define IEEE_T_MCOL 0x254 /* Frames tx'd with multiple collision */
+#define IEEE_T_DEF 0x258 /* Frames tx'd after deferral delay */
+#define IEEE_T_LCOL 0x25c /* Frames tx'd with late collision */
+#define IEEE_T_EXCOL 0x260 /* Frames tx'd with excesv collisions */
+#define IEEE_T_MACERR 0x264 /* Frames tx'd with TX FIFO underrun */
+#define IEEE_T_CSERR 0x268 /* Frames tx'd with carrier sense err */
+#define IEEE_T_SQE 0x26c /* Frames tx'd with SQE err */
+#define IEEE_T_FDXFC 0x270 /* Flow control pause frames tx'd */
+#define IEEE_T_OCTETS_OK 0x274 /* Octet count for frames tx'd w/o err */
+#define RMON_R_PACKETS 0x284 /* RMON RX packet count */
+#define RMON_R_BC_PKT 0x288 /* RMON RX broadcast pkts */
+#define RMON_R_MC_PKT 0x28c /* RMON RX multicast pkts */
+#define RMON_R_CRC_ALIGN 0x290 /* RMON RX pkts with CRC alignment err */
+#define RMON_R_UNDERSIZE 0x294 /* RMON RX pkts < 64 bytes, good CRC */
+#define RMON_R_OVERSIZE 0x298 /* RMON RX pkts > MAX_FL bytes good CRC */
+#define RMON_R_FRAG 0x29c /* RMON RX pkts < 64 bytes, bad CRC */
+#define RMON_R_JAB 0x2a0 /* RMON RX pkts > MAX_FL bytes, bad CRC */
+#define RMON_R_RESVD_O 0x2a4 /* Reserved */
+#define RMON_R_P64 0x2a8 /* RMON RX 64 byte pkts */
+#define RMON_R_P65TO127 0x2ac /* RMON RX 65 to 127 byte pkts */
+#define RMON_R_P128TO255 0x2b0 /* RMON RX 128 to 255 byte pkts */
+#define RMON_R_P256TO511 0x2b4 /* RMON RX 256 to 511 byte pkts */
+#define RMON_R_P512TO1023 0x2b8 /* RMON RX 512 to 1023 byte pkts */
+#define RMON_R_P1024TO2047 0x2bc /* RMON RX 1024 to 2047 byte pkts */
+#define RMON_R_P_GTE2048 0x2c0 /* RMON RX pkts > 2048 bytes */
+#define RMON_R_OCTETS 0x2c4 /* RMON RX octets */
+#define IEEE_R_DROP 0x2c8 /* Count frames not counted correctly */
+#define IEEE_R_FRAME_OK 0x2cc /* Frames rx'd OK */
+#define IEEE_R_CRC 0x2d0 /* Frames rx'd with CRC err */
+#define IEEE_R_ALIGN 0x2d4 /* Frames rx'd with alignment err */
+#define IEEE_R_MACERR 0x2d8 /* Receive FIFO overflow count */
+#define IEEE_R_FDXFC 0x2dc /* Flow control pause frames rx'd */
+#define IEEE_R_OCTETS_OK 0x2e0 /* Octet cnt for frames rx'd w/o err */
+
+#else
+
+#define FEC_ECNTRL 0x000 /* Ethernet control reg */
+#define FEC_IEVENT 0x004 /* Interrupt event reg */
+#define FEC_IMASK 0x008 /* Interrupt mask reg */
+#define FEC_IVEC 0x00c /* Interrupt vec status reg */
+#define FEC_R_DES_ACTIVE_0 0x010 /* Receive descriptor reg */
+#define FEC_R_DES_ACTIVE_1 FEC_R_DES_ACTIVE_0
+#define FEC_R_DES_ACTIVE_2 FEC_R_DES_ACTIVE_0
+#define FEC_X_DES_ACTIVE_0 0x014 /* Transmit descriptor reg */
+#define FEC_X_DES_ACTIVE_1 FEC_X_DES_ACTIVE_0
+#define FEC_X_DES_ACTIVE_2 FEC_X_DES_ACTIVE_0
+#define FEC_MII_DATA 0x040 /* MII manage frame reg */
+#define FEC_MII_SPEED 0x044 /* MII speed control reg */
+#define FEC_R_BOUND 0x08c /* FIFO receive bound reg */
+#define FEC_R_FSTART 0x090 /* FIFO receive start reg */
+#define FEC_X_WMRK 0x0a4 /* FIFO transmit water mark */
+#define FEC_X_FSTART 0x0ac /* FIFO transmit start reg */
+#define FEC_R_CNTRL 0x104 /* Receive control reg */
+#define FEC_MAX_FRM_LEN 0x108 /* Maximum frame length reg */
+#define FEC_X_CNTRL 0x144 /* Transmit Control reg */
+#define FEC_ADDR_LOW 0x3c0 /* Low 32bits MAC address */
+#define FEC_ADDR_HIGH 0x3c4 /* High 16bits MAC address */
+#define FEC_GRP_HASH_TABLE_HIGH 0x3c8 /* High 32bits hash table */
+#define FEC_GRP_HASH_TABLE_LOW 0x3cc /* Low 32bits hash table */
+#define FEC_R_DES_START_0 0x3d0 /* Receive descriptor ring */
+#define FEC_R_DES_START_1 FEC_R_DES_START_0
+#define FEC_R_DES_START_2 FEC_R_DES_START_0
+#define FEC_X_DES_START_0 0x3d4 /* Transmit descriptor ring */
+#define FEC_X_DES_START_1 FEC_X_DES_START_0
+#define FEC_X_DES_START_2 FEC_X_DES_START_0
+#define FEC_R_BUFF_SIZE_0 0x3d8 /* Maximum receive buff size */
+#define FEC_R_BUFF_SIZE_1 FEC_R_BUFF_SIZE_0
+#define FEC_R_BUFF_SIZE_2 FEC_R_BUFF_SIZE_0
+#define FEC_FIFO_RAM 0x400 /* FIFO RAM buffer */
+/* These registers do not exist on the real chip.
+ * They are defined only so that the driver builds.
+ */
+#define FEC_RCMR_1 0xfff
+#define FEC_RCMR_2 0xfff
+#define FEC_DMA_CFG_1 0xfff
+#define FEC_DMA_CFG_2 0xfff
+#define FEC_TXIC0 0xfff
+#define FEC_TXIC1 0xfff
+#define FEC_TXIC2 0xfff
+#define FEC_RXIC0 0xfff
+#define FEC_RXIC1 0xfff
+#define FEC_RXIC2 0xfff
+#endif /* CONFIG_M5272 */
+
+
+/*
+ * Define the buffer descriptor structure.
+ */
+#if defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
+struct bufdesc {
+ unsigned short cbd_datlen; /* Data length */
+ unsigned short cbd_sc; /* Control and status info */
+ unsigned long cbd_bufaddr; /* Buffer address */
+};
+#else
+struct bufdesc {
+ unsigned short cbd_sc; /* Control and status info */
+ unsigned short cbd_datlen; /* Data length */
+ unsigned long cbd_bufaddr; /* Buffer address */
+};
+#endif
+
+struct bufdesc_ex {
+ struct bufdesc desc;
+ unsigned long cbd_esc;
+ unsigned long cbd_prot;
+ unsigned long cbd_bdu;
+ unsigned long ts;
+ unsigned short res0[4];
+};
+
+/*
+ * The following definitions are courtesy of commproc.h, which was
+ * Copyright (c) 1997 Dan Malek (dmalek@jlc.net).
+ */
+#define BD_SC_EMPTY ((ushort)0x8000) /* Receive is empty */
+#define BD_SC_READY ((ushort)0x8000) /* Transmit is ready */
+#define BD_SC_WRAP ((ushort)0x2000) /* Last buffer descriptor */
+#define BD_SC_INTRPT ((ushort)0x1000) /* Interrupt on change */
+#define BD_SC_CM ((ushort)0x0200) /* Continuous mode */
+#define BD_SC_ID ((ushort)0x0100) /* Rec'd too many idles */
+#define BD_SC_P ((ushort)0x0100) /* xmt preamble */
+#define BD_SC_BR ((ushort)0x0020) /* Break received */
+#define BD_SC_FR ((ushort)0x0010) /* Framing error */
+#define BD_SC_PR ((ushort)0x0008) /* Parity error */
+#define BD_SC_OV ((ushort)0x0002) /* Overrun */
+#define BD_SC_CD ((ushort)0x0001) /* ?? */
+
+/* Buffer descriptor control/status used by Ethernet receive.
+ */
+#define BD_ENET_RX_EMPTY ((ushort)0x8000)
+#define BD_ENET_RX_WRAP ((ushort)0x2000)
+#define BD_ENET_RX_INTR ((ushort)0x1000)
+#define BD_ENET_RX_LAST ((ushort)0x0800)
+#define BD_ENET_RX_FIRST ((ushort)0x0400)
+#define BD_ENET_RX_MISS ((ushort)0x0100)
+#define BD_ENET_RX_LG ((ushort)0x0020)
+#define BD_ENET_RX_NO ((ushort)0x0010)
+#define BD_ENET_RX_SH ((ushort)0x0008)
+#define BD_ENET_RX_CR ((ushort)0x0004)
+#define BD_ENET_RX_OV ((ushort)0x0002)
+#define BD_ENET_RX_CL ((ushort)0x0001)
+#define BD_ENET_RX_STATS ((ushort)0x013f) /* All status bits */
+
+/* Enhanced buffer descriptor control/status used by Ethernet receive */
+#define BD_ENET_RX_VLAN 0x00000004
+
+/* Buffer descriptor control/status used by Ethernet transmit.
+ */
+#define BD_ENET_TX_READY ((ushort)0x8000)
+#define BD_ENET_TX_PAD ((ushort)0x4000)
+#define BD_ENET_TX_WRAP ((ushort)0x2000)
+#define BD_ENET_TX_INTR ((ushort)0x1000)
+#define BD_ENET_TX_LAST ((ushort)0x0800)
+#define BD_ENET_TX_TC ((ushort)0x0400)
+#define BD_ENET_TX_DEF ((ushort)0x0200)
+#define BD_ENET_TX_HB ((ushort)0x0100)
+#define BD_ENET_TX_LC ((ushort)0x0080)
+#define BD_ENET_TX_RL ((ushort)0x0040)
+#define BD_ENET_TX_RCMASK ((ushort)0x003c)
+#define BD_ENET_TX_UN ((ushort)0x0002)
+#define BD_ENET_TX_CSL ((ushort)0x0001)
+#define BD_ENET_TX_STATS ((ushort)0x0fff) /* All status bits */
+
+/* enhanced buffer descriptor control/status used by Ethernet transmit */
+#define BD_ENET_TX_INT 0x40000000
+#define BD_ENET_TX_TS 0x20000000
+#define BD_ENET_TX_PINS 0x10000000
+#define BD_ENET_TX_IINS 0x08000000
+
+
+/* This device has up to three irqs on some platforms */
+#define FEC_IRQ_NUM 3
+
+/* Maximum number of queues supported.
+ * ENET with the AVB IP can support up to 3 independent tx queues and rx queues.
+ * Users may select any queue count less than or equal to 3.
+ */
+#define FEC_ENET_MAX_TX_QS 3
+#define FEC_ENET_MAX_RX_QS 3
+
+#define FEC_R_DES_START(X) (((X) == 1) ? FEC_R_DES_START_1 : \
+ (((X) == 2) ? \
+ FEC_R_DES_START_2 : FEC_R_DES_START_0))
+#define FEC_X_DES_START(X) (((X) == 1) ? FEC_X_DES_START_1 : \
+ (((X) == 2) ? \
+ FEC_X_DES_START_2 : FEC_X_DES_START_0))
+#define FEC_R_BUFF_SIZE(X) (((X) == 1) ? FEC_R_BUFF_SIZE_1 : \
+ (((X) == 2) ? \
+ FEC_R_BUFF_SIZE_2 : FEC_R_BUFF_SIZE_0))
+#define FEC_R_DES_ACTIVE(X) (((X) == 1) ? FEC_R_DES_ACTIVE_1 : \
+ (((X) == 2) ? \
+ FEC_R_DES_ACTIVE_2 : FEC_R_DES_ACTIVE_0))
+#define FEC_X_DES_ACTIVE(X) (((X) == 1) ? FEC_X_DES_ACTIVE_1 : \
+ (((X) == 2) ? \
+ FEC_X_DES_ACTIVE_2 : FEC_X_DES_ACTIVE_0))
+
+#define FEC_DMA_CFG(X) (((X) == 2) ? FEC_DMA_CFG_2 : FEC_DMA_CFG_1)
+
+#define DMA_CLASS_EN (1 << 16)
+#define FEC_RCMR(X) (((X) == 2) ? FEC_RCMR_2 : FEC_RCMR_1)
+#define IDLE_SLOPE_MASK 0xffff
+#define IDLE_SLOPE_1 0x200 /* BW fraction: 0.5 */
+#define IDLE_SLOPE_2 0x200 /* BW fraction: 0.5 */
+#define IDLE_SLOPE(X) (((X) == 1) ? \
+ (IDLE_SLOPE_1 & IDLE_SLOPE_MASK) : \
+ (IDLE_SLOPE_2 & IDLE_SLOPE_MASK))
+#define RCMR_MATCHEN (0x1 << 16)
+#define RCMR_CMP_CFG(v, n) (((v) & 0x7) << (n << 2))
+#define RCMR_CMP_1 (RCMR_CMP_CFG(0, 0) | RCMR_CMP_CFG(1, 1) | \
+ RCMR_CMP_CFG(2, 2) | RCMR_CMP_CFG(3, 3))
+#define RCMR_CMP_2 (RCMR_CMP_CFG(4, 0) | RCMR_CMP_CFG(5, 1) | \
+ RCMR_CMP_CFG(6, 2) | RCMR_CMP_CFG(7, 3))
+#define RCMR_CMP(X) (((X) == 1) ? RCMR_CMP_1 : RCMR_CMP_2)
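+/* Editor's note (illustration, not part of the imported source): each
+ * RCMR_CMP_CFG(v, n) places the 3-bit compare value v at bit offset 4 * n,
+ * so the two constants above expand to:
+ *   RCMR_CMP_1 = 0x3210  (classification compare values 0-3 for ring 1)
+ *   RCMR_CMP_2 = 0x7654  (classification compare values 4-7 for ring 2)
+ */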
+#define FEC_TX_BD_FTYPE(X) (((X) & 0xf) << 20)
+
+/* The number of Tx and Rx buffers. These are allocated from the page
+ * pool. The code may assume these are powers of two, so it is best
+ * to keep them that size.
+ * We don't need to allocate pages for the transmitter. We just use
+ * the sk_buff data directly.
+ */
+
+#define FEC_ENET_RX_PAGES 256
+#define FEC_ENET_RX_FRSIZE 2048
+#define FEC_ENET_RX_FRPPG (PAGE_SIZE / FEC_ENET_RX_FRSIZE)
+#define RX_RING_SIZE (FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES)
+#define FEC_ENET_TX_FRSIZE 2048
+#define FEC_ENET_TX_FRPPG (PAGE_SIZE / FEC_ENET_TX_FRSIZE)
+#define TX_RING_SIZE 512 /* Must be power of two */
+#define TX_RING_MOD_MASK 511 /* for this to work */
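+/* Editor's note (illustration, not part of the imported source): with a
+ * typical 4 KiB PAGE_SIZE, FEC_ENET_RX_FRPPG = 4096 / 2048 = 2 frames per
+ * page, so RX_RING_SIZE = 2 * 256 = 512 descriptors, matching TX_RING_SIZE.
+ */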
+
+#define BD_ENET_RX_INT 0x00800000
+#define BD_ENET_RX_PTP ((ushort)0x0400)
+#define BD_ENET_RX_ICE 0x00000020
+#define BD_ENET_RX_PCR 0x00000010
+#define FLAG_RX_CSUM_ENABLED (BD_ENET_RX_ICE | BD_ENET_RX_PCR)
+#define FLAG_RX_CSUM_ERROR (BD_ENET_RX_ICE | BD_ENET_RX_PCR)
+
+/* Interrupt events/masks. */
+#define FEC_ENET_HBERR ((uint)0x80000000) /* Heartbeat error */
+#define FEC_ENET_BABR ((uint)0x40000000) /* Babbling receiver */
+#define FEC_ENET_BABT ((uint)0x20000000) /* Babbling transmitter */
+#define FEC_ENET_GRA ((uint)0x10000000) /* Graceful stop complete */
+#define FEC_ENET_TXF_0 ((uint)0x08000000) /* Full frame transmitted */
+#define FEC_ENET_TXF_1 ((uint)0x00000008) /* Full frame transmitted */
+#define FEC_ENET_TXF_2 ((uint)0x00000080) /* Full frame transmitted */
+#define FEC_ENET_TXB ((uint)0x04000000) /* A buffer was transmitted */
+#define FEC_ENET_RXF_0 ((uint)0x02000000) /* Full frame received */
+#define FEC_ENET_RXF_1 ((uint)0x00000002) /* Full frame received */
+#define FEC_ENET_RXF_2 ((uint)0x00000020) /* Full frame received */
+#define FEC_ENET_RXB ((uint)0x01000000) /* A buffer was received */
+#define FEC_ENET_MII ((uint)0x00800000) /* MII interrupt */
+#define FEC_ENET_EBERR ((uint)0x00400000) /* SDMA bus error */
+#define FEC_ENET_WAKEUP ((uint)0x00020000) /* Wakeup request */
+#define FEC_ENET_TXF (FEC_ENET_TXF_0 | FEC_ENET_TXF_1 | FEC_ENET_TXF_2)
+#define FEC_ENET_RXF (FEC_ENET_RXF_0 | FEC_ENET_RXF_1 | FEC_ENET_RXF_2)
+#define FEC_ENET_TS_AVAIL ((uint)0x00010000)
+#define FEC_ENET_TS_TIMER ((uint)0x00008000)
+
+#define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII | FEC_ENET_TS_TIMER)
+#define FEC_RX_DISABLED_IMASK (FEC_DEFAULT_IMASK & (~FEC_ENET_RXF))
+
+/* ENET interrupt coalescing macro define */
+#define FEC_ITR_CLK_SEL (0x1 << 30)
+#define FEC_ITR_EN (0x1 << 31)
+#define FEC_ITR_ICFT(X) (((X) & 0xff) << 20)
+#define FEC_ITR_ICTT(X) ((X) & 0xffff)
+#define FEC_ITR_ICFT_DEFAULT 200 /* Set 200 frame count threshold */
+#define FEC_ITR_ICTT_DEFAULT 1000 /* Set 1000us timer threshold */
+
+#define FEC_VLAN_TAG_LEN 0x04
+#define FEC_ETHTYPE_LEN 0x02
+
+/* Controller is ENET-MAC */
+#define FEC_QUIRK_ENET_MAC (1 << 0)
+/* Controller needs driver to swap frame */
+#define FEC_QUIRK_SWAP_FRAME (1 << 1)
+/* Controller uses gasket */
+#define FEC_QUIRK_USE_GASKET (1 << 2)
+/* Controller has GBIT support */
+#define FEC_QUIRK_HAS_GBIT (1 << 3)
+/* Controller has extend desc buffer */
+#define FEC_QUIRK_HAS_BUFDESC_EX (1 << 4)
+/* Controller has hardware checksum support */
+#define FEC_QUIRK_HAS_CSUM (1 << 5)
+/* Controller has hardware vlan support */
+#define FEC_QUIRK_HAS_VLAN (1 << 6)
+/* ENET IP errata ERR006358
+ *
+ * If the ready bit in the transmit buffer descriptor (TxBD[R]) is previously
+ * detected as not set during a prior frame transmission, then the
+ * ENET_TDAR[TDAR] bit is cleared at a later time, even if additional TxBDs
+ * were added to the ring and the ENET_TDAR[TDAR] bit is set. This results in
+ * frames not being transmitted until there is a 0-to-1 transition on
+ * ENET_TDAR[TDAR].
+ */
+#define FEC_QUIRK_ERR006358 (1 << 7)
+/* ENET IP hw AVB
+ *
+ * The i.MX6SX ENET IP adds Audio Video Bridging (AVB) feature support:
+ * - Two class indicators on receive with configurable priority
+ * - Two class indicators and a line speed timer on transmit, allowing
+ * credit-based class shapers to be implemented externally
+ * - Additional DMA registers provisioned to allow managing up to 3
+ * independent rings
+ */
+#define FEC_QUIRK_HAS_AVB (1 << 8)
+/* There is a TDAR race condition for multi-queue operation when the software
+ * sets TDAR and the UDMA clears TDAR simultaneously or in a small window
+ * (2-4 cycles). This will cause the udma_tx and udma_tx_arbiter state
+ * machines to hang.
+ * The issue exists in the i.MX6SX ENET IP.
+ */
+#define FEC_QUIRK_ERR007885 (1 << 9)
+/* The ENET block guide for the i.MX6SX (PELE) documents one issue:
+ * after setting ENET_ATCR[Capture], some cycles are needed before the counter
+ * value is captured into the register clock domain.
+ * The wait time is at least 6 cycles of the slower of the register clock and
+ * the 1588 clock. The 1588 ts_clk is fixed at 25 MHz and the register clock
+ * is 66 MHz, so the wait time must be greater than 240 ns (6 * 40 ns).
+ */
+#define FEC_QUIRK_BUG_CAPTURE (1 << 10)
+/* Controller has only one MDIO bus */
+#define FEC_QUIRK_SINGLE_MDIO (1 << 11)
+
+struct fec_enet_priv_tx_q {
+ int index;
+ unsigned char *tx_bounce[TX_RING_SIZE];
+ struct sk_buff *tx_skbuff[TX_RING_SIZE];
+
+ dma_addr_t bd_dma;
+ struct bufdesc *tx_bd_base;
+ uint tx_ring_size;
+
+ unsigned short tx_stop_threshold;
+ unsigned short tx_wake_threshold;
+
+ struct bufdesc *cur_tx;
+ struct bufdesc *dirty_tx;
+ char *tso_hdrs;
+ dma_addr_t tso_hdrs_dma;
+};
+
+struct fec_enet_priv_rx_q {
+ int index;
+ struct sk_buff *rx_skbuff[RX_RING_SIZE];
+
+ dma_addr_t bd_dma;
+ struct bufdesc *rx_bd_base;
+ uint rx_ring_size;
+
+ struct bufdesc *cur_rx;
+};
+
+/* The FEC buffer descriptors track the ring buffers. The rx_bd_base and
+ * tx_bd_base always point to the base of the buffer descriptors. The
+ * cur_rx and cur_tx point to the currently available buffer.
+ * The dirty_tx tracks the current buffer that is being sent by the
+ * controller. The cur_tx and dirty_tx are equal under both completely
+ * empty and completely full conditions. The empty/ready indicator in
+ * the buffer descriptor determines the actual condition.
+ */
+struct fec_enet_private {
+ /* Hardware registers of the FEC device */
+ void __iomem *hwp;
+
+ struct net_device *netdev;
+
+ struct clk *clk_ipg;
+ struct clk *clk_ahb;
+ struct clk *clk_ref;
+ struct clk *clk_enet_out;
+ struct clk *clk_ptp;
+
+ bool ptp_clk_on;
+ struct mutex ptp_clk_mutex;
+ unsigned int num_tx_queues;
+ unsigned int num_rx_queues;
+
+ /* The saved address of a sent-in-place packet/buffer, for skfree(). */
+ struct fec_enet_priv_tx_q *tx_queue[FEC_ENET_MAX_TX_QS];
+ struct fec_enet_priv_rx_q *rx_queue[FEC_ENET_MAX_RX_QS];
+
+ unsigned int total_tx_ring_size;
+ unsigned int total_rx_ring_size;
+
+ unsigned long work_tx;
+ unsigned long work_rx;
+ unsigned long work_ts;
+ unsigned long work_mdio;
+
+ unsigned short bufdesc_size;
+
+ struct platform_device *pdev;
+
+ int dev_id;
+
+ /* Phylib and MDIO interface */
+ struct mii_bus *mii_bus;
+ struct phy_device *phy_dev;
+ int mii_timeout;
+ uint phy_speed;
+ phy_interface_t phy_interface;
+ struct device_node *phy_node;
+ int link;
+ int full_duplex;
+ int speed;
+ struct completion mdio_done;
+ int irq[FEC_IRQ_NUM];
+ bool bufdesc_ex;
+ int pause_flag;
+ int wol_flag;
+ u32 quirks;
+
+ struct napi_struct napi;
+ int csum_flags;
+
+ struct work_struct tx_timeout_work;
+
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_caps;
+ unsigned long last_overflow_check;
+ spinlock_t tmreg_lock;
+ struct cyclecounter cc;
+ struct timecounter tc;
+ int rx_hwtstamp_filter;
+ u32 base_incval;
+ u32 cycle_speed;
+ int hwts_rx_en;
+ int hwts_tx_en;
+ struct delayed_work time_keep;
+ struct regulator *reg_phy;
+
+ unsigned int tx_align;
+ unsigned int rx_align;
+
+ /* hw interrupt coalesce */
+ unsigned int rx_pkts_itr;
+ unsigned int rx_time_itr;
+ unsigned int tx_pkts_itr;
+ unsigned int tx_time_itr;
+ unsigned int itr_clk_rate;
+
+ u32 rx_copybreak;
+
+ /* ptp clock period in ns */
+ unsigned int ptp_inc;
+
+ /* pps */
+ int pps_channel;
+ unsigned int reload_period;
+ int pps_enable;
+ unsigned int next_counter;
+};
+
+void fec_ptp_init(struct platform_device *pdev);
+void fec_ptp_start_cyclecounter(struct net_device *ndev);
+int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr);
+int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr);
+uint fec_ptp_check_pps_event(struct fec_enet_private *fep);
+
+/****************************************************************************/
+#endif /* FEC_H */
diff --git a/kernel/drivers/net/ethernet/freescale/fec_main.c b/kernel/drivers/net/ethernet/freescale/fec_main.c
new file mode 100644
index 000000000..66d47e448
--- /dev/null
+++ b/kernel/drivers/net/ethernet/freescale/fec_main.c
@@ -0,0 +1,3503 @@
+/*
+ * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
+ * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
+ *
+ * Right now, I am very wasteful with the buffers. I allocate memory
+ * pages and then divide them into 2K frame buffers. This way I know I
+ * have buffers large enough to hold one frame within one buffer descriptor.
+ * Once I get this working, I will use 64 or 128 byte CPM buffers, which
+ * will be much more memory efficient and will easily handle lots of
+ * small packets.
+ *
+ * Much better multiple PHY support by Magnus Damm.
+ * Copyright (c) 2000 Ericsson Radio Systems AB.
+ *
+ * Support for FEC controller of ColdFire processors.
+ * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com)
+ *
+ * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
+ * Copyright (c) 2004-2006 Macq Electronique SA.
+ *
+ * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <net/ip.h>
+#include <net/tso.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/icmp.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/phy.h>
+#include <linux/fec.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/regulator/consumer.h>
+#include <linux/if_vlan.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/prefetch.h>
+
+#include <asm/cacheflush.h>
+
+#include "fec.h"
+
+static void set_multicast_list(struct net_device *ndev);
+static void fec_enet_itr_coal_init(struct net_device *ndev);
+
+#define DRIVER_NAME "fec"
+
+#define FEC_ENET_GET_QUQUE(_x) ((_x == 0) ? 1 : ((_x == 1) ? 2 : 0))
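+/* Editor's note (illustration, not part of the imported source): the macro
+ * above ("QUQUE" is a long-standing upstream typo of "QUEUE") maps an
+ * iteration index to the queue service order 1, 2, 0, so on the multi-queue
+ * i.MX6SX parts the class rings 1 and 2 are serviced before the default
+ * best-effort ring 0.
+ */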
+
+/* Pause frame field and FIFO threshold */
+#define FEC_ENET_FCE (1 << 5)
+#define FEC_ENET_RSEM_V 0x84
+#define FEC_ENET_RSFL_V 16
+#define FEC_ENET_RAEM_V 0x8
+#define FEC_ENET_RAFL_V 0x8
+#define FEC_ENET_OPD_V 0xFFF0
+
+static struct platform_device_id fec_devtype[] = {
+ {
+ /* keep it for coldfire */
+ .name = DRIVER_NAME,
+ .driver_data = 0,
+ }, {
+ .name = "imx25-fec",
+ .driver_data = FEC_QUIRK_USE_GASKET,
+ }, {
+ .name = "imx27-fec",
+ .driver_data = 0,
+ }, {
+ .name = "imx28-fec",
+ .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
+ FEC_QUIRK_SINGLE_MDIO,
+ }, {
+ .name = "imx6q-fec",
+ .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
+ FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
+ FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358,
+ }, {
+ .name = "mvf600-fec",
+ .driver_data = FEC_QUIRK_ENET_MAC,
+ }, {
+ .name = "imx6sx-fec",
+ .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
+ FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
+ FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
+ FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE,
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(platform, fec_devtype);
+
+enum imx_fec_type {
+ IMX25_FEC = 1, /* runs on i.mx25/50/53 */
+ IMX27_FEC, /* runs on i.mx27/35/51 */
+ IMX28_FEC,
+ IMX6Q_FEC,
+ MVF600_FEC,
+ IMX6SX_FEC,
+};
+
+static const struct of_device_id fec_dt_ids[] = {
+ { .compatible = "fsl,imx25-fec", .data = &fec_devtype[IMX25_FEC], },
+ { .compatible = "fsl,imx27-fec", .data = &fec_devtype[IMX27_FEC], },
+ { .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], },
+ { .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], },
+ { .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], },
+ { .compatible = "fsl,imx6sx-fec", .data = &fec_devtype[IMX6SX_FEC], },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, fec_dt_ids);
+
+static unsigned char macaddr[ETH_ALEN];
+module_param_array(macaddr, byte, NULL, 0);
+MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
+
+#if defined(CONFIG_M5272)
+/*
+ * Some hardware gets its MAC address out of local flash memory.
+ * If this is non-zero, assume it is the address to get the MAC from.
+ */
+#if defined(CONFIG_NETtel)
+#define FEC_FLASHMAC 0xf0006006
+#elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES)
+#define FEC_FLASHMAC 0xf0006000
+#elif defined(CONFIG_CANCam)
+#define FEC_FLASHMAC 0xf0020000
+#elif defined (CONFIG_M5272C3)
+#define FEC_FLASHMAC (0xffe04000 + 4)
+#elif defined(CONFIG_MOD5272)
+#define FEC_FLASHMAC 0xffc0406b
+#else
+#define FEC_FLASHMAC 0
+#endif
+#endif /* CONFIG_M5272 */
+
+/* The FEC stores dest/src/type/vlan, data, and checksum for receive packets.
+ */
+#define PKT_MAXBUF_SIZE 1522
+#define PKT_MINBUF_SIZE 64
+#define PKT_MAXBLR_SIZE 1536
+
+/* FEC receive acceleration */
+#define FEC_RACC_IPDIS (1 << 1)
+#define FEC_RACC_PRODIS (1 << 2)
+#define FEC_RACC_OPTIONS (FEC_RACC_IPDIS | FEC_RACC_PRODIS)
+
+/*
+ * The 5270/5271/5280/5282/532x RX control register also contains maximum frame
+ * size bits. Other FEC hardware does not, so we need to take that into
+ * account when setting it.
+ */
+#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
+ defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM)
+#define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16)
+#else
+#define OPT_FRAME_SIZE 0
+#endif
+
+/* FEC MII MMFR bits definition */
+#define FEC_MMFR_ST (1 << 30)
+#define FEC_MMFR_OP_READ (2 << 28)
+#define FEC_MMFR_OP_WRITE (1 << 28)
+#define FEC_MMFR_PA(v) ((v & 0x1f) << 23)
+#define FEC_MMFR_RA(v) ((v & 0x1f) << 18)
+#define FEC_MMFR_TA (2 << 16)
+#define FEC_MMFR_DATA(v) (v & 0xffff)
+/* FEC ECR bits definition */
+#define FEC_ECR_MAGICEN (1 << 2)
+#define FEC_ECR_SLEEP (1 << 3)
+
+#define FEC_MII_TIMEOUT 30000 /* us */
+
+/* Transmitter timeout */
+#define TX_TIMEOUT (2 * HZ)
+
+#define FEC_PAUSE_FLAG_AUTONEG 0x1
+#define FEC_PAUSE_FLAG_ENABLE 0x2
+#define FEC_WOL_HAS_MAGIC_PACKET (0x1 << 0)
+#define FEC_WOL_FLAG_ENABLE (0x1 << 1)
+#define FEC_WOL_FLAG_SLEEP_ON (0x1 << 2)
+
+#define COPYBREAK_DEFAULT 256
+
+#define TSO_HEADER_SIZE 128
+/* Max number of allowed TCP segments for software TSO */
+#define FEC_MAX_TSO_SEGS 100
+#define FEC_MAX_SKB_DESCS (FEC_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
+
+#define IS_TSO_HEADER(txq, addr) \
+ ((addr >= txq->tso_hdrs_dma) && \
+ (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE))
+
+static int mii_cnt;
+
+static inline
+struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp,
+ struct fec_enet_private *fep,
+ int queue_id)
+{
+ struct bufdesc *new_bd = bdp + 1;
+ struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp + 1;
+ struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue_id];
+ struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue_id];
+ struct bufdesc_ex *ex_base;
+ struct bufdesc *base;
+ int ring_size;
+
+ if (bdp >= txq->tx_bd_base) {
+ base = txq->tx_bd_base;
+ ring_size = txq->tx_ring_size;
+ ex_base = (struct bufdesc_ex *)txq->tx_bd_base;
+ } else {
+ base = rxq->rx_bd_base;
+ ring_size = rxq->rx_ring_size;
+ ex_base = (struct bufdesc_ex *)rxq->rx_bd_base;
+ }
+
+ if (fep->bufdesc_ex)
+ return (struct bufdesc *)((ex_new_bd >= (ex_base + ring_size)) ?
+ ex_base : ex_new_bd);
+ else
+ return (new_bd >= (base + ring_size)) ?
+ base : new_bd;
+}
+
+static inline
+struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp,
+ struct fec_enet_private *fep,
+ int queue_id)
+{
+ struct bufdesc *new_bd = bdp - 1;
+ struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp - 1;
+ struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue_id];
+ struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue_id];
+ struct bufdesc_ex *ex_base;
+ struct bufdesc *base;
+ int ring_size;
+
+ if (bdp >= txq->tx_bd_base) {
+ base = txq->tx_bd_base;
+ ring_size = txq->tx_ring_size;
+ ex_base = (struct bufdesc_ex *)txq->tx_bd_base;
+ } else {
+ base = rxq->rx_bd_base;
+ ring_size = rxq->rx_ring_size;
+ ex_base = (struct bufdesc_ex *)rxq->rx_bd_base;
+ }
+
+ if (fep->bufdesc_ex)
+ return (struct bufdesc *)((ex_new_bd < ex_base) ?
+ (ex_new_bd + ring_size) : ex_new_bd);
+ else
+ return (new_bd < base) ? (new_bd + ring_size) : new_bd;
+}
+
+static int fec_enet_get_bd_index(struct bufdesc *base, struct bufdesc *bdp,
+ struct fec_enet_private *fep)
+{
+ return ((const char *)bdp - (const char *)base) / fep->bufdesc_size;
+}
+
+static int fec_enet_get_free_txdesc_num(struct fec_enet_private *fep,
+ struct fec_enet_priv_tx_q *txq)
+{
+ int entries;
+
+ entries = ((const char *)txq->dirty_tx -
+ (const char *)txq->cur_tx) / fep->bufdesc_size - 1;
+
+ return entries > 0 ? entries : entries + txq->tx_ring_size;
+}
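+/* Editor's note (illustration, not part of the imported source): the "- 1"
+ * above keeps one slot in reserve so a full ring can be told apart from an
+ * empty one. For example, with dirty_tx == cur_tx and a 512-entry ring the
+ * function returns -1 + 512 = 511 free descriptors.
+ */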
+
+static void swap_buffer(void *bufaddr, int len)
+{
+ int i;
+ unsigned int *buf = bufaddr;
+
+ for (i = 0; i < len; i += 4, buf++)
+ swab32s(buf);
+}
+
+static void swap_buffer2(void *dst_buf, void *src_buf, int len)
+{
+ int i;
+ unsigned int *src = src_buf;
+ unsigned int *dst = dst_buf;
+
+ for (i = 0; i < len; i += 4, src++, dst++)
+ *dst = swab32p(src);
+}
+
+static void fec_dump(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct bufdesc *bdp;
+ struct fec_enet_priv_tx_q *txq;
+ int index = 0;
+
+ netdev_info(ndev, "TX ring dump\n");
+ pr_info("Nr SC addr len SKB\n");
+
+ txq = fep->tx_queue[0];
+ bdp = txq->tx_bd_base;
+
+ do {
+ pr_info("%3u %c%c 0x%04x 0x%08lx %4u %p\n",
+ index,
+ bdp == txq->cur_tx ? 'S' : ' ',
+ bdp == txq->dirty_tx ? 'H' : ' ',
+ bdp->cbd_sc, bdp->cbd_bufaddr, bdp->cbd_datlen,
+ txq->tx_skbuff[index]);
+ bdp = fec_enet_get_nextdesc(bdp, fep, 0);
+ index++;
+ } while (bdp != txq->tx_bd_base);
+}
+
+static inline bool is_ipv4_pkt(struct sk_buff *skb)
+{
+ return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
+}
+
+static int
+fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
+{
+ /* Only run for packets requiring a checksum. */
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
+ if (unlikely(skb_cow_head(skb, 0)))
+ return -1;
+
+ if (is_ipv4_pkt(skb))
+ ip_hdr(skb)->check = 0;
+ *(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = 0;
+
+ return 0;
+}
+
+static int
+fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
+ struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct bufdesc *bdp = txq->cur_tx;
+ struct bufdesc_ex *ebdp;
+ int nr_frags = skb_shinfo(skb)->nr_frags;
+ unsigned short queue = skb_get_queue_mapping(skb);
+ int frag, frag_len;
+ unsigned short status;
+ unsigned int estatus = 0;
+ skb_frag_t *this_frag;
+ unsigned int index;
+ void *bufaddr;
+ dma_addr_t addr;
+ int i;
+
+ for (frag = 0; frag < nr_frags; frag++) {
+ this_frag = &skb_shinfo(skb)->frags[frag];
+ bdp = fec_enet_get_nextdesc(bdp, fep, queue);
+ ebdp = (struct bufdesc_ex *)bdp;
+
+ status = bdp->cbd_sc;
+ status &= ~BD_ENET_TX_STATS;
+ status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
+ frag_len = skb_shinfo(skb)->frags[frag].size;
+
+ /* Handle the last BD specially */
+ if (frag == nr_frags - 1) {
+ status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
+ if (fep->bufdesc_ex) {
+ estatus |= BD_ENET_TX_INT;
+ if (unlikely(skb_shinfo(skb)->tx_flags &
+ SKBTX_HW_TSTAMP && fep->hwts_tx_en))
+ estatus |= BD_ENET_TX_TS;
+ }
+ }
+
+ if (fep->bufdesc_ex) {
+ if (fep->quirks & FEC_QUIRK_HAS_AVB)
+ estatus |= FEC_TX_BD_FTYPE(queue);
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
+ ebdp->cbd_bdu = 0;
+ ebdp->cbd_esc = estatus;
+ }
+
+ bufaddr = page_address(this_frag->page.p) + this_frag->page_offset;
+
+ index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
+ if (((unsigned long) bufaddr) & fep->tx_align ||
+ fep->quirks & FEC_QUIRK_SWAP_FRAME) {
+ memcpy(txq->tx_bounce[index], bufaddr, frag_len);
+ bufaddr = txq->tx_bounce[index];
+
+ if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
+ swap_buffer(bufaddr, frag_len);
+ }
+
+ addr = dma_map_single(&fep->pdev->dev, bufaddr, frag_len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&fep->pdev->dev, addr)) {
+ dev_kfree_skb_any(skb);
+ if (net_ratelimit())
+ netdev_err(ndev, "Tx DMA memory map failed\n");
+ goto dma_mapping_error;
+ }
+
+ bdp->cbd_bufaddr = addr;
+ bdp->cbd_datlen = frag_len;
+ bdp->cbd_sc = status;
+ }
+
+ txq->cur_tx = bdp;
+
+ return 0;
+
+dma_mapping_error:
+ bdp = txq->cur_tx;
+ for (i = 0; i < frag; i++) {
+ bdp = fec_enet_get_nextdesc(bdp, fep, queue);
+ dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
+ bdp->cbd_datlen, DMA_TO_DEVICE);
+ }
+ return NETDEV_TX_OK;
+}
+
+static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
+ struct sk_buff *skb, struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ int nr_frags = skb_shinfo(skb)->nr_frags;
+ struct bufdesc *bdp, *last_bdp;
+ void *bufaddr;
+ dma_addr_t addr;
+ unsigned short status;
+ unsigned short buflen;
+ unsigned short queue;
+ unsigned int estatus = 0;
+ unsigned int index;
+ int entries_free;
+ int ret;
+
+ entries_free = fec_enet_get_free_txdesc_num(fep, txq);
+ if (entries_free < MAX_SKB_FRAGS + 1) {
+ dev_kfree_skb_any(skb);
+ if (net_ratelimit())
+ netdev_err(ndev, "NOT enough BD for SG!\n");
+ return NETDEV_TX_OK;
+ }
+
+ /* Protocol checksum off-load for TCP and UDP. */
+ if (fec_enet_clear_csum(skb, ndev)) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ /* Fill in a Tx ring entry */
+ bdp = txq->cur_tx;
+ status = bdp->cbd_sc;
+ status &= ~BD_ENET_TX_STATS;
+
+ /* Set buffer length and buffer pointer */
+ bufaddr = skb->data;
+ buflen = skb_headlen(skb);
+
+ queue = skb_get_queue_mapping(skb);
+ index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
+ if (((unsigned long) bufaddr) & fep->tx_align ||
+ fep->quirks & FEC_QUIRK_SWAP_FRAME) {
+ memcpy(txq->tx_bounce[index], skb->data, buflen);
+ bufaddr = txq->tx_bounce[index];
+
+ if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
+ swap_buffer(bufaddr, buflen);
+ }
+
+ /* Push the data cache so the CPM does not get stale memory data. */
+ addr = dma_map_single(&fep->pdev->dev, bufaddr, buflen, DMA_TO_DEVICE);
+ if (dma_mapping_error(&fep->pdev->dev, addr)) {
+ dev_kfree_skb_any(skb);
+ if (net_ratelimit())
+ netdev_err(ndev, "Tx DMA memory map failed\n");
+ return NETDEV_TX_OK;
+ }
+
+ if (nr_frags) {
+ ret = fec_enet_txq_submit_frag_skb(txq, skb, ndev);
+ if (ret)
+ return ret;
+ } else {
+ status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
+ if (fep->bufdesc_ex) {
+ estatus = BD_ENET_TX_INT;
+ if (unlikely(skb_shinfo(skb)->tx_flags &
+ SKBTX_HW_TSTAMP && fep->hwts_tx_en))
+ estatus |= BD_ENET_TX_TS;
+ }
+ }
+
+ if (fep->bufdesc_ex) {
+
+ struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+ fep->hwts_tx_en))
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
+ if (fep->quirks & FEC_QUIRK_HAS_AVB)
+ estatus |= FEC_TX_BD_FTYPE(queue);
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
+
+ ebdp->cbd_bdu = 0;
+ ebdp->cbd_esc = estatus;
+ }
+
+ last_bdp = txq->cur_tx;
+ index = fec_enet_get_bd_index(txq->tx_bd_base, last_bdp, fep);
+ /* Save skb pointer */
+ txq->tx_skbuff[index] = skb;
+
+ bdp->cbd_datlen = buflen;
+ bdp->cbd_bufaddr = addr;
+
+ /* Send it on its way. Tell FEC it's ready, interrupt when done,
+ * it's the last BD of the frame, and to put the CRC on the end.
+ */
+ status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
+ bdp->cbd_sc = status;
+
+ /* If this was the last BD in the ring, start at the beginning again. */
+ bdp = fec_enet_get_nextdesc(last_bdp, fep, queue);
+
+ skb_tx_timestamp(skb);
+
+ txq->cur_tx = bdp;
+
+ /* Trigger transmission start */
+ writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue));
+
+ return 0;
+}
+
+static int
+fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
+ struct net_device *ndev,
+ struct bufdesc *bdp, int index, char *data,
+ int size, bool last_tcp, bool is_last)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
+ unsigned short queue = skb_get_queue_mapping(skb);
+ unsigned short status;
+ unsigned int estatus = 0;
+ dma_addr_t addr;
+
+ status = bdp->cbd_sc;
+ status &= ~BD_ENET_TX_STATS;
+
+ status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
+
+ if (((unsigned long) data) & fep->tx_align ||
+ fep->quirks & FEC_QUIRK_SWAP_FRAME) {
+ memcpy(txq->tx_bounce[index], data, size);
+ data = txq->tx_bounce[index];
+
+ if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
+ swap_buffer(data, size);
+ }
+
+ addr = dma_map_single(&fep->pdev->dev, data, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(&fep->pdev->dev, addr)) {
+ dev_kfree_skb_any(skb);
+ if (net_ratelimit())
+ netdev_err(ndev, "Tx DMA memory map failed\n");
+ return NETDEV_TX_BUSY;
+ }
+
+ bdp->cbd_datlen = size;
+ bdp->cbd_bufaddr = addr;
+
+ if (fep->bufdesc_ex) {
+ if (fep->quirks & FEC_QUIRK_HAS_AVB)
+ estatus |= FEC_TX_BD_FTYPE(queue);
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
+ ebdp->cbd_bdu = 0;
+ ebdp->cbd_esc = estatus;
+ }
+
+ /* Handle the last BD specially */
+ if (last_tcp)
+ status |= (BD_ENET_TX_LAST | BD_ENET_TX_TC);
+ if (is_last) {
+ status |= BD_ENET_TX_INTR;
+ if (fep->bufdesc_ex)
+ ebdp->cbd_esc |= BD_ENET_TX_INT;
+ }
+
+ bdp->cbd_sc = status;
+
+ return 0;
+}
+
+static int
+fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
+ struct sk_buff *skb, struct net_device *ndev,
+ struct bufdesc *bdp, int index)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
+ unsigned short queue = skb_get_queue_mapping(skb);
+ void *bufaddr;
+ unsigned long dmabuf;
+ unsigned short status;
+ unsigned int estatus = 0;
+
+ status = bdp->cbd_sc;
+ status &= ~BD_ENET_TX_STATS;
+ status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
+
+ bufaddr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
+ dmabuf = txq->tso_hdrs_dma + index * TSO_HEADER_SIZE;
+ if (((unsigned long)bufaddr) & fep->tx_align ||
+ fep->quirks & FEC_QUIRK_SWAP_FRAME) {
+ memcpy(txq->tx_bounce[index], skb->data, hdr_len);
+ bufaddr = txq->tx_bounce[index];
+
+ if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
+ swap_buffer(bufaddr, hdr_len);
+
+ dmabuf = dma_map_single(&fep->pdev->dev, bufaddr,
+ hdr_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&fep->pdev->dev, dmabuf)) {
+ dev_kfree_skb_any(skb);
+ if (net_ratelimit())
+ netdev_err(ndev, "Tx DMA memory map failed\n");
+ return NETDEV_TX_BUSY;
+ }
+ }
+
+ bdp->cbd_bufaddr = dmabuf;
+ bdp->cbd_datlen = hdr_len;
+
+ if (fep->bufdesc_ex) {
+ if (fep->quirks & FEC_QUIRK_HAS_AVB)
+ estatus |= FEC_TX_BD_FTYPE(queue);
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
+ ebdp->cbd_bdu = 0;
+ ebdp->cbd_esc = estatus;
+ }
+
+ bdp->cbd_sc = status;
+
+ return 0;
+}
+
+static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
+ struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ int total_len, data_left;
+ struct bufdesc *bdp = txq->cur_tx;
+ unsigned short queue = skb_get_queue_mapping(skb);
+ struct tso_t tso;
+ unsigned int index = 0;
+ int ret;
+
+ if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(fep, txq)) {
+ dev_kfree_skb_any(skb);
+ if (net_ratelimit())
+ netdev_err(ndev, "NOT enough BD for TSO!\n");
+ return NETDEV_TX_OK;
+ }
+
+ /* Protocol checksum off-load for TCP and UDP. */
+ if (fec_enet_clear_csum(skb, ndev)) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ /* Initialize the TSO handler, and prepare the first payload */
+ tso_start(skb, &tso);
+
+ total_len = skb->len - hdr_len;
+ while (total_len > 0) {
+ char *hdr;
+
+ index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
+ data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
+ total_len -= data_left;
+
+ /* prepare packet headers: MAC + IP + TCP */
+ hdr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
+ tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
+ ret = fec_enet_txq_put_hdr_tso(txq, skb, ndev, bdp, index);
+ if (ret)
+ goto err_release;
+
+ while (data_left > 0) {
+ int size;
+
+ size = min_t(int, tso.size, data_left);
+ bdp = fec_enet_get_nextdesc(bdp, fep, queue);
+ index = fec_enet_get_bd_index(txq->tx_bd_base,
+ bdp, fep);
+ ret = fec_enet_txq_put_data_tso(txq, skb, ndev,
+ bdp, index,
+ tso.data, size,
+ size == data_left,
+ total_len == 0);
+ if (ret)
+ goto err_release;
+
+ data_left -= size;
+ tso_build_data(skb, &tso, size);
+ }
+
+ bdp = fec_enet_get_nextdesc(bdp, fep, queue);
+ }
+
+ /* Save skb pointer */
+ txq->tx_skbuff[index] = skb;
+
+ skb_tx_timestamp(skb);
+ txq->cur_tx = bdp;
+
+ /* Trigger transmission start */
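+ /* Editor's note (illustration, not part of the imported source): on parts
+ * affected by ERR007885 the TDAR register is read back up to four times and
+ * only rewritten if it reads back as inactive, avoiding the TDAR set/clear
+ * race described for FEC_QUIRK_ERR007885 in fec.h.
+ */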
+ if (!(fep->quirks & FEC_QUIRK_ERR007885) ||
+ !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) ||
+ !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) ||
+ !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) ||
+ !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)))
+ writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue));
+
+ return 0;
+
+err_release:
+ /* TODO: Release all used data descriptors for TSO */
+ return ret;
+}
+
+static netdev_tx_t
+fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ int entries_free;
+ unsigned short queue;
+ struct fec_enet_priv_tx_q *txq;
+ struct netdev_queue *nq;
+ int ret;
+
+ queue = skb_get_queue_mapping(skb);
+ txq = fep->tx_queue[queue];
+ nq = netdev_get_tx_queue(ndev, queue);
+
+ if (skb_is_gso(skb))
+ ret = fec_enet_txq_submit_tso(txq, skb, ndev);
+ else
+ ret = fec_enet_txq_submit_skb(txq, skb, ndev);
+ if (ret)
+ return ret;
+
+ entries_free = fec_enet_get_free_txdesc_num(fep, txq);
+ if (entries_free <= txq->tx_stop_threshold)
+ netif_tx_stop_queue(nq);
+
+ return NETDEV_TX_OK;
+}
+
+/* Init RX & TX buffer descriptors
+ */
+static void fec_enet_bd_init(struct net_device *dev)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+ struct fec_enet_priv_tx_q *txq;
+ struct fec_enet_priv_rx_q *rxq;
+ struct bufdesc *bdp;
+ unsigned int i;
+ unsigned int q;
+
+ for (q = 0; q < fep->num_rx_queues; q++) {
+ /* Initialize the receive buffer descriptors. */
+ rxq = fep->rx_queue[q];
+ bdp = rxq->rx_bd_base;
+
+ for (i = 0; i < rxq->rx_ring_size; i++) {
+
+ /* Initialize the BD for every fragment in the page. */
+ if (bdp->cbd_bufaddr)
+ bdp->cbd_sc = BD_ENET_RX_EMPTY;
+ else
+ bdp->cbd_sc = 0;
+ bdp = fec_enet_get_nextdesc(bdp, fep, q);
+ }
+
+ /* Set the last buffer to wrap */
+ bdp = fec_enet_get_prevdesc(bdp, fep, q);
+ bdp->cbd_sc |= BD_SC_WRAP;
+
+ rxq->cur_rx = rxq->rx_bd_base;
+ }
+
+ for (q = 0; q < fep->num_tx_queues; q++) {
+ /* ...and the same for transmit */
+ txq = fep->tx_queue[q];
+ bdp = txq->tx_bd_base;
+ txq->cur_tx = bdp;
+
+ for (i = 0; i < txq->tx_ring_size; i++) {
+ /* Initialize the BD for every fragment in the page. */
+ bdp->cbd_sc = 0;
+ if (txq->tx_skbuff[i]) {
+ dev_kfree_skb_any(txq->tx_skbuff[i]);
+ txq->tx_skbuff[i] = NULL;
+ }
+ bdp->cbd_bufaddr = 0;
+ bdp = fec_enet_get_nextdesc(bdp, fep, q);
+ }
+
+ /* Set the last buffer to wrap */
+ bdp = fec_enet_get_prevdesc(bdp, fep, q);
+ bdp->cbd_sc |= BD_SC_WRAP;
+ txq->dirty_tx = bdp;
+ }
+}
+
+static void fec_enet_active_rxring(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ int i;
+
+ for (i = 0; i < fep->num_rx_queues; i++)
+ writel(0, fep->hwp + FEC_R_DES_ACTIVE(i));
+}
+
+static void fec_enet_enable_ring(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct fec_enet_priv_tx_q *txq;
+ struct fec_enet_priv_rx_q *rxq;
+ int i;
+
+ for (i = 0; i < fep->num_rx_queues; i++) {
+ rxq = fep->rx_queue[i];
+ writel(rxq->bd_dma, fep->hwp + FEC_R_DES_START(i));
+ writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i));
+
+ /* enable DMA1/2 */
+ if (i)
+ writel(RCMR_MATCHEN | RCMR_CMP(i),
+ fep->hwp + FEC_RCMR(i));
+ }
+
+ for (i = 0; i < fep->num_tx_queues; i++) {
+ txq = fep->tx_queue[i];
+ writel(txq->bd_dma, fep->hwp + FEC_X_DES_START(i));
+
+ /* enable DMA1/2 */
+ if (i)
+ writel(DMA_CLASS_EN | IDLE_SLOPE(i),
+ fep->hwp + FEC_DMA_CFG(i));
+ }
+}
+
+static void fec_enet_reset_skb(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct fec_enet_priv_tx_q *txq;
+ int i, j;
+
+ for (i = 0; i < fep->num_tx_queues; i++) {
+ txq = fep->tx_queue[i];
+
+ for (j = 0; j < txq->tx_ring_size; j++) {
+ if (txq->tx_skbuff[j]) {
+ dev_kfree_skb_any(txq->tx_skbuff[j]);
+ txq->tx_skbuff[j] = NULL;
+ }
+ }
+ }
+}
+
+/*
+ * This function is called to start or restart the FEC during a link
+ * change, transmit timeout, or to reconfigure the FEC. The network
+ * packet processing for this device must be stopped before this call.
+ */
+static void
+fec_restart(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ u32 val;
+ u32 temp_mac[2];
+ u32 rcntl = OPT_FRAME_SIZE | 0x04;
+ u32 ecntl = 0x2; /* ETHEREN */
+
+ /* Whack a reset. We should wait for this.
+ * For the i.MX6SX SoC, the ENET block uses the AXI bus, so we disable
+ * the MAC instead of resetting it.
+ */
+ if (fep->quirks & FEC_QUIRK_HAS_AVB) {
+ writel(0, fep->hwp + FEC_ECNTRL);
+ } else {
+ writel(1, fep->hwp + FEC_ECNTRL);
+ udelay(10);
+ }
+
+ /*
+ * An enet-mac reset also resets the MAC address registers,
+ * so we need to reconfigure them.
+ */
+ if (fep->quirks & FEC_QUIRK_ENET_MAC) {
+ memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
+ writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW);
+ writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH);
+ }
+
+ /* Clear any outstanding interrupt. */
+ writel(0xffffffff, fep->hwp + FEC_IEVENT);
+
+ fec_enet_bd_init(ndev);
+
+ fec_enet_enable_ring(ndev);
+
+ /* Reset tx SKB buffers. */
+ fec_enet_reset_skb(ndev);
+
+ /* Enable MII mode */
+ if (fep->full_duplex == DUPLEX_FULL) {
+ /* FD enable */
+ writel(0x04, fep->hwp + FEC_X_CNTRL);
+ } else {
+ /* No Rcv on Xmit */
+ rcntl |= 0x02;
+ writel(0x0, fep->hwp + FEC_X_CNTRL);
+ }
+
+ /* Set MII speed */
+ writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
+
+#if !defined(CONFIG_M5272)
+ /* set RX checksum */
+ val = readl(fep->hwp + FEC_RACC);
+ if (fep->csum_flags & FLAG_RX_CSUM_ENABLED)
+ val |= FEC_RACC_OPTIONS;
+ else
+ val &= ~FEC_RACC_OPTIONS;
+ writel(val, fep->hwp + FEC_RACC);
+#endif
+
+ /*
+ * The phy interface and speed need to get configured
+ * differently on enet-mac.
+ */
+ if (fep->quirks & FEC_QUIRK_ENET_MAC) {
+ /* Enable flow control and length check */
+ rcntl |= 0x40000000 | 0x00000020;
+
+ /* RGMII, RMII or MII */
+ if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII ||
+ fep->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ fep->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID ||
+ fep->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID)
+ rcntl |= (1 << 6);
+ else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
+ rcntl |= (1 << 8);
+ else
+ rcntl &= ~(1 << 8);
+
+ /* 1G, 100M or 10M */
+ if (fep->phy_dev) {
+ if (fep->phy_dev->speed == SPEED_1000)
+ ecntl |= (1 << 5);
+ else if (fep->phy_dev->speed == SPEED_100)
+ rcntl &= ~(1 << 9);
+ else
+ rcntl |= (1 << 9);
+ }
+ } else {
+#ifdef FEC_MIIGSK_ENR
+ if (fep->quirks & FEC_QUIRK_USE_GASKET) {
+ u32 cfgr;
+ /* disable the gasket and wait */
+ writel(0, fep->hwp + FEC_MIIGSK_ENR);
+ while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
+ udelay(1);
+
+ /*
+ * configure the gasket:
+ * RMII, 50 MHz, no loopback, no echo
+ * MII, 25 MHz, no loopback, no echo
+ */
+ cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
+ ? BM_MIIGSK_CFGR_RMII : BM_MIIGSK_CFGR_MII;
+ if (fep->phy_dev && fep->phy_dev->speed == SPEED_10)
+ cfgr |= BM_MIIGSK_CFGR_FRCONT_10M;
+ writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR);
+
+ /* re-enable the gasket */
+ writel(2, fep->hwp + FEC_MIIGSK_ENR);
+ }
+#endif
+ }
+
+#if !defined(CONFIG_M5272)
+ /* enable pause frame */
+ if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) ||
+ ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) &&
+ fep->phy_dev && fep->phy_dev->pause)) {
+ rcntl |= FEC_ENET_FCE;
+
+ /* set FIFO threshold parameter to reduce overrun */
+ writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM);
+ writel(FEC_ENET_RSFL_V, fep->hwp + FEC_R_FIFO_RSFL);
+ writel(FEC_ENET_RAEM_V, fep->hwp + FEC_R_FIFO_RAEM);
+ writel(FEC_ENET_RAFL_V, fep->hwp + FEC_R_FIFO_RAFL);
+
+ /* OPD */
+ writel(FEC_ENET_OPD_V, fep->hwp + FEC_OPD);
+ } else {
+ rcntl &= ~FEC_ENET_FCE;
+ }
+#endif /* !defined(CONFIG_M5272) */
+
+ writel(rcntl, fep->hwp + FEC_R_CNTRL);
+
+ /* Setup multicast filter. */
+ set_multicast_list(ndev);
+#ifndef CONFIG_M5272
+ writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
+ writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
+#endif
+
+ if (fep->quirks & FEC_QUIRK_ENET_MAC) {
+ /* enable ENET endian swap */
+ ecntl |= (1 << 8);
+ /* enable ENET store and forward mode */
+ writel(1 << 8, fep->hwp + FEC_X_WMRK);
+ }
+
+ if (fep->bufdesc_ex)
+ ecntl |= (1 << 4);
+
+#ifndef CONFIG_M5272
+ /* Enable the MIB statistic event counters */
+ writel(0 << 31, fep->hwp + FEC_MIB_CTRLSTAT);
+#endif
+
+ /* And last, enable the transmit and receive processing */
+ writel(ecntl, fep->hwp + FEC_ECNTRL);
+ fec_enet_active_rxring(ndev);
+
+ if (fep->bufdesc_ex)
+ fec_ptp_start_cyclecounter(ndev);
+
+ /* Enable interrupts we wish to service */
+ if (fep->link)
+ writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
+ else
+ writel(FEC_ENET_MII, fep->hwp + FEC_IMASK);
+
+ /* Init the interrupt coalescing */
+ fec_enet_itr_coal_init(ndev);
+
+}
+
+static void
+fec_stop(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
+ u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);
+ u32 val;
+
+ /* We cannot expect a graceful transmit stop without link !!! */
+ if (fep->link) {
+ writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
+ udelay(10);
+ if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
+ netdev_err(ndev, "Graceful transmit stop did not complete!\n");
+ }
+
+ /* Whack a reset. We should wait for this.
+ * For the i.MX6SX SoC, the ENET block is on the AXI bus, so we
+ * disable the MAC instead of resetting it.
+ */
+ if (!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
+ if (fep->quirks & FEC_QUIRK_HAS_AVB) {
+ writel(0, fep->hwp + FEC_ECNTRL);
+ } else {
+ writel(1, fep->hwp + FEC_ECNTRL);
+ udelay(10);
+ }
+ writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
+ } else {
+ writel(FEC_DEFAULT_IMASK | FEC_ENET_WAKEUP, fep->hwp + FEC_IMASK);
+ val = readl(fep->hwp + FEC_ECNTRL);
+ val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
+ writel(val, fep->hwp + FEC_ECNTRL);
+
+ if (pdata && pdata->sleep_mode_enable)
+ pdata->sleep_mode_enable(true);
+ }
+ writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
+
+ /* We have to keep ENET enabled to have MII interrupt stay working */
+ if (fep->quirks & FEC_QUIRK_ENET_MAC &&
+ !(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
+ writel(2, fep->hwp + FEC_ECNTRL);
+ writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
+ }
+}
+
+
+static void
+fec_timeout(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ fec_dump(ndev);
+
+ ndev->stats.tx_errors++;
+
+ schedule_work(&fep->tx_timeout_work);
+}
+
+static void fec_enet_timeout_work(struct work_struct *work)
+{
+ struct fec_enet_private *fep =
+ container_of(work, struct fec_enet_private, tx_timeout_work);
+ struct net_device *ndev = fep->netdev;
+
+ rtnl_lock();
+ if (netif_device_present(ndev) || netif_running(ndev)) {
+ napi_disable(&fep->napi);
+ netif_tx_lock_bh(ndev);
+ fec_restart(ndev);
+ netif_wake_queue(ndev);
+ netif_tx_unlock_bh(ndev);
+ napi_enable(&fep->napi);
+ }
+ rtnl_unlock();
+}
+
+static void
+fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts,
+ struct skb_shared_hwtstamps *hwtstamps)
+{
+ unsigned long flags;
+ u64 ns;
+
+ spin_lock_irqsave(&fep->tmreg_lock, flags);
+ ns = timecounter_cyc2time(&fep->tc, ts);
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+
+ memset(hwtstamps, 0, sizeof(*hwtstamps));
+ hwtstamps->hwtstamp = ns_to_ktime(ns);
+}
+
+static void
+fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
+{
+ struct fec_enet_private *fep;
+ struct bufdesc *bdp;
+ unsigned short status;
+ struct sk_buff *skb;
+ struct fec_enet_priv_tx_q *txq;
+ struct netdev_queue *nq;
+ int index = 0;
+ int entries_free;
+
+ fep = netdev_priv(ndev);
+
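+ /* queue_id arrives as a work_tx bit index; FEC_ENET_GET_QUQUE() maps
+ * it back to the hardware queue number (bits 0/1/2 -> queues 1/2/0).
+ */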
+ queue_id = FEC_ENET_GET_QUQUE(queue_id);
+
+ txq = fep->tx_queue[queue_id];
+ /* get the netdev tx queue for this hardware queue */
+ nq = netdev_get_tx_queue(ndev, queue_id);
+ bdp = txq->dirty_tx;
+
+ /* get next bdp of dirty_tx */
+ bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
+
+ while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
+
+ /* current queue is empty */
+ if (bdp == txq->cur_tx)
+ break;
+
+ index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
+
+ skb = txq->tx_skbuff[index];
+ txq->tx_skbuff[index] = NULL;
+ if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr))
+ dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
+ bdp->cbd_datlen, DMA_TO_DEVICE);
+ bdp->cbd_bufaddr = 0;
+ if (!skb) {
+ bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
+ continue;
+ }
+
+ /* Check for errors. */
+ if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
+ BD_ENET_TX_RL | BD_ENET_TX_UN |
+ BD_ENET_TX_CSL)) {
+ ndev->stats.tx_errors++;
+ if (status & BD_ENET_TX_HB) /* No heartbeat */
+ ndev->stats.tx_heartbeat_errors++;
+ if (status & BD_ENET_TX_LC) /* Late collision */
+ ndev->stats.tx_window_errors++;
+ if (status & BD_ENET_TX_RL) /* Retrans limit */
+ ndev->stats.tx_aborted_errors++;
+ if (status & BD_ENET_TX_UN) /* Underrun */
+ ndev->stats.tx_fifo_errors++;
+ if (status & BD_ENET_TX_CSL) /* Carrier lost */
+ ndev->stats.tx_carrier_errors++;
+ } else {
+ ndev->stats.tx_packets++;
+ ndev->stats.tx_bytes += skb->len;
+ }
+
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) &&
+ fep->bufdesc_ex) {
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+
+ fec_enet_hwtstamp(fep, ebdp->ts, &shhwtstamps);
+ skb_tstamp_tx(skb, &shhwtstamps);
+ }
+
+ /* Deferred means some collisions occurred during transmit,
+ * but we eventually sent the packet OK.
+ */
+ if (status & BD_ENET_TX_DEF)
+ ndev->stats.collisions++;
+
+ /* Free the sk buffer associated with this last transmit */
+ dev_kfree_skb_any(skb);
+
+ txq->dirty_tx = bdp;
+
+ /* Update pointer to next buffer descriptor to be transmitted */
+ bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
+
+ /* Since we have freed up a buffer, the ring is no longer full
+ */
+ if (netif_queue_stopped(ndev)) {
+ entries_free = fec_enet_get_free_txdesc_num(fep, txq);
+ if (entries_free >= txq->tx_wake_threshold)
+ netif_tx_wake_queue(nq);
+ }
+ }
+
+ /* ERR006538: Keep the transmitter going */
+ if (bdp != txq->cur_tx &&
+ readl(fep->hwp + FEC_X_DES_ACTIVE(queue_id)) == 0)
+ writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue_id));
+}
+
+static void
+fec_enet_tx(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ u16 queue_id;
+ /* First process class A queue, then Class B and Best Effort queue */
+ for_each_set_bit(queue_id, &fep->work_tx, FEC_ENET_MAX_TX_QS) {
+ clear_bit(queue_id, &fep->work_tx);
+ fec_enet_tx_queue(ndev, queue_id);
+ }
+ return;
+}
+
+static int
+fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff *skb)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ int off;
+
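+ /* align skb->data to the controller's RX alignment (rx_align + 1
+ * bytes) before DMA mapping
+ */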
+ off = ((unsigned long)skb->data) & fep->rx_align;
+ if (off)
+ skb_reserve(skb, fep->rx_align + 1 - off);
+
+ bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data,
+ FEC_ENET_RX_FRSIZE - fep->rx_align,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
+ if (net_ratelimit())
+ netdev_err(ndev, "Rx DMA memory map failed\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
+ struct bufdesc *bdp, u32 length, bool swap)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct sk_buff *new_skb;
+
+ if (length > fep->rx_copybreak)
+ return false;
+
+ new_skb = netdev_alloc_skb(ndev, length);
+ if (!new_skb)
+ return false;
+
+ dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr,
+ FEC_ENET_RX_FRSIZE - fep->rx_align,
+ DMA_FROM_DEVICE);
+ if (!swap)
+ memcpy(new_skb->data, (*skb)->data, length);
+ else
+ swap_buffer2(new_skb->data, (*skb)->data, length);
+ *skb = new_skb;
+
+ return true;
+}
+
+/* During a receive, the cur_rx points to the current incoming buffer.
+ * When we update through the ring, if the next incoming buffer has
+ * not been given to the system, we just set the empty indicator,
+ * effectively tossing the packet.
+ */
+static int
+fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct fec_enet_priv_rx_q *rxq;
+ struct bufdesc *bdp;
+ unsigned short status;
+ struct sk_buff *skb_new = NULL;
+ struct sk_buff *skb;
+ ushort pkt_len;
+ __u8 *data;
+ int pkt_received = 0;
+ struct bufdesc_ex *ebdp = NULL;
+ bool vlan_packet_rcvd = false;
+ u16 vlan_tag;
+ int index = 0;
+ bool is_copybreak;
+ bool need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME;
+
+#ifdef CONFIG_M532x
+ flush_cache_all();
+#endif
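+ /* map the work_rx bit index back to the hardware queue number */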
+ queue_id = FEC_ENET_GET_QUQUE(queue_id);
+ rxq = fep->rx_queue[queue_id];
+
+ /* First, grab all of the stats for the incoming packet.
+ * These get messed up if we get called due to a busy condition.
+ */
+ bdp = rxq->cur_rx;
+
+ while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {
+
+ if (pkt_received >= budget)
+ break;
+ pkt_received++;
+
+ /* Since we have allocated space to hold a complete frame,
+ * the last indicator should be set.
+ */
+ if ((status & BD_ENET_RX_LAST) == 0)
+ netdev_err(ndev, "rcv is not +last\n");
+
+
+ /* Check for errors. */
+ if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
+ BD_ENET_RX_CR | BD_ENET_RX_OV)) {
+ ndev->stats.rx_errors++;
+ if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
+ /* Frame too long or too short. */
+ ndev->stats.rx_length_errors++;
+ }
+ if (status & BD_ENET_RX_NO) /* Frame alignment */
+ ndev->stats.rx_frame_errors++;
+ if (status & BD_ENET_RX_CR) /* CRC Error */
+ ndev->stats.rx_crc_errors++;
+ if (status & BD_ENET_RX_OV) /* FIFO overrun */
+ ndev->stats.rx_fifo_errors++;
+ }
+
+ /* Report late collisions as a frame error.
+ * On this error, the BD is closed, but we don't know what we
+ * have in the buffer. So, just drop this frame on the floor.
+ */
+ if (status & BD_ENET_RX_CL) {
+ ndev->stats.rx_errors++;
+ ndev->stats.rx_frame_errors++;
+ goto rx_processing_done;
+ }
+
+ /* Process the incoming frame. */
+ ndev->stats.rx_packets++;
+ pkt_len = bdp->cbd_datlen;
+ ndev->stats.rx_bytes += pkt_len;
+
+ index = fec_enet_get_bd_index(rxq->rx_bd_base, bdp, fep);
+ skb = rxq->rx_skbuff[index];
+
+ /* The packet length includes FCS, but we don't want to
+ * include that when passing upstream as it messes up
+ * bridging applications.
+ */
+ is_copybreak = fec_enet_copybreak(ndev, &skb, bdp, pkt_len - 4,
+ need_swap);
+ if (!is_copybreak) {
+ skb_new = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
+ if (unlikely(!skb_new)) {
+ ndev->stats.rx_dropped++;
+ goto rx_processing_done;
+ }
+ dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
+ FEC_ENET_RX_FRSIZE - fep->rx_align,
+ DMA_FROM_DEVICE);
+ }
+
+ prefetch(skb->data - NET_IP_ALIGN);
+ skb_put(skb, pkt_len - 4);
+ data = skb->data;
+ if (!is_copybreak && need_swap)
+ swap_buffer(data, pkt_len);
+
+ /* Extract the enhanced buffer descriptor */
+ ebdp = NULL;
+ if (fep->bufdesc_ex)
+ ebdp = (struct bufdesc_ex *)bdp;
+
+ /* If this is a VLAN packet remove the VLAN Tag */
+ vlan_packet_rcvd = false;
+ if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+ fep->bufdesc_ex && (ebdp->cbd_esc & BD_ENET_RX_VLAN)) {
+ /* Push and remove the vlan tag */
+ struct vlan_hdr *vlan_header =
+ (struct vlan_hdr *) (data + ETH_HLEN);
+ vlan_tag = ntohs(vlan_header->h_vlan_TCI);
+
+ vlan_packet_rcvd = true;
+
+ memmove(skb->data + VLAN_HLEN, data, ETH_ALEN * 2);
+ skb_pull(skb, VLAN_HLEN);
+ }
+
+ skb->protocol = eth_type_trans(skb, ndev);
+
+ /* Get receive timestamp from the skb */
+ if (fep->hwts_rx_en && fep->bufdesc_ex)
+ fec_enet_hwtstamp(fep, ebdp->ts,
+ skb_hwtstamps(skb));
+
+ if (fep->bufdesc_ex &&
+ (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) {
+ if (!(ebdp->cbd_esc & FLAG_RX_CSUM_ERROR)) {
+ /* don't check it */
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else {
+ skb_checksum_none_assert(skb);
+ }
+ }
+
+ /* Handle received VLAN packets */
+ if (vlan_packet_rcvd)
+ __vlan_hwaccel_put_tag(skb,
+ htons(ETH_P_8021Q),
+ vlan_tag);
+
+ napi_gro_receive(&fep->napi, skb);
+
+ if (is_copybreak) {
+ dma_sync_single_for_device(&fep->pdev->dev, bdp->cbd_bufaddr,
+ FEC_ENET_RX_FRSIZE - fep->rx_align,
+ DMA_FROM_DEVICE);
+ } else {
+ rxq->rx_skbuff[index] = skb_new;
+ fec_enet_new_rxbdp(ndev, bdp, skb_new);
+ }
+
+rx_processing_done:
+ /* Clear the status flags for this buffer */
+ status &= ~BD_ENET_RX_STATS;
+
+ /* Mark the buffer empty */
+ status |= BD_ENET_RX_EMPTY;
+ bdp->cbd_sc = status;
+
+ if (fep->bufdesc_ex) {
+ struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+
+ ebdp->cbd_esc = BD_ENET_RX_INT;
+ ebdp->cbd_prot = 0;
+ ebdp->cbd_bdu = 0;
+ }
+
+ /* Update BD pointer to next entry */
+ bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
+
+ /* Doing this here will keep the FEC running while we process
+ * incoming frames. On a heavily loaded network, we should be
+ * able to keep up at the expense of system resources.
+ */
+ writel(0, fep->hwp + FEC_R_DES_ACTIVE(queue_id));
+ }
+ rxq->cur_rx = bdp;
+ return pkt_received;
+}
+
+static int
+fec_enet_rx(struct net_device *ndev, int budget)
+{
+ int pkt_received = 0;
+ u16 queue_id;
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ for_each_set_bit(queue_id, &fep->work_rx, FEC_ENET_MAX_RX_QS) {
+ clear_bit(queue_id, &fep->work_rx);
+ pkt_received += fec_enet_rx_queue(ndev,
+ budget - pkt_received, queue_id);
+ }
+ return pkt_received;
+}
+
+static bool
+fec_enet_collect_events(struct fec_enet_private *fep, uint int_events)
+{
+ if (int_events == 0)
+ return false;
+
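+ /* Queues 1 and 2 (the AVB class A/B rings) are mapped to the low work
+ * bits so that for_each_set_bit() services them before the best-effort
+ * queue 0 (bit 2).
+ */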
+ if (int_events & FEC_ENET_RXF)
+ fep->work_rx |= (1 << 2);
+ if (int_events & FEC_ENET_RXF_1)
+ fep->work_rx |= (1 << 0);
+ if (int_events & FEC_ENET_RXF_2)
+ fep->work_rx |= (1 << 1);
+
+ if (int_events & FEC_ENET_TXF)
+ fep->work_tx |= (1 << 2);
+ if (int_events & FEC_ENET_TXF_1)
+ fep->work_tx |= (1 << 0);
+ if (int_events & FEC_ENET_TXF_2)
+ fep->work_tx |= (1 << 1);
+
+ return true;
+}
+
+static irqreturn_t
+fec_enet_interrupt(int irq, void *dev_id)
+{
+ struct net_device *ndev = dev_id;
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ uint int_events;
+ irqreturn_t ret = IRQ_NONE;
+
+ int_events = readl(fep->hwp + FEC_IEVENT);
+ writel(int_events, fep->hwp + FEC_IEVENT);
+ fec_enet_collect_events(fep, int_events);
+
+ if ((fep->work_tx || fep->work_rx) && fep->link) {
+ ret = IRQ_HANDLED;
+
+ if (napi_schedule_prep(&fep->napi)) {
+ /* Disable the NAPI interrupts */
+ writel(FEC_ENET_MII, fep->hwp + FEC_IMASK);
+ __napi_schedule(&fep->napi);
+ }
+ }
+
+ if (int_events & FEC_ENET_MII) {
+ ret = IRQ_HANDLED;
+ complete(&fep->mdio_done);
+ }
+
+ if (fep->ptp_clock)
+ fec_ptp_check_pps_event(fep);
+
+ return ret;
+}
+
+static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
+{
+ struct net_device *ndev = napi->dev;
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ int pkts;
+
+ pkts = fec_enet_rx(ndev, budget);
+
+ fec_enet_tx(ndev);
+
+ if (pkts < budget) {
+ napi_complete(napi);
+ writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
+ }
+ return pkts;
+}
+
+/* ------------------------------------------------------------------------- */
+static void fec_get_mac(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct fec_platform_data *pdata = dev_get_platdata(&fep->pdev->dev);
+ unsigned char *iap, tmpaddr[ETH_ALEN];
+
+ /*
+ * try to get mac address in following order:
+ *
+ * 1) module parameter via kernel command line in form
+ * fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0
+ */
+ iap = macaddr;
+
+ /*
+ * 2) from device tree data
+ */
+ if (!is_valid_ether_addr(iap)) {
+ struct device_node *np = fep->pdev->dev.of_node;
+ if (np) {
+ const char *mac = of_get_mac_address(np);
+ if (mac)
+ iap = (unsigned char *) mac;
+ }
+ }
+
+ /*
+ * 3) from flash or fuse (via platform data)
+ */
+ if (!is_valid_ether_addr(iap)) {
+#ifdef CONFIG_M5272
+ if (FEC_FLASHMAC)
+ iap = (unsigned char *)FEC_FLASHMAC;
+#else
+ if (pdata)
+ iap = (unsigned char *)&pdata->mac;
+#endif
+ }
+
+ /*
+ * 4) FEC mac registers set by bootloader
+ */
+ if (!is_valid_ether_addr(iap)) {
+ *((__be32 *) &tmpaddr[0]) =
+ cpu_to_be32(readl(fep->hwp + FEC_ADDR_LOW));
+ *((__be16 *) &tmpaddr[4]) =
+ cpu_to_be16(readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
+ iap = &tmpaddr[0];
+ }
+
+ /*
+ * 5) random mac address
+ */
+ if (!is_valid_ether_addr(iap)) {
+ /* Report it and use a random ethernet address instead */
+ netdev_err(ndev, "Invalid MAC address: %pM\n", iap);
+ eth_hw_addr_random(ndev);
+ netdev_info(ndev, "Using random MAC address: %pM\n",
+ ndev->dev_addr);
+ return;
+ }
+
+ memcpy(ndev->dev_addr, iap, ETH_ALEN);
+
+ /* Adjust MAC if using macaddr */
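+ /* when the module-parameter MAC is used, make each FEC instance
+ * unique by adding dev_id to the last address byte
+ */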
+ if (iap == macaddr)
+ ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->dev_id;
+}
+
+/* ------------------------------------------------------------------------- */
+
+/*
+ * Phy section
+ */
+static void fec_enet_adjust_link(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct phy_device *phy_dev = fep->phy_dev;
+ int status_change = 0;
+
+ /* Prevent a state halted on mii error */
+ if (fep->mii_timeout && phy_dev->state == PHY_HALTED) {
+ phy_dev->state = PHY_RESUMING;
+ return;
+ }
+
+ /*
+ * If the netdev is down, or is going down, we're not interested
+ * in link state events, so just mark our idea of the link as down
+ * and ignore the event.
+ */
+ if (!netif_running(ndev) || !netif_device_present(ndev)) {
+ fep->link = 0;
+ } else if (phy_dev->link) {
+ if (!fep->link) {
+ fep->link = phy_dev->link;
+ status_change = 1;
+ }
+
+ if (fep->full_duplex != phy_dev->duplex) {
+ fep->full_duplex = phy_dev->duplex;
+ status_change = 1;
+ }
+
+ if (phy_dev->speed != fep->speed) {
+ fep->speed = phy_dev->speed;
+ status_change = 1;
+ }
+
+ /* if any of the above changed restart the FEC */
+ if (status_change) {
+ napi_disable(&fep->napi);
+ netif_tx_lock_bh(ndev);
+ fec_restart(ndev);
+ netif_wake_queue(ndev);
+ netif_tx_unlock_bh(ndev);
+ napi_enable(&fep->napi);
+ }
+ } else {
+ if (fep->link) {
+ napi_disable(&fep->napi);
+ netif_tx_lock_bh(ndev);
+ fec_stop(ndev);
+ netif_tx_unlock_bh(ndev);
+ napi_enable(&fep->napi);
+ fep->link = phy_dev->link;
+ status_change = 1;
+ }
+ }
+
+ if (status_change)
+ phy_print_status(phy_dev);
+}
+
+static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
+{
+ struct fec_enet_private *fep = bus->priv;
+ unsigned long time_left;
+
+ fep->mii_timeout = 0;
+ init_completion(&fep->mdio_done);
+
+ /* start a read op */
+ writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
+ FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
+ FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);
+
+ /* wait for end of transfer */
+ time_left = wait_for_completion_timeout(&fep->mdio_done,
+ usecs_to_jiffies(FEC_MII_TIMEOUT));
+ if (time_left == 0) {
+ fep->mii_timeout = 1;
+ netdev_err(fep->netdev, "MDIO read timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ /* return value */
+ return FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
+}
+
+static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
+ u16 value)
+{
+ struct fec_enet_private *fep = bus->priv;
+ unsigned long time_left;
+
+ fep->mii_timeout = 0;
+ init_completion(&fep->mdio_done);
+
+ /* start a write op */
+ writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE |
+ FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
+ FEC_MMFR_TA | FEC_MMFR_DATA(value),
+ fep->hwp + FEC_MII_DATA);
+
+ /* wait for end of transfer */
+ time_left = wait_for_completion_timeout(&fep->mdio_done,
+ usecs_to_jiffies(FEC_MII_TIMEOUT));
+ if (time_left == 0) {
+ fep->mii_timeout = 1;
+ netdev_err(fep->netdev, "MDIO write timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ int ret;
+
+ if (enable) {
+ ret = clk_prepare_enable(fep->clk_ahb);
+ if (ret)
+ return ret;
+ ret = clk_prepare_enable(fep->clk_ipg);
+ if (ret)
+ goto failed_clk_ipg;
+ if (fep->clk_enet_out) {
+ ret = clk_prepare_enable(fep->clk_enet_out);
+ if (ret)
+ goto failed_clk_enet_out;
+ }
+ if (fep->clk_ptp) {
+ mutex_lock(&fep->ptp_clk_mutex);
+ ret = clk_prepare_enable(fep->clk_ptp);
+ if (ret) {
+ mutex_unlock(&fep->ptp_clk_mutex);
+ goto failed_clk_ptp;
+ } else {
+ fep->ptp_clk_on = true;
+ }
+ mutex_unlock(&fep->ptp_clk_mutex);
+ }
+ if (fep->clk_ref) {
+ ret = clk_prepare_enable(fep->clk_ref);
+ if (ret)
+ goto failed_clk_ref;
+ }
+ } else {
+ clk_disable_unprepare(fep->clk_ahb);
+ clk_disable_unprepare(fep->clk_ipg);
+ if (fep->clk_enet_out)
+ clk_disable_unprepare(fep->clk_enet_out);
+ if (fep->clk_ptp) {
+ mutex_lock(&fep->ptp_clk_mutex);
+ clk_disable_unprepare(fep->clk_ptp);
+ fep->ptp_clk_on = false;
+ mutex_unlock(&fep->ptp_clk_mutex);
+ }
+ if (fep->clk_ref)
+ clk_disable_unprepare(fep->clk_ref);
+ }
+
+ return 0;
+
+failed_clk_ref:
+ if (fep->clk_ref)
+ clk_disable_unprepare(fep->clk_ref);
+failed_clk_ptp:
+ if (fep->clk_enet_out)
+ clk_disable_unprepare(fep->clk_enet_out);
+failed_clk_enet_out:
+ clk_disable_unprepare(fep->clk_ipg);
+failed_clk_ipg:
+ clk_disable_unprepare(fep->clk_ahb);
+
+ return ret;
+}
+
+static int fec_enet_mii_probe(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct phy_device *phy_dev = NULL;
+ char mdio_bus_id[MII_BUS_ID_SIZE];
+ char phy_name[MII_BUS_ID_SIZE + 3];
+ int phy_id;
+ int dev_id = fep->dev_id;
+
+ fep->phy_dev = NULL;
+
+ if (fep->phy_node) {
+ phy_dev = of_phy_connect(ndev, fep->phy_node,
+ &fec_enet_adjust_link, 0,
+ fep->phy_interface);
+ if (!phy_dev)
+ return -ENODEV;
+ } else {
+ /* check for attached phy */
+ for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) {
+ if ((fep->mii_bus->phy_mask & (1 << phy_id)))
+ continue;
+ if (fep->mii_bus->phy_map[phy_id] == NULL)
+ continue;
+ if (fep->mii_bus->phy_map[phy_id]->phy_id == 0)
+ continue;
+ if (dev_id--)
+ continue;
+ strlcpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
+ break;
+ }
+
+ if (phy_id >= PHY_MAX_ADDR) {
+ netdev_info(ndev, "no PHY, assuming direct connection to switch\n");
+ strlcpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE);
+ phy_id = 0;
+ }
+
+ snprintf(phy_name, sizeof(phy_name),
+ PHY_ID_FMT, mdio_bus_id, phy_id);
+ phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link,
+ fep->phy_interface);
+ }
+
+ if (IS_ERR(phy_dev)) {
+ netdev_err(ndev, "could not attach to PHY\n");
+ return PTR_ERR(phy_dev);
+ }
+
+ /* mask with MAC supported features */
+ if (fep->quirks & FEC_QUIRK_HAS_GBIT) {
+ phy_dev->supported &= PHY_GBIT_FEATURES;
+ phy_dev->supported &= ~SUPPORTED_1000baseT_Half;
+#if !defined(CONFIG_M5272)
+ phy_dev->supported |= SUPPORTED_Pause;
+#endif
+ } else {
+ phy_dev->supported &= PHY_BASIC_FEATURES;
+ }
+
+ phy_dev->advertising = phy_dev->supported;
+
+ fep->phy_dev = phy_dev;
+ fep->link = 0;
+ fep->full_duplex = 0;
+
+ netdev_info(ndev, "Freescale FEC PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
+ fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev),
+ fep->phy_dev->irq);
+
+ return 0;
+}
+
+static int fec_enet_mii_init(struct platform_device *pdev)
+{
+ static struct mii_bus *fec0_mii_bus;
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct device_node *node;
+ int err = -ENXIO, i;
+ u32 mii_speed, holdtime;
+
+ /*
+ * The i.MX28 dual fec interfaces are not equal.
+ * Here are the differences:
+ *
+ * - fec0 supports MII & RMII modes while fec1 only supports RMII
+ * - fec0 acts as the 1588 time master while fec1 is slave
+ * - external phys can only be configured by fec0
+ *
+ * That is to say, fec1 cannot work independently. It only works
+ * when fec0 is working. The reason behind this design is that the
+ * second interface is added primarily for Switch mode.
+ *
+ * Because of the last point above, both phys are attached on fec0
+ * mdio interface in board design, and need to be configured by
+ * fec0 mii_bus.
+ */
+ if ((fep->quirks & FEC_QUIRK_SINGLE_MDIO) && fep->dev_id > 0) {
+ /* fec1 uses fec0 mii_bus */
+ if (mii_cnt && fec0_mii_bus) {
+ fep->mii_bus = fec0_mii_bus;
+ mii_cnt++;
+ return 0;
+ }
+ return -ENOENT;
+ }
+
+ fep->mii_timeout = 0;
+
+ /*
+ * Set MII speed to 2.5 MHz (= clk_get_rate() / 2 * phy_speed)
+ *
+ * The formula for FEC MDC is 'ref_freq / (MII_SPEED x 2)' while
+ * for ENET-MAC is 'ref_freq / ((MII_SPEED + 1) x 2)'. The i.MX28
+ * Reference Manual has an error on this, which is corrected in the
+ * i.MX6Q documentation.
+ */
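+ /* For example, with a hypothetical 66 MHz ipg clock:
+ * DIV_ROUND_UP(66000000, 5000000) = 14, minus one on ENET-MAC gives
+ * MII_SPEED = 13, so MDC = 66 MHz / ((13 + 1) * 2) ~= 2.36 MHz.
+ */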
+ mii_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000);
+ if (fep->quirks & FEC_QUIRK_ENET_MAC)
+ mii_speed--;
+ if (mii_speed > 63) {
+ dev_err(&pdev->dev,
+ "fec clock (%lu) to fast to get right mii speed\n",
+ clk_get_rate(fep->clk_ipg));
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ /*
+ * The i.MX28 and i.MX6 types have another field in the MSCR (aka
+ * MII_SPEED) register that defines the MDIO output hold time. Earlier
+ * versions are RAZ there, so just ignore the difference and write the
+ * register always.
+ * The minimal hold time according to IEEE 802.3 (clause 22) is 10 ns.
+ * HOLDTIME + 1 is the number of clk cycles the fec is holding the
+ * output.
+ * The HOLDTIME bitfield takes values between 0 and 7 (inclusive).
+ * Given that ceil(clkrate / 5000000) <= 64, the calculation for
+ * holdtime cannot result in a value greater than 3.
+ */
+ holdtime = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 100000000) - 1;
+
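+ /* MSCR layout: MII_SPEED occupies bits 6:1 and HOLDTIME bits 10:8,
+ * hence the shifts below.
+ */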
+ fep->phy_speed = mii_speed << 1 | holdtime << 8;
+
+ writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
+
+ fep->mii_bus = mdiobus_alloc();
+ if (fep->mii_bus == NULL) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ fep->mii_bus->name = "fec_enet_mii_bus";
+ fep->mii_bus->read = fec_enet_mdio_read;
+ fep->mii_bus->write = fec_enet_mdio_write;
+ snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+ pdev->name, fep->dev_id + 1);
+ fep->mii_bus->priv = fep;
+ fep->mii_bus->parent = &pdev->dev;
+
+ fep->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+ if (!fep->mii_bus->irq) {
+ err = -ENOMEM;
+ goto err_out_free_mdiobus;
+ }
+
+ for (i = 0; i < PHY_MAX_ADDR; i++)
+ fep->mii_bus->irq[i] = PHY_POLL;
+
+ node = of_get_child_by_name(pdev->dev.of_node, "mdio");
+ if (node) {
+ err = of_mdiobus_register(fep->mii_bus, node);
+ of_node_put(node);
+ } else {
+ err = mdiobus_register(fep->mii_bus);
+ }
+
+ if (err)
+ goto err_out_free_mdio_irq;
+
+ mii_cnt++;
+
+ /* save fec0 mii_bus */
+ if (fep->quirks & FEC_QUIRK_SINGLE_MDIO)
+ fec0_mii_bus = fep->mii_bus;
+
+ return 0;
+
+err_out_free_mdio_irq:
+ kfree(fep->mii_bus->irq);
+err_out_free_mdiobus:
+ mdiobus_free(fep->mii_bus);
+err_out:
+ return err;
+}
+
+static void fec_enet_mii_remove(struct fec_enet_private *fep)
+{
+ if (--mii_cnt == 0) {
+ mdiobus_unregister(fep->mii_bus);
+ kfree(fep->mii_bus->irq);
+ mdiobus_free(fep->mii_bus);
+ }
+}
+
+static int fec_enet_get_settings(struct net_device *ndev,
+ struct ethtool_cmd *cmd)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct phy_device *phydev = fep->phy_dev;
+
+ if (!phydev)
+ return -ENODEV;
+
+ return phy_ethtool_gset(phydev, cmd);
+}
+
+static int fec_enet_set_settings(struct net_device *ndev,
+ struct ethtool_cmd *cmd)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct phy_device *phydev = fep->phy_dev;
+
+ if (!phydev)
+ return -ENODEV;
+
+ return phy_ethtool_sset(phydev, cmd);
+}
+
+static void fec_enet_get_drvinfo(struct net_device *ndev,
+ struct ethtool_drvinfo *info)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ strlcpy(info->driver, fep->pdev->dev.driver->name,
+ sizeof(info->driver));
+ strlcpy(info->version, "Revision: 1.0", sizeof(info->version));
+ strlcpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info));
+}
+
+static int fec_enet_get_ts_info(struct net_device *ndev,
+ struct ethtool_ts_info *info)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ if (fep->bufdesc_ex) {
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ if (fep->ptp_clock)
+ info->phc_index = ptp_clock_index(fep->ptp_clock);
+ else
+ info->phc_index = -1;
+
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) |
+ (1 << HWTSTAMP_TX_ON);
+
+ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_ALL);
+ return 0;
+ } else {
+ return ethtool_op_get_ts_info(ndev, info);
+ }
+}
+
+#if !defined(CONFIG_M5272)
+
+static void fec_enet_get_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ pause->autoneg = (fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) != 0;
+ pause->tx_pause = (fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) != 0;
+ pause->rx_pause = pause->tx_pause;
+}
+
+static int fec_enet_set_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ if (!fep->phy_dev)
+ return -ENODEV;
+
+ if (pause->tx_pause != pause->rx_pause) {
+ netdev_info(ndev,
+ "hardware only support enable/disable both tx and rx");
+ return -EINVAL;
+ }
+
+ fep->pause_flag = 0;
+
+ /* tx pause must be same as rx pause */
+ fep->pause_flag |= pause->rx_pause ? FEC_PAUSE_FLAG_ENABLE : 0;
+ fep->pause_flag |= pause->autoneg ? FEC_PAUSE_FLAG_AUTONEG : 0;
+
+ if (pause->rx_pause || pause->autoneg) {
+ fep->phy_dev->supported |= ADVERTISED_Pause;
+ fep->phy_dev->advertising |= ADVERTISED_Pause;
+ } else {
+ fep->phy_dev->supported &= ~ADVERTISED_Pause;
+ fep->phy_dev->advertising &= ~ADVERTISED_Pause;
+ }
+
+ if (pause->autoneg) {
+ if (netif_running(ndev))
+ fec_stop(ndev);
+ phy_start_aneg(fep->phy_dev);
+ }
+ if (netif_running(ndev)) {
+ napi_disable(&fep->napi);
+ netif_tx_lock_bh(ndev);
+ fec_restart(ndev);
+ netif_wake_queue(ndev);
+ netif_tx_unlock_bh(ndev);
+ napi_enable(&fep->napi);
+ }
+
+ return 0;
+}
+
+static const struct fec_stat {
+ char name[ETH_GSTRING_LEN];
+ u16 offset;
+} fec_stats[] = {
+ /* RMON TX */
+ { "tx_dropped", RMON_T_DROP },
+ { "tx_packets", RMON_T_PACKETS },
+ { "tx_broadcast", RMON_T_BC_PKT },
+ { "tx_multicast", RMON_T_MC_PKT },
+ { "tx_crc_errors", RMON_T_CRC_ALIGN },
+ { "tx_undersize", RMON_T_UNDERSIZE },
+ { "tx_oversize", RMON_T_OVERSIZE },
+ { "tx_fragment", RMON_T_FRAG },
+ { "tx_jabber", RMON_T_JAB },
+ { "tx_collision", RMON_T_COL },
+ { "tx_64byte", RMON_T_P64 },
+ { "tx_65to127byte", RMON_T_P65TO127 },
+ { "tx_128to255byte", RMON_T_P128TO255 },
+ { "tx_256to511byte", RMON_T_P256TO511 },
+ { "tx_512to1023byte", RMON_T_P512TO1023 },
+ { "tx_1024to2047byte", RMON_T_P1024TO2047 },
+ { "tx_GTE2048byte", RMON_T_P_GTE2048 },
+ { "tx_octets", RMON_T_OCTETS },
+
+ /* IEEE TX */
+ { "IEEE_tx_drop", IEEE_T_DROP },
+ { "IEEE_tx_frame_ok", IEEE_T_FRAME_OK },
+ { "IEEE_tx_1col", IEEE_T_1COL },
+ { "IEEE_tx_mcol", IEEE_T_MCOL },
+ { "IEEE_tx_def", IEEE_T_DEF },
+ { "IEEE_tx_lcol", IEEE_T_LCOL },
+ { "IEEE_tx_excol", IEEE_T_EXCOL },
+ { "IEEE_tx_macerr", IEEE_T_MACERR },
+ { "IEEE_tx_cserr", IEEE_T_CSERR },
+ { "IEEE_tx_sqe", IEEE_T_SQE },
+ { "IEEE_tx_fdxfc", IEEE_T_FDXFC },
+ { "IEEE_tx_octets_ok", IEEE_T_OCTETS_OK },
+
+ /* RMON RX */
+ { "rx_packets", RMON_R_PACKETS },
+ { "rx_broadcast", RMON_R_BC_PKT },
+ { "rx_multicast", RMON_R_MC_PKT },
+ { "rx_crc_errors", RMON_R_CRC_ALIGN },
+ { "rx_undersize", RMON_R_UNDERSIZE },
+ { "rx_oversize", RMON_R_OVERSIZE },
+ { "rx_fragment", RMON_R_FRAG },
+ { "rx_jabber", RMON_R_JAB },
+ { "rx_64byte", RMON_R_P64 },
+ { "rx_65to127byte", RMON_R_P65TO127 },
+ { "rx_128to255byte", RMON_R_P128TO255 },
+ { "rx_256to511byte", RMON_R_P256TO511 },
+ { "rx_512to1023byte", RMON_R_P512TO1023 },
+ { "rx_1024to2047byte", RMON_R_P1024TO2047 },
+ { "rx_GTE2048byte", RMON_R_P_GTE2048 },
+ { "rx_octets", RMON_R_OCTETS },
+
+ /* IEEE RX */
+ { "IEEE_rx_drop", IEEE_R_DROP },
+ { "IEEE_rx_frame_ok", IEEE_R_FRAME_OK },
+ { "IEEE_rx_crc", IEEE_R_CRC },
+ { "IEEE_rx_align", IEEE_R_ALIGN },
+ { "IEEE_rx_macerr", IEEE_R_MACERR },
+ { "IEEE_rx_fdxfc", IEEE_R_FDXFC },
+ { "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK },
+};
+
+static void fec_enet_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
+ data[i] = readl(fep->hwp + fec_stats[i].offset);
+}
+
+static void fec_enet_get_strings(struct net_device *netdev,
+ u32 stringset, u8 *data)
+{
+ int i;
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
+ memcpy(data + i * ETH_GSTRING_LEN,
+ fec_stats[i].name, ETH_GSTRING_LEN);
+ break;
+ }
+}
+
+static int fec_enet_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(fec_stats);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+#endif /* !defined(CONFIG_M5272) */
+
+static int fec_enet_nway_reset(struct net_device *dev)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+ struct phy_device *phydev = fep->phy_dev;
+
+ if (!phydev)
+ return -ENODEV;
+
+ return genphy_restart_aneg(phydev);
+}
+
+/* ITR clock source is enet system clock (clk_ahb).
+ * TCTT unit is cycle_ns * 64 cycle
+ * So, the ICTT value = X us / (cycle_ns * 64)
+ */
+static int fec_enet_us_to_itr_clock(struct net_device *ndev, int us)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ return us * (fep->itr_clk_rate / 64000) / 1000;
+}
+
+/* Set threshold for interrupt coalescing */
+static void fec_enet_itr_coal_set(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ int rx_itr, tx_itr;
+
+ if (!(fep->quirks & FEC_QUIRK_HAS_AVB))
+ return;
+
+ /* Must be greater than zero to avoid unpredictable behavior */
+ if (!fep->rx_time_itr || !fep->rx_pkts_itr ||
+ !fep->tx_time_itr || !fep->tx_pkts_itr)
+ return;
+
+ /* Select enet system clock as Interrupt Coalescing
+ * timer Clock Source
+ */
+ rx_itr = FEC_ITR_CLK_SEL;
+ tx_itr = FEC_ITR_CLK_SEL;
+
+ /* set ICFT and ICTT */
+ rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr);
+ rx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr));
+ tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr);
+ tx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr));
+
+ rx_itr |= FEC_ITR_EN;
+ tx_itr |= FEC_ITR_EN;
+
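+ /* apply the same coalescing parameters to all three TX and RX rings */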
+ writel(tx_itr, fep->hwp + FEC_TXIC0);
+ writel(rx_itr, fep->hwp + FEC_RXIC0);
+ writel(tx_itr, fep->hwp + FEC_TXIC1);
+ writel(rx_itr, fep->hwp + FEC_RXIC1);
+ writel(tx_itr, fep->hwp + FEC_TXIC2);
+ writel(rx_itr, fep->hwp + FEC_RXIC2);
+}
+
+static int
+fec_enet_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ if (!(fep->quirks & FEC_QUIRK_HAS_AVB))
+ return -EOPNOTSUPP;
+
+ ec->rx_coalesce_usecs = fep->rx_time_itr;
+ ec->rx_max_coalesced_frames = fep->rx_pkts_itr;
+
+ ec->tx_coalesce_usecs = fep->tx_time_itr;
+ ec->tx_max_coalesced_frames = fep->tx_pkts_itr;
+
+ return 0;
+}
+
+static int
+fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ unsigned int cycle;
+
+ if (!(fep->quirks & FEC_QUIRK_HAS_AVB))
+ return -EOPNOTSUPP;
+
+ if (ec->rx_max_coalesced_frames > 255) {
+ pr_err("Rx coalesced frames exceed hardware limitation");
+ return -EINVAL;
+ }
+
+ if (ec->tx_max_coalesced_frames > 255) {
+ pr_err("Tx coalesced frames exceed hardware limitation");
+ return -EINVAL;
+ }
+
+ cycle = fec_enet_us_to_itr_clock(ndev, ec->rx_coalesce_usecs);
+ if (cycle > 0xFFFF) {
+ pr_err("Rx coalesced usec exceed hardware limitation");
+ return -EINVAL;
+ }
+
+ cycle = fec_enet_us_to_itr_clock(ndev, ec->tx_coalesce_usecs);
+ if (cycle > 0xFFFF) {
+ pr_err("Tx coalesced usec exceed hardware limitation");
+ return -EINVAL;
+ }
+
+ fep->rx_time_itr = ec->rx_coalesce_usecs;
+ fep->rx_pkts_itr = ec->rx_max_coalesced_frames;
+
+ fep->tx_time_itr = ec->tx_coalesce_usecs;
+ fep->tx_pkts_itr = ec->tx_max_coalesced_frames;
+
+ fec_enet_itr_coal_set(ndev);
+
+ return 0;
+}
+
+static void fec_enet_itr_coal_init(struct net_device *ndev)
+{
+ struct ethtool_coalesce ec;
+
+ ec.rx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT;
+ ec.rx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT;
+
+ ec.tx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT;
+ ec.tx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT;
+
+ fec_enet_set_coalesce(ndev, &ec);
+}
+
+static int fec_enet_get_tunable(struct net_device *netdev,
+ const struct ethtool_tunable *tuna,
+ void *data)
+{
+ struct fec_enet_private *fep = netdev_priv(netdev);
+ int ret = 0;
+
+ switch (tuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ *(u32 *)data = fep->rx_copybreak;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int fec_enet_set_tunable(struct net_device *netdev,
+ const struct ethtool_tunable *tuna,
+ const void *data)
+{
+ struct fec_enet_private *fep = netdev_priv(netdev);
+ int ret = 0;
+
+ switch (tuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ fep->rx_copybreak = *(u32 *)data;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static void
+fec_enet_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ if (fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET) {
+ wol->supported = WAKE_MAGIC;
+ wol->wolopts = fep->wol_flag & FEC_WOL_FLAG_ENABLE ? WAKE_MAGIC : 0;
+ } else {
+ wol->supported = wol->wolopts = 0;
+ }
+}
+
+static int
+fec_enet_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ if (!(fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET))
+ return -EINVAL;
+
+ if (wol->wolopts & ~WAKE_MAGIC)
+ return -EINVAL;
+
+ device_set_wakeup_enable(&ndev->dev, wol->wolopts & WAKE_MAGIC);
+ if (device_may_wakeup(&ndev->dev)) {
+ fep->wol_flag |= FEC_WOL_FLAG_ENABLE;
+ if (fep->irq[0] > 0)
+ enable_irq_wake(fep->irq[0]);
+ } else {
+ fep->wol_flag &= (~FEC_WOL_FLAG_ENABLE);
+ if (fep->irq[0] > 0)
+ disable_irq_wake(fep->irq[0]);
+ }
+
+ return 0;
+}
+
+static const struct ethtool_ops fec_enet_ethtool_ops = {
+ .get_settings = fec_enet_get_settings,
+ .set_settings = fec_enet_set_settings,
+ .get_drvinfo = fec_enet_get_drvinfo,
+ .nway_reset = fec_enet_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_coalesce = fec_enet_get_coalesce,
+ .set_coalesce = fec_enet_set_coalesce,
+#ifndef CONFIG_M5272
+ .get_pauseparam = fec_enet_get_pauseparam,
+ .set_pauseparam = fec_enet_set_pauseparam,
+ .get_strings = fec_enet_get_strings,
+ .get_ethtool_stats = fec_enet_get_ethtool_stats,
+ .get_sset_count = fec_enet_get_sset_count,
+#endif
+ .get_ts_info = fec_enet_get_ts_info,
+ .get_tunable = fec_enet_get_tunable,
+ .set_tunable = fec_enet_set_tunable,
+ .get_wol = fec_enet_get_wol,
+ .set_wol = fec_enet_set_wol,
+};
+
+static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct phy_device *phydev = fep->phy_dev;
+
+ if (!netif_running(ndev))
+ return -EINVAL;
+
+ if (!phydev)
+ return -ENODEV;
+
+ if (fep->bufdesc_ex) {
+ if (cmd == SIOCSHWTSTAMP)
+ return fec_ptp_set(ndev, rq);
+ if (cmd == SIOCGHWTSTAMP)
+ return fec_ptp_get(ndev, rq);
+ }
+
+ return phy_mii_ioctl(phydev, rq, cmd);
+}
+
+static void fec_enet_free_buffers(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ unsigned int i;
+ struct sk_buff *skb;
+ struct bufdesc *bdp;
+ struct fec_enet_priv_tx_q *txq;
+ struct fec_enet_priv_rx_q *rxq;
+ unsigned int q;
+
+ for (q = 0; q < fep->num_rx_queues; q++) {
+ rxq = fep->rx_queue[q];
+ bdp = rxq->rx_bd_base;
+ for (i = 0; i < rxq->rx_ring_size; i++) {
+ skb = rxq->rx_skbuff[i];
+ rxq->rx_skbuff[i] = NULL;
+ if (skb) {
+ dma_unmap_single(&fep->pdev->dev,
+ bdp->cbd_bufaddr,
+ FEC_ENET_RX_FRSIZE - fep->rx_align,
+ DMA_FROM_DEVICE);
+ dev_kfree_skb(skb);
+ }
+ bdp = fec_enet_get_nextdesc(bdp, fep, q);
+ }
+ }
+
+ for (q = 0; q < fep->num_tx_queues; q++) {
+ txq = fep->tx_queue[q];
+ bdp = txq->tx_bd_base;
+ for (i = 0; i < txq->tx_ring_size; i++) {
+ kfree(txq->tx_bounce[i]);
+ txq->tx_bounce[i] = NULL;
+ skb = txq->tx_skbuff[i];
+ txq->tx_skbuff[i] = NULL;
+ dev_kfree_skb(skb);
+ }
+ }
+}
+
+static void fec_enet_free_queue(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ int i;
+ struct fec_enet_priv_tx_q *txq;
+
+ for (i = 0; i < fep->num_tx_queues; i++)
+ if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) {
+ txq = fep->tx_queue[i];
+ dma_free_coherent(NULL,
+ txq->tx_ring_size * TSO_HEADER_SIZE,
+ txq->tso_hdrs,
+ txq->tso_hdrs_dma);
+ }
+
+ for (i = 0; i < fep->num_rx_queues; i++)
+ kfree(fep->rx_queue[i]);
+ for (i = 0; i < fep->num_tx_queues; i++)
+ kfree(fep->tx_queue[i]);
+}
+
+static int fec_enet_alloc_queue(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ int i;
+ int ret = 0;
+ struct fec_enet_priv_tx_q *txq;
+
+ for (i = 0; i < fep->num_tx_queues; i++) {
+ txq = kzalloc(sizeof(*txq), GFP_KERNEL);
+ if (!txq) {
+ ret = -ENOMEM;
+ goto alloc_failed;
+ }
+
+ fep->tx_queue[i] = txq;
+ txq->tx_ring_size = TX_RING_SIZE;
+ fep->total_tx_ring_size += fep->tx_queue[i]->tx_ring_size;
+
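+ /* Stop the queue when fewer than FEC_MAX_SKB_DESCS descriptors are
+ * free (enough for a worst-case fragmented skb) and wake it again once
+ * half of the remaining ring space has been reclaimed.
+ */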
+ txq->tx_stop_threshold = FEC_MAX_SKB_DESCS;
+ txq->tx_wake_threshold =
+ (txq->tx_ring_size - txq->tx_stop_threshold) / 2;
+
+ txq->tso_hdrs = dma_alloc_coherent(NULL,
+ txq->tx_ring_size * TSO_HEADER_SIZE,
+ &txq->tso_hdrs_dma,
+ GFP_KERNEL);
+ if (!txq->tso_hdrs) {
+ ret = -ENOMEM;
+ goto alloc_failed;
+ }
+ }
+
+ for (i = 0; i < fep->num_rx_queues; i++) {
+ fep->rx_queue[i] = kzalloc(sizeof(*fep->rx_queue[i]),
+ GFP_KERNEL);
+ if (!fep->rx_queue[i]) {
+ ret = -ENOMEM;
+ goto alloc_failed;
+ }
+
+ fep->rx_queue[i]->rx_ring_size = RX_RING_SIZE;
+ fep->total_rx_ring_size += fep->rx_queue[i]->rx_ring_size;
+ }
+ return ret;
+
+alloc_failed:
+ fec_enet_free_queue(ndev);
+ return ret;
+}
+
+static int
+fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ unsigned int i;
+ struct sk_buff *skb;
+ struct bufdesc *bdp;
+ struct fec_enet_priv_rx_q *rxq;
+
+ rxq = fep->rx_queue[queue];
+ bdp = rxq->rx_bd_base;
+ for (i = 0; i < rxq->rx_ring_size; i++) {
+ skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
+ if (!skb)
+ goto err_alloc;
+
+ if (fec_enet_new_rxbdp(ndev, bdp, skb)) {
+ dev_kfree_skb(skb);
+ goto err_alloc;
+ }
+
+ rxq->rx_skbuff[i] = skb;
+ bdp->cbd_sc = BD_ENET_RX_EMPTY;
+
+ if (fep->bufdesc_ex) {
+ struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+ ebdp->cbd_esc = BD_ENET_RX_INT;
+ }
+
+ bdp = fec_enet_get_nextdesc(bdp, fep, queue);
+ }
+
+ /* Set the last buffer to wrap. */
+ bdp = fec_enet_get_prevdesc(bdp, fep, queue);
+ bdp->cbd_sc |= BD_SC_WRAP;
+ return 0;
+
+ err_alloc:
+ fec_enet_free_buffers(ndev);
+ return -ENOMEM;
+}
+
+static int
+fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ unsigned int i;
+ struct bufdesc *bdp;
+ struct fec_enet_priv_tx_q *txq;
+
+ txq = fep->tx_queue[queue];
+ bdp = txq->tx_bd_base;
+ for (i = 0; i < txq->tx_ring_size; i++) {
+ txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
+ if (!txq->tx_bounce[i])
+ goto err_alloc;
+
+ bdp->cbd_sc = 0;
+ bdp->cbd_bufaddr = 0;
+
+ if (fep->bufdesc_ex) {
+ struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+ ebdp->cbd_esc = BD_ENET_TX_INT;
+ }
+
+ bdp = fec_enet_get_nextdesc(bdp, fep, queue);
+ }
+
+ /* Set the last buffer to wrap. */
+ bdp = fec_enet_get_prevdesc(bdp, fep, queue);
+ bdp->cbd_sc |= BD_SC_WRAP;
+
+ return 0;
+
+ err_alloc:
+ fec_enet_free_buffers(ndev);
+ return -ENOMEM;
+}
+
+static int fec_enet_alloc_buffers(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ unsigned int i;
+
+ for (i = 0; i < fep->num_rx_queues; i++)
+ if (fec_enet_alloc_rxq_buffers(ndev, i))
+ return -ENOMEM;
+
+ for (i = 0; i < fep->num_tx_queues; i++)
+ if (fec_enet_alloc_txq_buffers(ndev, i))
+ return -ENOMEM;
+ return 0;
+}
+
+static int
+fec_enet_open(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ int ret;
+
+ pinctrl_pm_select_default_state(&fep->pdev->dev);
+ ret = fec_enet_clk_enable(ndev, true);
+ if (ret)
+ return ret;
+
+ /* I should reset the ring buffers here, but I don't yet know
+ * a simple way to do that.
+ */
+
+ ret = fec_enet_alloc_buffers(ndev);
+ if (ret)
+ goto err_enet_alloc;
+
+ /* Probe and connect to the PHY when opening the interface */
+ ret = fec_enet_mii_probe(ndev);
+ if (ret)
+ goto err_enet_mii_probe;
+
+ fec_restart(ndev);
+ napi_enable(&fep->napi);
+ phy_start(fep->phy_dev);
+ netif_tx_start_all_queues(ndev);
+
+ device_set_wakeup_enable(&ndev->dev, fep->wol_flag &
+ FEC_WOL_FLAG_ENABLE);
+
+ return 0;
+
+err_enet_mii_probe:
+ fec_enet_free_buffers(ndev);
+err_enet_alloc:
+ fec_enet_clk_enable(ndev, false);
+ pinctrl_pm_select_sleep_state(&fep->pdev->dev);
+ return ret;
+}
+
+static int
+fec_enet_close(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ phy_stop(fep->phy_dev);
+
+ if (netif_device_present(ndev)) {
+ napi_disable(&fep->napi);
+ netif_tx_disable(ndev);
+ fec_stop(ndev);
+ }
+
+ phy_disconnect(fep->phy_dev);
+ fep->phy_dev = NULL;
+
+ fec_enet_clk_enable(ndev, false);
+ pinctrl_pm_select_sleep_state(&fep->pdev->dev);
+ fec_enet_free_buffers(ndev);
+
+ return 0;
+}
+
+/* Set or clear the multicast filter for this adaptor.
+ * Skeleton taken from sunlance driver.
+ * The CPM Ethernet implementation allows Multicast as well as individual
+ * MAC address filtering. Some of the drivers check to make sure it is
+ * a group multicast address, and discard those that are not. I guess I
+ * will do the same for now, but just remove the test if you want
+ * individual filtering as well (do the upper net layers want or support
+ * this kind of feature?).
+ */
+
+#define HASH_BITS 6 /* #bits in hash */
+#define CRC32_POLY 0xEDB88320
+
+static void set_multicast_list(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct netdev_hw_addr *ha;
+ unsigned int i, bit, data, crc, tmp;
+ unsigned char hash;
+
+ if (ndev->flags & IFF_PROMISC) {
+ tmp = readl(fep->hwp + FEC_R_CNTRL);
+ tmp |= 0x8;
+ writel(tmp, fep->hwp + FEC_R_CNTRL);
+ return;
+ }
+
+ tmp = readl(fep->hwp + FEC_R_CNTRL);
+ tmp &= ~0x8;
+ writel(tmp, fep->hwp + FEC_R_CNTRL);
+
+ if (ndev->flags & IFF_ALLMULTI) {
+ /* Catch all multicast addresses, so set the
+ * filter to all 1's
+ */
+ writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
+ writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
+
+ return;
+ }
+
+ /* Clear the filter and add the addresses to the hash registers */
+ writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
+ writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
+
+ netdev_for_each_mc_addr(ha, ndev) {
+ /* calculate crc32 value of mac address */
+ crc = 0xffffffff;
+
+ for (i = 0; i < ndev->addr_len; i++) {
+ data = ha->addr[i];
+ for (bit = 0; bit < 8; bit++, data >>= 1) {
+ crc = (crc >> 1) ^
+ (((crc ^ data) & 1) ? CRC32_POLY : 0);
+ }
+ }
+
+ /* only upper 6 bits (HASH_BITS) are used
+ * which point to a specific bit in the hash registers
+ */
+ hash = (crc >> (32 - HASH_BITS)) & 0x3f;
+
+ if (hash > 31) {
+ tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
+ tmp |= 1 << (hash - 32);
+ writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
+ } else {
+ tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_LOW);
+ tmp |= 1 << hash;
+ writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
+ }
+ }
+}
+
+/* Set a MAC change in hardware. */
+static int
+fec_set_mac_address(struct net_device *ndev, void *p)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct sockaddr *addr = p;
+
+ if (addr) {
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+ memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
+ }
+
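+ /* Program the address registers: bytes 0-3 go to ADDR_LOW and bytes
+ * 4-5 to the upper half of ADDR_HIGH, most significant byte first.
+ */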
+ writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) |
+ (ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24),
+ fep->hwp + FEC_ADDR_LOW);
+ writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24),
+ fep->hwp + FEC_ADDR_HIGH);
+ return 0;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/**
+ * fec_poll_controller - FEC Poll controller function
+ * @dev: The FEC network adapter
+ *
+ * Polled functionality used by netconsole and others in non-interrupt mode
+ *
+ */
+static void fec_poll_controller(struct net_device *dev)
+{
+ int i;
+ struct fec_enet_private *fep = netdev_priv(dev);
+
+ for (i = 0; i < FEC_IRQ_NUM; i++) {
+ if (fep->irq[i] > 0) {
+ disable_irq(fep->irq[i]);
+ fec_enet_interrupt(fep->irq[i], dev);
+ enable_irq(fep->irq[i]);
+ }
+ }
+}
+#endif
+
+#define FEATURES_NEED_QUIESCE NETIF_F_RXCSUM
+static inline void fec_enet_set_netdev_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct fec_enet_private *fep = netdev_priv(netdev);
+ netdev_features_t changed = features ^ netdev->features;
+
+ netdev->features = features;
+
+ /* Receive checksum has been changed */
+ if (changed & NETIF_F_RXCSUM) {
+ if (features & NETIF_F_RXCSUM)
+ fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
+ else
+ fep->csum_flags &= ~FLAG_RX_CSUM_ENABLED;
+ }
+}
+
+static int fec_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct fec_enet_private *fep = netdev_priv(netdev);
+ netdev_features_t changed = features ^ netdev->features;
+
+ if (netif_running(netdev) && changed & FEATURES_NEED_QUIESCE) {
+ napi_disable(&fep->napi);
+ netif_tx_lock_bh(netdev);
+ fec_stop(netdev);
+ fec_enet_set_netdev_features(netdev, features);
+ fec_restart(netdev);
+ netif_tx_wake_all_queues(netdev);
+ netif_tx_unlock_bh(netdev);
+ napi_enable(&fep->napi);
+ } else {
+ fec_enet_set_netdev_features(netdev, features);
+ }
+
+ return 0;
+}
+
+static const struct net_device_ops fec_netdev_ops = {
+ .ndo_open = fec_enet_open,
+ .ndo_stop = fec_enet_close,
+ .ndo_start_xmit = fec_enet_start_xmit,
+ .ndo_set_rx_mode = set_multicast_list,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_tx_timeout = fec_timeout,
+ .ndo_set_mac_address = fec_set_mac_address,
+ .ndo_do_ioctl = fec_enet_ioctl,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = fec_poll_controller,
+#endif
+ .ndo_set_features = fec_set_features,
+};
+
+ /*
+ * XXX: We need to clean up on failure exits here.
+ *
+ */
+static int fec_enet_init(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct fec_enet_priv_tx_q *txq;
+ struct fec_enet_priv_rx_q *rxq;
+ struct bufdesc *cbd_base;
+ dma_addr_t bd_dma;
+ int bd_size;
+ unsigned int i;
+
+#if defined(CONFIG_ARM)
+ fep->rx_align = 0xf;
+ fep->tx_align = 0xf;
+#else
+ fep->rx_align = 0x3;
+ fep->tx_align = 0x3;
+#endif
+
+ fec_enet_alloc_queue(ndev);
+
+ if (fep->bufdesc_ex)
+ fep->bufdesc_size = sizeof(struct bufdesc_ex);
+ else
+ fep->bufdesc_size = sizeof(struct bufdesc);
+ bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) *
+ fep->bufdesc_size;
+
+ /* Allocate memory for buffer descriptors. */
+ cbd_base = dma_alloc_coherent(NULL, bd_size, &bd_dma,
+ GFP_KERNEL);
+ if (!cbd_base) {
+ return -ENOMEM;
+ }
+
+ memset(cbd_base, 0, bd_size);
+
+ /* Get the Ethernet address */
+ fec_get_mac(ndev);
+ /* make sure MAC we just acquired is programmed into the hw */
+ fec_set_mac_address(ndev, NULL);
+
+ /* Set receive and transmit descriptor base. */
+ for (i = 0; i < fep->num_rx_queues; i++) {
+ rxq = fep->rx_queue[i];
+ rxq->index = i;
+ rxq->rx_bd_base = (struct bufdesc *)cbd_base;
+ rxq->bd_dma = bd_dma;
+ if (fep->bufdesc_ex) {
+ bd_dma += sizeof(struct bufdesc_ex) * rxq->rx_ring_size;
+ cbd_base = (struct bufdesc *)
+ (((struct bufdesc_ex *)cbd_base) + rxq->rx_ring_size);
+ } else {
+ bd_dma += sizeof(struct bufdesc) * rxq->rx_ring_size;
+ cbd_base += rxq->rx_ring_size;
+ }
+ }
+
+ for (i = 0; i < fep->num_tx_queues; i++) {
+ txq = fep->tx_queue[i];
+ txq->index = i;
+ txq->tx_bd_base = (struct bufdesc *)cbd_base;
+ txq->bd_dma = bd_dma;
+ if (fep->bufdesc_ex) {
+ bd_dma += sizeof(struct bufdesc_ex) * txq->tx_ring_size;
+ cbd_base = (struct bufdesc *)
+ (((struct bufdesc_ex *)cbd_base) + txq->tx_ring_size);
+ } else {
+ bd_dma += sizeof(struct bufdesc) * txq->tx_ring_size;
+ cbd_base += txq->tx_ring_size;
+ }
+ }
+
+
+ /* The FEC Ethernet specific entries in the device structure */
+ ndev->watchdog_timeo = TX_TIMEOUT;
+ ndev->netdev_ops = &fec_netdev_ops;
+ ndev->ethtool_ops = &fec_enet_ethtool_ops;
+
+ writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
+ netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, NAPI_POLL_WEIGHT);
+
+ if (fep->quirks & FEC_QUIRK_HAS_VLAN)
+ /* enable hw VLAN support */
+ ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
+
+ if (fep->quirks & FEC_QUIRK_HAS_CSUM) {
+ ndev->gso_max_segs = FEC_MAX_TSO_SEGS;
+
+ /* enable hw accelerator */
+ ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
+ | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO);
+ fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
+ }
+
+ if (fep->quirks & FEC_QUIRK_HAS_AVB) {
+ fep->tx_align = 0;
+ fep->rx_align = 0x3f;
+ }
+
+ ndev->hw_features = ndev->features;
+
+ fec_restart(ndev);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static void fec_reset_phy(struct platform_device *pdev)
+{
+ int err, phy_reset;
+ int msec = 1;
+ struct device_node *np = pdev->dev.of_node;
+
+ if (!np)
+ return;
+
+ of_property_read_u32(np, "phy-reset-duration", &msec);
+ /* A sane reset duration should not be longer than 1s */
+ if (msec > 1000)
+ msec = 1;
+
+ phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0);
+ if (!gpio_is_valid(phy_reset))
+ return;
+
+ err = devm_gpio_request_one(&pdev->dev, phy_reset,
+ GPIOF_OUT_INIT_LOW, "phy-reset");
+ if (err) {
+ dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err);
+ return;
+ }
+ msleep(msec);
+ gpio_set_value(phy_reset, 1);
+}
+#else /* CONFIG_OF */
+static void fec_reset_phy(struct platform_device *pdev)
+{
+ /*
+ * In case of platform probe, the reset has been done
+ * by machine code.
+ */
+}
+#endif /* CONFIG_OF */
+
+static void
+fec_enet_get_queue_num(struct platform_device *pdev, int *num_tx, int *num_rx)
+{
+ struct device_node *np = pdev->dev.of_node;
+ int err;
+
+ *num_tx = *num_rx = 1;
+
+ if (!np || !of_device_is_available(np))
+ return;
+
+ /* parse the num of tx and rx queues */
+ err = of_property_read_u32(np, "fsl,num-tx-queues", num_tx);
+ if (err)
+ *num_tx = 1;
+
+ err = of_property_read_u32(np, "fsl,num-rx-queues", num_rx);
+ if (err)
+ *num_rx = 1;
+
+ if (*num_tx < 1 || *num_tx > FEC_ENET_MAX_TX_QS) {
+ dev_warn(&pdev->dev, "Invalid num_tx(=%d), fall back to 1\n",
+ *num_tx);
+ *num_tx = 1;
+ return;
+ }
+
+ if (*num_rx < 1 || *num_rx > FEC_ENET_MAX_RX_QS) {
+ dev_warn(&pdev->dev, "Invalid num_rx(=%d), fall back to 1\n",
+ *num_rx);
+ *num_rx = 1;
+ return;
+ }
+
+}
+
+static int
+fec_probe(struct platform_device *pdev)
+{
+ struct fec_enet_private *fep;
+ struct fec_platform_data *pdata;
+ struct net_device *ndev;
+ int i, irq, ret = 0;
+ struct resource *r;
+ const struct of_device_id *of_id;
+ static int dev_id;
+ struct device_node *np = pdev->dev.of_node, *phy_node;
+ int num_tx_qs;
+ int num_rx_qs;
+
+ fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs);
+
+ /* Init network device */
+ ndev = alloc_etherdev_mqs(sizeof(struct fec_enet_private),
+ num_tx_qs, num_rx_qs);
+ if (!ndev)
+ return -ENOMEM;
+
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ /* setup board info structure */
+ fep = netdev_priv(ndev);
+
+ of_id = of_match_device(fec_dt_ids, &pdev->dev);
+ if (of_id)
+ pdev->id_entry = of_id->data;
+ fep->quirks = pdev->id_entry->driver_data;
+
+ fep->netdev = ndev;
+ fep->num_rx_queues = num_rx_qs;
+ fep->num_tx_queues = num_tx_qs;
+
+#if !defined(CONFIG_M5272)
+ /* default enable pause frame auto negotiation */
+ if (fep->quirks & FEC_QUIRK_HAS_GBIT)
+ fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
+#endif
+
+ /* Select default pin state */
+ pinctrl_pm_select_default_state(&pdev->dev);
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ fep->hwp = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(fep->hwp)) {
+ ret = PTR_ERR(fep->hwp);
+ goto failed_ioremap;
+ }
+
+ fep->pdev = pdev;
+ fep->dev_id = dev_id++;
+
+ platform_set_drvdata(pdev, ndev);
+
+ if (of_get_property(np, "fsl,magic-packet", NULL))
+ fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET;
+
+ phy_node = of_parse_phandle(np, "phy-handle", 0);
+ if (!phy_node && of_phy_is_fixed_link(np)) {
+ ret = of_phy_register_fixed_link(np);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "broken fixed-link specification\n");
+ goto failed_phy;
+ }
+ phy_node = of_node_get(np);
+ }
+ fep->phy_node = phy_node;
+
+ ret = of_get_phy_mode(pdev->dev.of_node);
+ if (ret < 0) {
+ pdata = dev_get_platdata(&pdev->dev);
+ if (pdata)
+ fep->phy_interface = pdata->phy;
+ else
+ fep->phy_interface = PHY_INTERFACE_MODE_MII;
+ } else {
+ fep->phy_interface = ret;
+ }
+
+ fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
+ if (IS_ERR(fep->clk_ipg)) {
+ ret = PTR_ERR(fep->clk_ipg);
+ goto failed_clk;
+ }
+
+ fep->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
+ if (IS_ERR(fep->clk_ahb)) {
+ ret = PTR_ERR(fep->clk_ahb);
+ goto failed_clk;
+ }
+
+ fep->itr_clk_rate = clk_get_rate(fep->clk_ahb);
+
+ /* enet_out is optional, depends on board */
+ fep->clk_enet_out = devm_clk_get(&pdev->dev, "enet_out");
+ if (IS_ERR(fep->clk_enet_out))
+ fep->clk_enet_out = NULL;
+
+ fep->ptp_clk_on = false;
+ mutex_init(&fep->ptp_clk_mutex);
+
+ /* clk_ref is optional, depends on board */
+ fep->clk_ref = devm_clk_get(&pdev->dev, "enet_clk_ref");
+ if (IS_ERR(fep->clk_ref))
+ fep->clk_ref = NULL;
+
+ fep->bufdesc_ex = fep->quirks & FEC_QUIRK_HAS_BUFDESC_EX;
+ fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp");
+ if (IS_ERR(fep->clk_ptp)) {
+ fep->clk_ptp = NULL;
+ fep->bufdesc_ex = false;
+ }
+
+ ret = fec_enet_clk_enable(ndev, true);
+ if (ret)
+ goto failed_clk;
+
+ fep->reg_phy = devm_regulator_get(&pdev->dev, "phy");
+ if (!IS_ERR(fep->reg_phy)) {
+ ret = regulator_enable(fep->reg_phy);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Failed to enable phy regulator: %d\n", ret);
+ goto failed_regulator;
+ }
+ } else {
+ fep->reg_phy = NULL;
+ }
+
+ fec_reset_phy(pdev);
+
+ if (fep->bufdesc_ex)
+ fec_ptp_init(pdev);
+
+ ret = fec_enet_init(ndev);
+ if (ret)
+ goto failed_init;
+
+ for (i = 0; i < FEC_IRQ_NUM; i++) {
+ irq = platform_get_irq(pdev, i);
+ if (irq < 0) {
+ if (i)
+ break;
+ ret = irq;
+ goto failed_irq;
+ }
+ ret = devm_request_irq(&pdev->dev, irq, fec_enet_interrupt,
+ 0, pdev->name, ndev);
+ if (ret)
+ goto failed_irq;
+
+ fep->irq[i] = irq;
+ }
+
+ init_completion(&fep->mdio_done);
+ ret = fec_enet_mii_init(pdev);
+ if (ret)
+ goto failed_mii_init;
+
+ /* Carrier starts down, phylib will bring it up */
+ netif_carrier_off(ndev);
+ fec_enet_clk_enable(ndev, false);
+ pinctrl_pm_select_sleep_state(&pdev->dev);
+
+ ret = register_netdev(ndev);
+ if (ret)
+ goto failed_register;
+
+ device_init_wakeup(&ndev->dev, fep->wol_flag &
+ FEC_WOL_HAS_MAGIC_PACKET);
+
+ if (fep->bufdesc_ex && fep->ptp_clock)
+ netdev_info(ndev, "registered PHC device %d\n", fep->dev_id);
+
+ fep->rx_copybreak = COPYBREAK_DEFAULT;
+ INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work);
+ return 0;
+
+failed_register:
+ fec_enet_mii_remove(fep);
+failed_mii_init:
+failed_irq:
+failed_init:
+ if (fep->reg_phy)
+ regulator_disable(fep->reg_phy);
+failed_regulator:
+ fec_enet_clk_enable(ndev, false);
+failed_clk:
+failed_phy:
+ of_node_put(phy_node);
+failed_ioremap:
+ free_netdev(ndev);
+
+ return ret;
+}
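The error path above follows the usual kernel goto-unwind ladder: labels appear in reverse order of acquisition, so a failure at any step releases only what was already set up before falling through to free_netdev(). A toy, self-contained illustration of that idiom (placeholder resources, not the driver's real ones):

#include <stdio.h>

/* Placeholder "resources"; the real probe acquires clocks, a regulator, IRQs, ... */
static int acquire(const char *what, int fail)
{
	printf("acquire %s\n", what);
	return fail ? -1 : 0;
}

static void release(const char *what)
{
	printf("release %s\n", what);
}

static int example_probe(int fail_at_c)
{
	int ret;

	ret = acquire("a", 0);
	if (ret)
		return ret;

	ret = acquire("b", 0);
	if (ret)
		goto err_a;

	ret = acquire("c", fail_at_c);
	if (ret)
		goto err_b;

	return 0;

err_b:
	release("b");	/* undo step 2 */
err_a:
	release("a");	/* undo step 1 */
	return ret;
}

int main(void)
{
	example_probe(1);	/* force the failure path to show the unwind order */
	return 0;
}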
+
+static int
+fec_drv_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ cancel_delayed_work_sync(&fep->time_keep);
+ cancel_work_sync(&fep->tx_timeout_work);
+ unregister_netdev(ndev);
+ fec_enet_mii_remove(fep);
+ if (fep->reg_phy)
+ regulator_disable(fep->reg_phy);
+ if (fep->ptp_clock)
+ ptp_clock_unregister(fep->ptp_clock);
+ of_node_put(fep->phy_node);
+ free_netdev(ndev);
+
+ return 0;
+}
+
+static int __maybe_unused fec_suspend(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ rtnl_lock();
+ if (netif_running(ndev)) {
+ if (fep->wol_flag & FEC_WOL_FLAG_ENABLE)
+ fep->wol_flag |= FEC_WOL_FLAG_SLEEP_ON;
+ phy_stop(fep->phy_dev);
+ napi_disable(&fep->napi);
+ netif_tx_lock_bh(ndev);
+ netif_device_detach(ndev);
+ netif_tx_unlock_bh(ndev);
+ fec_stop(ndev);
+ fec_enet_clk_enable(ndev, false);
+ if (!(fep->wol_flag & FEC_WOL_FLAG_ENABLE))
+ pinctrl_pm_select_sleep_state(&fep->pdev->dev);
+ }
+ rtnl_unlock();
+
+ if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE))
+ regulator_disable(fep->reg_phy);
+
+	/* The SoC supplies the clock to the PHY: when that clock is disabled, the PHY link goes down.
+	 * The SoC also controls the PHY regulator: when the regulator is disabled, the PHY link goes down.
+	 */
+ if (fep->clk_enet_out || fep->reg_phy)
+ fep->link = 0;
+
+ return 0;
+}
+
+static int __maybe_unused fec_resume(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
+ int ret;
+ int val;
+
+ if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) {
+ ret = regulator_enable(fep->reg_phy);
+ if (ret)
+ return ret;
+ }
+
+ rtnl_lock();
+ if (netif_running(ndev)) {
+ ret = fec_enet_clk_enable(ndev, true);
+ if (ret) {
+ rtnl_unlock();
+ goto failed_clk;
+ }
+ if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) {
+ if (pdata && pdata->sleep_mode_enable)
+ pdata->sleep_mode_enable(false);
+ val = readl(fep->hwp + FEC_ECNTRL);
+ val &= ~(FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
+ writel(val, fep->hwp + FEC_ECNTRL);
+ fep->wol_flag &= ~FEC_WOL_FLAG_SLEEP_ON;
+ } else {
+ pinctrl_pm_select_default_state(&fep->pdev->dev);
+ }
+ fec_restart(ndev);
+ netif_tx_lock_bh(ndev);
+ netif_device_attach(ndev);
+ netif_tx_unlock_bh(ndev);
+ napi_enable(&fep->napi);
+ phy_start(fep->phy_dev);
+ }
+ rtnl_unlock();
+
+ return 0;
+
+failed_clk:
+ if (fep->reg_phy)
+ regulator_disable(fep->reg_phy);
+ return ret;
+}
+
+static SIMPLE_DEV_PM_OPS(fec_pm_ops, fec_suspend, fec_resume);
+
+static struct platform_driver fec_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .pm = &fec_pm_ops,
+ .of_match_table = fec_dt_ids,
+ },
+ .id_table = fec_devtype,
+ .probe = fec_probe,
+ .remove = fec_drv_remove,
+};
+
+module_platform_driver(fec_driver);
+
+MODULE_ALIAS("platform:"DRIVER_NAME);
+MODULE_LICENSE("GPL");
diff --git a/kernel/drivers/net/ethernet/freescale/fec_mpc52xx.c b/kernel/drivers/net/ethernet/freescale/fec_mpc52xx.c
new file mode 100644
index 000000000..afe7f39cd
--- /dev/null
+++ b/kernel/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -0,0 +1,1116 @@
+/*
+ * Driver for the MPC5200 Fast Ethernet Controller
+ *
+ * Originally written by Dale Farnsworth <dfarnsworth@mvista.com> and
+ * now maintained by Sylvain Munaut <tnt@246tNt.com>
+ *
+ * Copyright (C) 2007 Domen Puncer, Telargo, Inc.
+ * Copyright (C) 2007 Sylvain Munaut <tnt@246tNt.com>
+ * Copyright (C) 2003-2004 MontaVista, Software, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/crc32.h>
+#include <linux/hardirq.h>
+#include <linux/delay.h>
+#include <linux/of_device.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/of_platform.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/skbuff.h>
+
+#include <asm/io.h>
+#include <asm/delay.h>
+#include <asm/mpc52xx.h>
+
+#include <linux/fsl/bestcomm/bestcomm.h>
+#include <linux/fsl/bestcomm/fec.h>
+
+#include "fec_mpc52xx.h"
+
+#define DRIVER_NAME "mpc52xx-fec"
+
+/* Private driver data structure */
+struct mpc52xx_fec_priv {
+ struct net_device *ndev;
+ int duplex;
+ int speed;
+ int r_irq;
+ int t_irq;
+ struct mpc52xx_fec __iomem *fec;
+ struct bcom_task *rx_dmatsk;
+ struct bcom_task *tx_dmatsk;
+ spinlock_t lock;
+ int msg_enable;
+
+ /* MDIO link details */
+ unsigned int mdio_speed;
+ struct device_node *phy_node;
+ struct phy_device *phydev;
+ enum phy_state link;
+ int seven_wire_mode;
+};
+
+
+static irqreturn_t mpc52xx_fec_interrupt(int, void *);
+static irqreturn_t mpc52xx_fec_rx_interrupt(int, void *);
+static irqreturn_t mpc52xx_fec_tx_interrupt(int, void *);
+static void mpc52xx_fec_stop(struct net_device *dev);
+static void mpc52xx_fec_start(struct net_device *dev);
+static void mpc52xx_fec_reset(struct net_device *dev);
+
+#define MPC52xx_MESSAGES_DEFAULT ( NETIF_MSG_DRV | NETIF_MSG_PROBE | \
+ NETIF_MSG_LINK | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)
+static int debug = -1; /* the above default */
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "debugging messages level");
+
+static void mpc52xx_fec_tx_timeout(struct net_device *dev)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ unsigned long flags;
+
+ dev_warn(&dev->dev, "transmit timed out\n");
+
+ spin_lock_irqsave(&priv->lock, flags);
+ mpc52xx_fec_reset(dev);
+ dev->stats.tx_errors++;
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ netif_wake_queue(dev);
+}
+
+static void mpc52xx_fec_set_paddr(struct net_device *dev, u8 *mac)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ struct mpc52xx_fec __iomem *fec = priv->fec;
+
+ out_be32(&fec->paddr1, *(u32 *)(&mac[0]));
+ out_be32(&fec->paddr2, (*(u16 *)(&mac[4]) << 16) | FEC_PADDR2_TYPE);
+}
+
+static int mpc52xx_fec_set_mac_address(struct net_device *dev, void *addr)
+{
+ struct sockaddr *sock = addr;
+
+ memcpy(dev->dev_addr, sock->sa_data, dev->addr_len);
+
+ mpc52xx_fec_set_paddr(dev, sock->sa_data);
+ return 0;
+}
+
+static void mpc52xx_fec_free_rx_buffers(struct net_device *dev, struct bcom_task *s)
+{
+ while (!bcom_queue_empty(s)) {
+ struct bcom_fec_bd *bd;
+ struct sk_buff *skb;
+
+ skb = bcom_retrieve_buffer(s, NULL, (struct bcom_bd **)&bd);
+ dma_unmap_single(dev->dev.parent, bd->skb_pa, skb->len,
+ DMA_FROM_DEVICE);
+ kfree_skb(skb);
+ }
+}
+
+static void
+mpc52xx_fec_rx_submit(struct net_device *dev, struct sk_buff *rskb)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ struct bcom_fec_bd *bd;
+
+ bd = (struct bcom_fec_bd *) bcom_prepare_next_buffer(priv->rx_dmatsk);
+ bd->status = FEC_RX_BUFFER_SIZE;
+ bd->skb_pa = dma_map_single(dev->dev.parent, rskb->data,
+ FEC_RX_BUFFER_SIZE, DMA_FROM_DEVICE);
+ bcom_submit_next_buffer(priv->rx_dmatsk, rskb);
+}
+
+static int mpc52xx_fec_alloc_rx_buffers(struct net_device *dev, struct bcom_task *rxtsk)
+{
+ struct sk_buff *skb;
+
+ while (!bcom_queue_full(rxtsk)) {
+ skb = netdev_alloc_skb(dev, FEC_RX_BUFFER_SIZE);
+ if (!skb)
+ return -EAGAIN;
+
+ /* zero out the initial receive buffers to aid debugging */
+ memset(skb->data, 0, FEC_RX_BUFFER_SIZE);
+ mpc52xx_fec_rx_submit(dev, skb);
+ }
+ return 0;
+}
+
+/* based on generic_adjust_link from fs_enet-main.c */
+static void mpc52xx_fec_adjust_link(struct net_device *dev)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ struct phy_device *phydev = priv->phydev;
+ int new_state = 0;
+
+ if (phydev->link != PHY_DOWN) {
+ if (phydev->duplex != priv->duplex) {
+ struct mpc52xx_fec __iomem *fec = priv->fec;
+ u32 rcntrl;
+ u32 tcntrl;
+
+ new_state = 1;
+ priv->duplex = phydev->duplex;
+
+ rcntrl = in_be32(&fec->r_cntrl);
+ tcntrl = in_be32(&fec->x_cntrl);
+
+ rcntrl &= ~FEC_RCNTRL_DRT;
+ tcntrl &= ~FEC_TCNTRL_FDEN;
+ if (phydev->duplex == DUPLEX_FULL)
+ tcntrl |= FEC_TCNTRL_FDEN; /* FD enable */
+ else
+ rcntrl |= FEC_RCNTRL_DRT; /* disable Rx on Tx (HD) */
+
+ out_be32(&fec->r_cntrl, rcntrl);
+ out_be32(&fec->x_cntrl, tcntrl);
+ }
+
+ if (phydev->speed != priv->speed) {
+ new_state = 1;
+ priv->speed = phydev->speed;
+ }
+
+ if (priv->link == PHY_DOWN) {
+ new_state = 1;
+ priv->link = phydev->link;
+ }
+
+ } else if (priv->link) {
+ new_state = 1;
+ priv->link = PHY_DOWN;
+ priv->speed = 0;
+ priv->duplex = -1;
+ }
+
+ if (new_state && netif_msg_link(priv))
+ phy_print_status(phydev);
+}
+
+static int mpc52xx_fec_open(struct net_device *dev)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ int err = -EBUSY;
+
+ if (priv->phy_node) {
+ priv->phydev = of_phy_connect(priv->ndev, priv->phy_node,
+ mpc52xx_fec_adjust_link, 0, 0);
+ if (!priv->phydev) {
+ dev_err(&dev->dev, "of_phy_connect failed\n");
+ return -ENODEV;
+ }
+ phy_start(priv->phydev);
+ }
+
+ if (request_irq(dev->irq, mpc52xx_fec_interrupt, IRQF_SHARED,
+ DRIVER_NAME "_ctrl", dev)) {
+ dev_err(&dev->dev, "ctrl interrupt request failed\n");
+ goto free_phy;
+ }
+ if (request_irq(priv->r_irq, mpc52xx_fec_rx_interrupt, 0,
+ DRIVER_NAME "_rx", dev)) {
+ dev_err(&dev->dev, "rx interrupt request failed\n");
+ goto free_ctrl_irq;
+ }
+ if (request_irq(priv->t_irq, mpc52xx_fec_tx_interrupt, 0,
+ DRIVER_NAME "_tx", dev)) {
+ dev_err(&dev->dev, "tx interrupt request failed\n");
+ goto free_2irqs;
+ }
+
+ bcom_fec_rx_reset(priv->rx_dmatsk);
+ bcom_fec_tx_reset(priv->tx_dmatsk);
+
+ err = mpc52xx_fec_alloc_rx_buffers(dev, priv->rx_dmatsk);
+ if (err) {
+ dev_err(&dev->dev, "mpc52xx_fec_alloc_rx_buffers failed\n");
+ goto free_irqs;
+ }
+
+ bcom_enable(priv->rx_dmatsk);
+ bcom_enable(priv->tx_dmatsk);
+
+ mpc52xx_fec_start(dev);
+
+ netif_start_queue(dev);
+
+ return 0;
+
+ free_irqs:
+ free_irq(priv->t_irq, dev);
+ free_2irqs:
+ free_irq(priv->r_irq, dev);
+ free_ctrl_irq:
+ free_irq(dev->irq, dev);
+ free_phy:
+ if (priv->phydev) {
+ phy_stop(priv->phydev);
+ phy_disconnect(priv->phydev);
+ priv->phydev = NULL;
+ }
+
+ return err;
+}
+
+static int mpc52xx_fec_close(struct net_device *dev)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+
+ mpc52xx_fec_stop(dev);
+
+ mpc52xx_fec_free_rx_buffers(dev, priv->rx_dmatsk);
+
+ free_irq(dev->irq, dev);
+ free_irq(priv->r_irq, dev);
+ free_irq(priv->t_irq, dev);
+
+ if (priv->phydev) {
+ /* power down phy */
+ phy_stop(priv->phydev);
+ phy_disconnect(priv->phydev);
+ priv->phydev = NULL;
+ }
+
+ return 0;
+}
+
+/* This will only be invoked if your driver is _not_ in XOFF state.
+ * What this means is that you need not check it, and that this
+ * invariant will hold if you make sure that the netif_*_queue()
+ * calls are done at the proper times.
+ */
+static int mpc52xx_fec_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ struct bcom_fec_bd *bd;
+ unsigned long flags;
+
+ if (bcom_queue_full(priv->tx_dmatsk)) {
+ if (net_ratelimit())
+ dev_err(&dev->dev, "transmit queue overrun\n");
+ return NETDEV_TX_BUSY;
+ }
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ bd = (struct bcom_fec_bd *)
+ bcom_prepare_next_buffer(priv->tx_dmatsk);
+
+ bd->status = skb->len | BCOM_FEC_TX_BD_TFD | BCOM_FEC_TX_BD_TC;
+ bd->skb_pa = dma_map_single(dev->dev.parent, skb->data, skb->len,
+ DMA_TO_DEVICE);
+
+ skb_tx_timestamp(skb);
+ bcom_submit_next_buffer(priv->tx_dmatsk, skb);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ if (bcom_queue_full(priv->tx_dmatsk)) {
+ netif_stop_queue(dev);
+ }
+
+ return NETDEV_TX_OK;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void mpc52xx_fec_poll_controller(struct net_device *dev)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+
+ disable_irq(priv->t_irq);
+ mpc52xx_fec_tx_interrupt(priv->t_irq, dev);
+ enable_irq(priv->t_irq);
+ disable_irq(priv->r_irq);
+ mpc52xx_fec_rx_interrupt(priv->r_irq, dev);
+ enable_irq(priv->r_irq);
+}
+#endif
+
+
+/* This handles BestComm transmit task interrupts
+ */
+static irqreturn_t mpc52xx_fec_tx_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+
+ spin_lock(&priv->lock);
+ while (bcom_buffer_done(priv->tx_dmatsk)) {
+ struct sk_buff *skb;
+ struct bcom_fec_bd *bd;
+ skb = bcom_retrieve_buffer(priv->tx_dmatsk, NULL,
+ (struct bcom_bd **)&bd);
+ dma_unmap_single(dev->dev.parent, bd->skb_pa, skb->len,
+ DMA_TO_DEVICE);
+
+ dev_kfree_skb_irq(skb);
+ }
+ spin_unlock(&priv->lock);
+
+ netif_wake_queue(dev);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ struct sk_buff *rskb; /* received sk_buff */
+ struct sk_buff *skb; /* new sk_buff to enqueue in its place */
+ struct bcom_fec_bd *bd;
+ u32 status, physaddr;
+ int length;
+
+ spin_lock(&priv->lock);
+
+ while (bcom_buffer_done(priv->rx_dmatsk)) {
+
+ rskb = bcom_retrieve_buffer(priv->rx_dmatsk, &status,
+ (struct bcom_bd **)&bd);
+ physaddr = bd->skb_pa;
+
+ /* Test for errors in received frame */
+ if (status & BCOM_FEC_RX_BD_ERRORS) {
+ /* Drop packet and reuse the buffer */
+ mpc52xx_fec_rx_submit(dev, rskb);
+ dev->stats.rx_dropped++;
+ continue;
+ }
+
+ /* skbs are allocated on open, so now we allocate a new one,
+ * and remove the old (with the packet) */
+ skb = netdev_alloc_skb(dev, FEC_RX_BUFFER_SIZE);
+ if (!skb) {
+ /* Can't get a new one : reuse the same & drop pkt */
+ dev_notice(&dev->dev, "Low memory - dropped packet.\n");
+ mpc52xx_fec_rx_submit(dev, rskb);
+ dev->stats.rx_dropped++;
+ continue;
+ }
+
+ /* Enqueue the new sk_buff back on the hardware */
+ mpc52xx_fec_rx_submit(dev, skb);
+
+ /* Process the received skb - Drop the spin lock while
+ * calling into the network stack */
+ spin_unlock(&priv->lock);
+
+ dma_unmap_single(dev->dev.parent, physaddr, rskb->len,
+ DMA_FROM_DEVICE);
+ length = status & BCOM_FEC_RX_BD_LEN_MASK;
+ skb_put(rskb, length - 4); /* length without CRC32 */
+ rskb->protocol = eth_type_trans(rskb, dev);
+ if (!skb_defer_rx_timestamp(rskb))
+ netif_rx(rskb);
+
+ spin_lock(&priv->lock);
+ }
+
+ spin_unlock(&priv->lock);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mpc52xx_fec_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ struct mpc52xx_fec __iomem *fec = priv->fec;
+ u32 ievent;
+
+ ievent = in_be32(&fec->ievent);
+
+ ievent &= ~FEC_IEVENT_MII; /* mii is handled separately */
+ if (!ievent)
+ return IRQ_NONE;
+
+ out_be32(&fec->ievent, ievent); /* clear pending events */
+
+ /* on fifo error, soft-reset fec */
+ if (ievent & (FEC_IEVENT_RFIFO_ERROR | FEC_IEVENT_XFIFO_ERROR)) {
+
+ if (net_ratelimit() && (ievent & FEC_IEVENT_RFIFO_ERROR))
+ dev_warn(&dev->dev, "FEC_IEVENT_RFIFO_ERROR\n");
+ if (net_ratelimit() && (ievent & FEC_IEVENT_XFIFO_ERROR))
+ dev_warn(&dev->dev, "FEC_IEVENT_XFIFO_ERROR\n");
+
+ spin_lock(&priv->lock);
+ mpc52xx_fec_reset(dev);
+ spin_unlock(&priv->lock);
+
+ return IRQ_HANDLED;
+ }
+
+ if (ievent & ~FEC_IEVENT_TFINT)
+ dev_dbg(&dev->dev, "ievent: %08x\n", ievent);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Get the current statistics.
+ * This may be called with the card open or closed.
+ */
+static struct net_device_stats *mpc52xx_fec_get_stats(struct net_device *dev)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ struct mpc52xx_fec __iomem *fec = priv->fec;
+
+ stats->rx_bytes = in_be32(&fec->rmon_r_octets);
+ stats->rx_packets = in_be32(&fec->rmon_r_packets);
+ stats->rx_errors = in_be32(&fec->rmon_r_crc_align) +
+ in_be32(&fec->rmon_r_undersize) +
+ in_be32(&fec->rmon_r_oversize) +
+ in_be32(&fec->rmon_r_frag) +
+ in_be32(&fec->rmon_r_jab);
+
+ stats->tx_bytes = in_be32(&fec->rmon_t_octets);
+ stats->tx_packets = in_be32(&fec->rmon_t_packets);
+ stats->tx_errors = in_be32(&fec->rmon_t_crc_align) +
+ in_be32(&fec->rmon_t_undersize) +
+ in_be32(&fec->rmon_t_oversize) +
+ in_be32(&fec->rmon_t_frag) +
+ in_be32(&fec->rmon_t_jab);
+
+ stats->multicast = in_be32(&fec->rmon_r_mc_pkt);
+ stats->collisions = in_be32(&fec->rmon_t_col);
+
+ /* detailed rx_errors: */
+ stats->rx_length_errors = in_be32(&fec->rmon_r_undersize)
+ + in_be32(&fec->rmon_r_oversize)
+ + in_be32(&fec->rmon_r_frag)
+ + in_be32(&fec->rmon_r_jab);
+ stats->rx_over_errors = in_be32(&fec->r_macerr);
+ stats->rx_crc_errors = in_be32(&fec->ieee_r_crc);
+ stats->rx_frame_errors = in_be32(&fec->ieee_r_align);
+ stats->rx_fifo_errors = in_be32(&fec->rmon_r_drop);
+ stats->rx_missed_errors = in_be32(&fec->rmon_r_drop);
+
+ /* detailed tx_errors: */
+ stats->tx_aborted_errors = 0;
+ stats->tx_carrier_errors = in_be32(&fec->ieee_t_cserr);
+ stats->tx_fifo_errors = in_be32(&fec->rmon_t_drop);
+ stats->tx_heartbeat_errors = in_be32(&fec->ieee_t_sqe);
+ stats->tx_window_errors = in_be32(&fec->ieee_t_lcol);
+
+ return stats;
+}
+
+/*
+ * Read MIB counters in order to reset them,
+ * then zero all the stats fields in memory
+ */
+static void mpc52xx_fec_reset_stats(struct net_device *dev)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ struct mpc52xx_fec __iomem *fec = priv->fec;
+
+ out_be32(&fec->mib_control, FEC_MIB_DISABLE);
+ memset_io(&fec->rmon_t_drop, 0,
+ offsetof(struct mpc52xx_fec, reserved10) -
+ offsetof(struct mpc52xx_fec, rmon_t_drop));
+ out_be32(&fec->mib_control, 0);
+
+ memset(&dev->stats, 0, sizeof(dev->stats));
+}
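The memset_io() above sizes the MIB region purely from the register struct, as the distance between the first RMON counter and the reserved field that follows the block. A standalone sketch of that offsetof() arithmetic on a much smaller stand-in struct (the field names only echo the real layout):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Tiny stand-in layout to show the offsetof() sizing trick;
 * the real struct mpc52xx_fec has many more fields. */
struct mib_like {
	uint32_t before;
	uint32_t rmon_t_drop;
	uint32_t counters[10];
	uint32_t reserved10;
};

int main(void)
{
	size_t len = offsetof(struct mib_like, reserved10) -
		     offsetof(struct mib_like, rmon_t_drop);

	printf("bytes cleared: %zu\n", len);   /* everything between the two markers */
	return 0;
}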
+
+/*
+ * Set or clear the multicast filter for this adaptor.
+ */
+static void mpc52xx_fec_set_multicast_list(struct net_device *dev)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ struct mpc52xx_fec __iomem *fec = priv->fec;
+ u32 rx_control;
+
+ rx_control = in_be32(&fec->r_cntrl);
+
+ if (dev->flags & IFF_PROMISC) {
+ rx_control |= FEC_RCNTRL_PROM;
+ out_be32(&fec->r_cntrl, rx_control);
+ } else {
+ rx_control &= ~FEC_RCNTRL_PROM;
+ out_be32(&fec->r_cntrl, rx_control);
+
+ if (dev->flags & IFF_ALLMULTI) {
+ out_be32(&fec->gaddr1, 0xffffffff);
+ out_be32(&fec->gaddr2, 0xffffffff);
+ } else {
+ u32 crc;
+ struct netdev_hw_addr *ha;
+ u32 gaddr1 = 0x00000000;
+ u32 gaddr2 = 0x00000000;
+
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = ether_crc_le(6, ha->addr) >> 26;
+ if (crc >= 32)
+ gaddr1 |= 1 << (crc-32);
+ else
+ gaddr2 |= 1 << crc;
+ }
+ out_be32(&fec->gaddr1, gaddr1);
+ out_be32(&fec->gaddr2, gaddr2);
+ }
+ }
+}
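The hash filter above keys each multicast address on the top six bits of its little-endian Ethernet CRC-32: buckets 32..63 land in gaddr1 and 0..31 in gaddr2. A self-contained sketch of the bucket selection, with a local CRC routine standing in for the kernel's ether_crc_le():

#include <stdint.h>
#include <stdio.h>

/* Little-endian Ethernet CRC-32 (same polynomial the kernel's ether_crc_le() uses),
 * written locally here so the sketch builds on its own. */
static uint32_t crc32_le(const uint8_t *p, int len)
{
	uint32_t crc = 0xffffffff;
	int i, b;

	for (i = 0; i < len; i++) {
		crc ^= p[i];
		for (b = 0; b < 8; b++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
	}
	return crc;
}

int main(void)
{
	const uint8_t mcast[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };  /* example group */
	uint32_t gaddr1 = 0, gaddr2 = 0;
	uint32_t bucket = crc32_le(mcast, 6) >> 26;   /* top 6 CRC bits -> 0..63 */

	if (bucket >= 32)
		gaddr1 |= 1u << (bucket - 32);
	else
		gaddr2 |= 1u << bucket;

	printf("bucket=%u gaddr1=0x%08x gaddr2=0x%08x\n",
	       (unsigned)bucket, (unsigned)gaddr1, (unsigned)gaddr2);
	return 0;
}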
+
+/**
+ * mpc52xx_fec_hw_init
+ * @dev: network device
+ *
+ * Set up various hardware settings; only needed once at startup
+ */
+static void mpc52xx_fec_hw_init(struct net_device *dev)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ struct mpc52xx_fec __iomem *fec = priv->fec;
+ int i;
+
+ /* Whack a reset. We should wait for this. */
+ out_be32(&fec->ecntrl, FEC_ECNTRL_RESET);
+ for (i = 0; i < FEC_RESET_DELAY; ++i) {
+ if ((in_be32(&fec->ecntrl) & FEC_ECNTRL_RESET) == 0)
+ break;
+ udelay(1);
+ }
+ if (i == FEC_RESET_DELAY)
+ dev_err(&dev->dev, "FEC Reset timeout!\n");
+
+ /* set pause to 0x20 frames */
+ out_be32(&fec->op_pause, FEC_OP_PAUSE_OPCODE | 0x20);
+
+	/* the high service request is deasserted when fewer than 7 bytes are in the fifo,
+	 * the low service request when fewer than 4*7 bytes are in the fifo
+	 */
+ out_be32(&fec->rfifo_cntrl, FEC_FIFO_CNTRL_FRAME | FEC_FIFO_CNTRL_LTG_7);
+ out_be32(&fec->tfifo_cntrl, FEC_FIFO_CNTRL_FRAME | FEC_FIFO_CNTRL_LTG_7);
+
+ /* alarm when <= x bytes in FIFO */
+ out_be32(&fec->rfifo_alarm, 0x0000030c);
+ out_be32(&fec->tfifo_alarm, 0x00000100);
+
+	/* begin transmission when 256 bytes are in FIFO (or EOF or FIFO full) */
+ out_be32(&fec->x_wmrk, FEC_FIFO_WMRK_256B);
+
+ /* enable crc generation */
+ out_be32(&fec->xmit_fsm, FEC_XMIT_FSM_APPEND_CRC | FEC_XMIT_FSM_ENABLE_CRC);
+ out_be32(&fec->iaddr1, 0x00000000); /* No individual filter */
+ out_be32(&fec->iaddr2, 0x00000000); /* No individual filter */
+
+ /* set phy speed.
+ * this can't be done in phy driver, since it needs to be called
+ * before fec stuff (even on resume) */
+ out_be32(&fec->mii_speed, priv->mdio_speed);
+}
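The mii_speed value written above is computed later in probe (and again in the MDIO bus driver) as ((bus_frequency >> 20) / 5) << 1, presumably to keep the management clock within the usual 2.5 MHz MII limit. A worked arithmetic sketch, assuming a 66 MHz IPB bus clock:

#include <stdio.h>

int main(void)
{
	unsigned long bus_freq = 66000000;                    /* assumed IPB clock, Hz */
	unsigned long speed = ((bus_freq >> 20) / 5) << 1;    /* same formula as the probe code */

	/* 66000000 >> 20 = 62, 62 / 5 = 12, 12 << 1 = 24 */
	printf("mii_speed register value: %lu\n", speed);
	return 0;
}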
+
+/**
+ * mpc52xx_fec_start
+ * @dev: network device
+ *
+ * This function is called to start or restart the FEC during a link
+ * change. This happens on fifo errors or when switching between half
+ * and full duplex.
+ */
+static void mpc52xx_fec_start(struct net_device *dev)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ struct mpc52xx_fec __iomem *fec = priv->fec;
+ u32 rcntrl;
+ u32 tcntrl;
+ u32 tmp;
+
+ /* clear sticky error bits */
+ tmp = FEC_FIFO_STATUS_ERR | FEC_FIFO_STATUS_UF | FEC_FIFO_STATUS_OF;
+ out_be32(&fec->rfifo_status, in_be32(&fec->rfifo_status) & tmp);
+ out_be32(&fec->tfifo_status, in_be32(&fec->tfifo_status) & tmp);
+
+ /* FIFOs will reset on mpc52xx_fec_enable */
+ out_be32(&fec->reset_cntrl, FEC_RESET_CNTRL_ENABLE_IS_RESET);
+
+ /* Set station address. */
+ mpc52xx_fec_set_paddr(dev, dev->dev_addr);
+
+ mpc52xx_fec_set_multicast_list(dev);
+
+ /* set max frame len, enable flow control, select mii mode */
+ rcntrl = FEC_RX_BUFFER_SIZE << 16; /* max frame length */
+ rcntrl |= FEC_RCNTRL_FCE;
+
+ if (!priv->seven_wire_mode)
+ rcntrl |= FEC_RCNTRL_MII_MODE;
+
+ if (priv->duplex == DUPLEX_FULL)
+ tcntrl = FEC_TCNTRL_FDEN; /* FD enable */
+ else {
+ rcntrl |= FEC_RCNTRL_DRT; /* disable Rx on Tx (HD) */
+ tcntrl = 0;
+ }
+ out_be32(&fec->r_cntrl, rcntrl);
+ out_be32(&fec->x_cntrl, tcntrl);
+
+ /* Clear any outstanding interrupt. */
+ out_be32(&fec->ievent, 0xffffffff);
+
+ /* Enable interrupts we wish to service. */
+ out_be32(&fec->imask, FEC_IMASK_ENABLE);
+
+ /* And last, enable the transmit and receive processing. */
+ out_be32(&fec->ecntrl, FEC_ECNTRL_ETHER_EN);
+ out_be32(&fec->r_des_active, 0x01000000);
+}
+
+/**
+ * mpc52xx_fec_stop
+ * @dev: network device
+ *
+ * stop all activity on fec and empty dma buffers
+ */
+static void mpc52xx_fec_stop(struct net_device *dev)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ struct mpc52xx_fec __iomem *fec = priv->fec;
+ unsigned long timeout;
+
+ /* disable all interrupts */
+ out_be32(&fec->imask, 0);
+
+ /* Disable the rx task. */
+ bcom_disable(priv->rx_dmatsk);
+
+ /* Wait for tx queue to drain, but only if we're in process context */
+ if (!in_interrupt()) {
+ timeout = jiffies + msecs_to_jiffies(2000);
+ while (time_before(jiffies, timeout) &&
+ !bcom_queue_empty(priv->tx_dmatsk))
+ msleep(100);
+
+ if (time_after_eq(jiffies, timeout))
+ dev_err(&dev->dev, "queues didn't drain\n");
+#if 1
+ if (time_after_eq(jiffies, timeout)) {
+ dev_err(&dev->dev, " tx: index: %i, outdex: %i\n",
+ priv->tx_dmatsk->index,
+ priv->tx_dmatsk->outdex);
+ dev_err(&dev->dev, " rx: index: %i, outdex: %i\n",
+ priv->rx_dmatsk->index,
+ priv->rx_dmatsk->outdex);
+ }
+#endif
+ }
+
+ bcom_disable(priv->tx_dmatsk);
+
+ /* Stop FEC */
+ out_be32(&fec->ecntrl, in_be32(&fec->ecntrl) & ~FEC_ECNTRL_ETHER_EN);
+}
+
+/* reset fec and bestcomm tasks */
+static void mpc52xx_fec_reset(struct net_device *dev)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ struct mpc52xx_fec __iomem *fec = priv->fec;
+
+ mpc52xx_fec_stop(dev);
+
+ out_be32(&fec->rfifo_status, in_be32(&fec->rfifo_status));
+ out_be32(&fec->reset_cntrl, FEC_RESET_CNTRL_RESET_FIFO);
+
+ mpc52xx_fec_free_rx_buffers(dev, priv->rx_dmatsk);
+
+ mpc52xx_fec_hw_init(dev);
+
+ bcom_fec_rx_reset(priv->rx_dmatsk);
+ bcom_fec_tx_reset(priv->tx_dmatsk);
+
+ mpc52xx_fec_alloc_rx_buffers(dev, priv->rx_dmatsk);
+
+ bcom_enable(priv->rx_dmatsk);
+ bcom_enable(priv->tx_dmatsk);
+
+ mpc52xx_fec_start(dev);
+
+ netif_wake_queue(dev);
+}
+
+
+/* ethtool interface */
+
+static int mpc52xx_fec_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+
+ if (!priv->phydev)
+ return -ENODEV;
+
+ return phy_ethtool_gset(priv->phydev, cmd);
+}
+
+static int mpc52xx_fec_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+
+ if (!priv->phydev)
+ return -ENODEV;
+
+ return phy_ethtool_sset(priv->phydev, cmd);
+}
+
+static u32 mpc52xx_fec_get_msglevel(struct net_device *dev)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ return priv->msg_enable;
+}
+
+static void mpc52xx_fec_set_msglevel(struct net_device *dev, u32 level)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ priv->msg_enable = level;
+}
+
+static const struct ethtool_ops mpc52xx_fec_ethtool_ops = {
+ .get_settings = mpc52xx_fec_get_settings,
+ .set_settings = mpc52xx_fec_set_settings,
+ .get_link = ethtool_op_get_link,
+ .get_msglevel = mpc52xx_fec_get_msglevel,
+ .set_msglevel = mpc52xx_fec_set_msglevel,
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
+
+static int mpc52xx_fec_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+
+ if (!priv->phydev)
+ return -ENOTSUPP;
+
+ return phy_mii_ioctl(priv->phydev, rq, cmd);
+}
+
+static const struct net_device_ops mpc52xx_fec_netdev_ops = {
+ .ndo_open = mpc52xx_fec_open,
+ .ndo_stop = mpc52xx_fec_close,
+ .ndo_start_xmit = mpc52xx_fec_start_xmit,
+ .ndo_set_rx_mode = mpc52xx_fec_set_multicast_list,
+ .ndo_set_mac_address = mpc52xx_fec_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = mpc52xx_fec_ioctl,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_tx_timeout = mpc52xx_fec_tx_timeout,
+ .ndo_get_stats = mpc52xx_fec_get_stats,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = mpc52xx_fec_poll_controller,
+#endif
+};
+
+/* ======================================================================== */
+/* OF Driver */
+/* ======================================================================== */
+
+static int mpc52xx_fec_probe(struct platform_device *op)
+{
+ int rv;
+ struct net_device *ndev;
+ struct mpc52xx_fec_priv *priv = NULL;
+ struct resource mem;
+ const u32 *prop;
+ int prop_size;
+ struct device_node *np = op->dev.of_node;
+ const char *mac_addr;
+
+ phys_addr_t rx_fifo;
+ phys_addr_t tx_fifo;
+
+ /* Get the ether ndev & it's private zone */
+ ndev = alloc_etherdev(sizeof(struct mpc52xx_fec_priv));
+ if (!ndev)
+ return -ENOMEM;
+
+ priv = netdev_priv(ndev);
+ priv->ndev = ndev;
+
+ /* Reserve FEC control zone */
+ rv = of_address_to_resource(np, 0, &mem);
+ if (rv) {
+ pr_err("Error while parsing device node resource\n");
+ goto err_netdev;
+ }
+ if (resource_size(&mem) < sizeof(struct mpc52xx_fec)) {
+ pr_err("invalid resource size (%lx < %x), check mpc52xx_devices.c\n",
+ (unsigned long)resource_size(&mem),
+ sizeof(struct mpc52xx_fec));
+ rv = -EINVAL;
+ goto err_netdev;
+ }
+
+ if (!request_mem_region(mem.start, sizeof(struct mpc52xx_fec),
+ DRIVER_NAME)) {
+ rv = -EBUSY;
+ goto err_netdev;
+ }
+
+ /* Init ether ndev with what we have */
+ ndev->netdev_ops = &mpc52xx_fec_netdev_ops;
+ ndev->ethtool_ops = &mpc52xx_fec_ethtool_ops;
+ ndev->watchdog_timeo = FEC_WATCHDOG_TIMEOUT;
+ ndev->base_addr = mem.start;
+ SET_NETDEV_DEV(ndev, &op->dev);
+
+ spin_lock_init(&priv->lock);
+
+ /* ioremap the zones */
+ priv->fec = ioremap(mem.start, sizeof(struct mpc52xx_fec));
+
+ if (!priv->fec) {
+ rv = -ENOMEM;
+ goto err_mem_region;
+ }
+
+ /* Bestcomm init */
+ rx_fifo = ndev->base_addr + offsetof(struct mpc52xx_fec, rfifo_data);
+ tx_fifo = ndev->base_addr + offsetof(struct mpc52xx_fec, tfifo_data);
+
+ priv->rx_dmatsk = bcom_fec_rx_init(FEC_RX_NUM_BD, rx_fifo, FEC_RX_BUFFER_SIZE);
+ priv->tx_dmatsk = bcom_fec_tx_init(FEC_TX_NUM_BD, tx_fifo);
+
+ if (!priv->rx_dmatsk || !priv->tx_dmatsk) {
+ pr_err("Can not init SDMA tasks\n");
+ rv = -ENOMEM;
+ goto err_rx_tx_dmatsk;
+ }
+
+ /* Get the IRQ we need one by one */
+ /* Control */
+ ndev->irq = irq_of_parse_and_map(np, 0);
+
+ /* RX */
+ priv->r_irq = bcom_get_task_irq(priv->rx_dmatsk);
+
+ /* TX */
+ priv->t_irq = bcom_get_task_irq(priv->tx_dmatsk);
+
+ /*
+ * MAC address init:
+ *
+ * First try to read MAC address from DT
+ */
+ mac_addr = of_get_mac_address(np);
+ if (mac_addr) {
+ memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
+ } else {
+ struct mpc52xx_fec __iomem *fec = priv->fec;
+
+ /*
+		 * If the MAC address is not provided via DT then read
+ * it back from the controller regs
+ */
+ *(u32 *)(&ndev->dev_addr[0]) = in_be32(&fec->paddr1);
+ *(u16 *)(&ndev->dev_addr[4]) = in_be32(&fec->paddr2) >> 16;
+ }
+
+ /*
+ * Check if the MAC address is valid, if not get a random one
+ */
+ if (!is_valid_ether_addr(ndev->dev_addr)) {
+ eth_hw_addr_random(ndev);
+ dev_warn(&ndev->dev, "using random MAC address %pM\n",
+ ndev->dev_addr);
+ }
+
+ priv->msg_enable = netif_msg_init(debug, MPC52xx_MESSAGES_DEFAULT);
+
+ /*
+ * Link mode configuration
+ */
+
+ /* Start with safe defaults for link connection */
+ priv->speed = 100;
+ priv->duplex = DUPLEX_HALF;
+ priv->mdio_speed = ((mpc5xxx_get_bus_frequency(np) >> 20) / 5) << 1;
+
+ /* The current speed preconfigures the speed of the MII link */
+ prop = of_get_property(np, "current-speed", &prop_size);
+ if (prop && (prop_size >= sizeof(u32) * 2)) {
+ priv->speed = prop[0];
+ priv->duplex = prop[1] ? DUPLEX_FULL : DUPLEX_HALF;
+ }
+
+ /* If there is a phy handle, then get the PHY node */
+ priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
+
+ /* the 7-wire property means don't use MII mode */
+ if (of_find_property(np, "fsl,7-wire-mode", NULL)) {
+ priv->seven_wire_mode = 1;
+ dev_info(&ndev->dev, "using 7-wire PHY mode\n");
+ }
+
+ /* Hardware init */
+ mpc52xx_fec_hw_init(ndev);
+ mpc52xx_fec_reset_stats(ndev);
+
+ rv = register_netdev(ndev);
+ if (rv < 0)
+ goto err_node;
+
+ /* We're done ! */
+ platform_set_drvdata(op, ndev);
+ netdev_info(ndev, "%s MAC %pM\n",
+ op->dev.of_node->full_name, ndev->dev_addr);
+
+ return 0;
+
+err_node:
+ of_node_put(priv->phy_node);
+ irq_dispose_mapping(ndev->irq);
+err_rx_tx_dmatsk:
+ if (priv->rx_dmatsk)
+ bcom_fec_rx_release(priv->rx_dmatsk);
+ if (priv->tx_dmatsk)
+ bcom_fec_tx_release(priv->tx_dmatsk);
+ iounmap(priv->fec);
+err_mem_region:
+ release_mem_region(mem.start, sizeof(struct mpc52xx_fec));
+err_netdev:
+ free_netdev(ndev);
+
+ return rv;
+}
+
+static int
+mpc52xx_fec_remove(struct platform_device *op)
+{
+ struct net_device *ndev;
+ struct mpc52xx_fec_priv *priv;
+
+ ndev = platform_get_drvdata(op);
+ priv = netdev_priv(ndev);
+
+ unregister_netdev(ndev);
+
+ of_node_put(priv->phy_node);
+ priv->phy_node = NULL;
+
+ irq_dispose_mapping(ndev->irq);
+
+ bcom_fec_rx_release(priv->rx_dmatsk);
+ bcom_fec_tx_release(priv->tx_dmatsk);
+
+ iounmap(priv->fec);
+
+ release_mem_region(ndev->base_addr, sizeof(struct mpc52xx_fec));
+
+ free_netdev(ndev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int mpc52xx_fec_of_suspend(struct platform_device *op, pm_message_t state)
+{
+ struct net_device *dev = platform_get_drvdata(op);
+
+ if (netif_running(dev))
+ mpc52xx_fec_close(dev);
+
+ return 0;
+}
+
+static int mpc52xx_fec_of_resume(struct platform_device *op)
+{
+ struct net_device *dev = platform_get_drvdata(op);
+
+ mpc52xx_fec_hw_init(dev);
+ mpc52xx_fec_reset_stats(dev);
+
+ if (netif_running(dev))
+ mpc52xx_fec_open(dev);
+
+ return 0;
+}
+#endif
+
+static const struct of_device_id mpc52xx_fec_match[] = {
+ { .compatible = "fsl,mpc5200b-fec", },
+ { .compatible = "fsl,mpc5200-fec", },
+ { .compatible = "mpc5200-fec", },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, mpc52xx_fec_match);
+
+static struct platform_driver mpc52xx_fec_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = mpc52xx_fec_match,
+ },
+ .probe = mpc52xx_fec_probe,
+ .remove = mpc52xx_fec_remove,
+#ifdef CONFIG_PM
+ .suspend = mpc52xx_fec_of_suspend,
+ .resume = mpc52xx_fec_of_resume,
+#endif
+};
+
+
+/* ======================================================================== */
+/* Module */
+/* ======================================================================== */
+
+static int __init
+mpc52xx_fec_init(void)
+{
+#ifdef CONFIG_FEC_MPC52xx_MDIO
+ int ret;
+ ret = platform_driver_register(&mpc52xx_fec_mdio_driver);
+ if (ret) {
+ pr_err("failed to register mdio driver\n");
+ return ret;
+ }
+#endif
+ return platform_driver_register(&mpc52xx_fec_driver);
+}
+
+static void __exit
+mpc52xx_fec_exit(void)
+{
+ platform_driver_unregister(&mpc52xx_fec_driver);
+#ifdef CONFIG_FEC_MPC52xx_MDIO
+ platform_driver_unregister(&mpc52xx_fec_mdio_driver);
+#endif
+}
+
+
+module_init(mpc52xx_fec_init);
+module_exit(mpc52xx_fec_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Dale Farnsworth");
+MODULE_DESCRIPTION("Ethernet driver for the Freescale MPC52xx FEC");
diff --git a/kernel/drivers/net/ethernet/freescale/fec_mpc52xx.h b/kernel/drivers/net/ethernet/freescale/fec_mpc52xx.h
new file mode 100644
index 000000000..10afa54dd
--- /dev/null
+++ b/kernel/drivers/net/ethernet/freescale/fec_mpc52xx.h
@@ -0,0 +1,294 @@
+/*
+ * drivers/net/ethernet/freescale/fec_mpc52xx.h
+ *
+ * Driver for the MPC5200 Fast Ethernet Controller
+ *
+ * Author: Dale Farnsworth <dfarnsworth@mvista.com>
+ *
+ * 2003-2004 (c) MontaVista, Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+
+#ifndef __DRIVERS_NET_MPC52XX_FEC_H__
+#define __DRIVERS_NET_MPC52XX_FEC_H__
+
+#include <linux/phy.h>
+
+/* Tunable constant */
+/* FEC_RX_BUFFER_SIZE includes 4 bytes for CRC32 */
+#define FEC_RX_BUFFER_SIZE 1522 /* max receive packet size */
+#define FEC_RX_NUM_BD 256
+#define FEC_TX_NUM_BD 64
+
+#define FEC_RESET_DELAY 50 /* uS */
+
+#define FEC_WATCHDOG_TIMEOUT ((400*HZ)/1000)
+
+/* ======================================================================== */
+/* Hardware register sets & bits */
+/* ======================================================================== */
+
+struct mpc52xx_fec {
+ u32 fec_id; /* FEC + 0x000 */
+ u32 ievent; /* FEC + 0x004 */
+ u32 imask; /* FEC + 0x008 */
+
+ u32 reserved0[1]; /* FEC + 0x00C */
+ u32 r_des_active; /* FEC + 0x010 */
+ u32 x_des_active; /* FEC + 0x014 */
+ u32 r_des_active_cl; /* FEC + 0x018 */
+ u32 x_des_active_cl; /* FEC + 0x01C */
+ u32 ivent_set; /* FEC + 0x020 */
+ u32 ecntrl; /* FEC + 0x024 */
+
+ u32 reserved1[6]; /* FEC + 0x028-03C */
+ u32 mii_data; /* FEC + 0x040 */
+ u32 mii_speed; /* FEC + 0x044 */
+ u32 mii_status; /* FEC + 0x048 */
+
+ u32 reserved2[5]; /* FEC + 0x04C-05C */
+ u32 mib_data; /* FEC + 0x060 */
+ u32 mib_control; /* FEC + 0x064 */
+
+ u32 reserved3[6]; /* FEC + 0x068-7C */
+ u32 r_activate; /* FEC + 0x080 */
+ u32 r_cntrl; /* FEC + 0x084 */
+ u32 r_hash; /* FEC + 0x088 */
+ u32 r_data; /* FEC + 0x08C */
+ u32 ar_done; /* FEC + 0x090 */
+ u32 r_test; /* FEC + 0x094 */
+ u32 r_mib; /* FEC + 0x098 */
+ u32 r_da_low; /* FEC + 0x09C */
+ u32 r_da_high; /* FEC + 0x0A0 */
+
+ u32 reserved4[7]; /* FEC + 0x0A4-0BC */
+ u32 x_activate; /* FEC + 0x0C0 */
+ u32 x_cntrl; /* FEC + 0x0C4 */
+ u32 backoff; /* FEC + 0x0C8 */
+ u32 x_data; /* FEC + 0x0CC */
+ u32 x_status; /* FEC + 0x0D0 */
+ u32 x_mib; /* FEC + 0x0D4 */
+ u32 x_test; /* FEC + 0x0D8 */
+ u32 fdxfc_da1; /* FEC + 0x0DC */
+ u32 fdxfc_da2; /* FEC + 0x0E0 */
+ u32 paddr1; /* FEC + 0x0E4 */
+ u32 paddr2; /* FEC + 0x0E8 */
+ u32 op_pause; /* FEC + 0x0EC */
+
+ u32 reserved5[4]; /* FEC + 0x0F0-0FC */
+ u32 instr_reg; /* FEC + 0x100 */
+ u32 context_reg; /* FEC + 0x104 */
+ u32 test_cntrl; /* FEC + 0x108 */
+ u32 acc_reg; /* FEC + 0x10C */
+ u32 ones; /* FEC + 0x110 */
+ u32 zeros; /* FEC + 0x114 */
+ u32 iaddr1; /* FEC + 0x118 */
+ u32 iaddr2; /* FEC + 0x11C */
+ u32 gaddr1; /* FEC + 0x120 */
+ u32 gaddr2; /* FEC + 0x124 */
+ u32 random; /* FEC + 0x128 */
+ u32 rand1; /* FEC + 0x12C */
+ u32 tmp; /* FEC + 0x130 */
+
+ u32 reserved6[3]; /* FEC + 0x134-13C */
+ u32 fifo_id; /* FEC + 0x140 */
+ u32 x_wmrk; /* FEC + 0x144 */
+ u32 fcntrl; /* FEC + 0x148 */
+ u32 r_bound; /* FEC + 0x14C */
+ u32 r_fstart; /* FEC + 0x150 */
+ u32 r_count; /* FEC + 0x154 */
+ u32 r_lag; /* FEC + 0x158 */
+ u32 r_read; /* FEC + 0x15C */
+ u32 r_write; /* FEC + 0x160 */
+ u32 x_count; /* FEC + 0x164 */
+ u32 x_lag; /* FEC + 0x168 */
+ u32 x_retry; /* FEC + 0x16C */
+ u32 x_write; /* FEC + 0x170 */
+ u32 x_read; /* FEC + 0x174 */
+
+ u32 reserved7[2]; /* FEC + 0x178-17C */
+ u32 fm_cntrl; /* FEC + 0x180 */
+ u32 rfifo_data; /* FEC + 0x184 */
+ u32 rfifo_status; /* FEC + 0x188 */
+ u32 rfifo_cntrl; /* FEC + 0x18C */
+ u32 rfifo_lrf_ptr; /* FEC + 0x190 */
+ u32 rfifo_lwf_ptr; /* FEC + 0x194 */
+ u32 rfifo_alarm; /* FEC + 0x198 */
+ u32 rfifo_rdptr; /* FEC + 0x19C */
+ u32 rfifo_wrptr; /* FEC + 0x1A0 */
+ u32 tfifo_data; /* FEC + 0x1A4 */
+ u32 tfifo_status; /* FEC + 0x1A8 */
+ u32 tfifo_cntrl; /* FEC + 0x1AC */
+ u32 tfifo_lrf_ptr; /* FEC + 0x1B0 */
+ u32 tfifo_lwf_ptr; /* FEC + 0x1B4 */
+ u32 tfifo_alarm; /* FEC + 0x1B8 */
+ u32 tfifo_rdptr; /* FEC + 0x1BC */
+ u32 tfifo_wrptr; /* FEC + 0x1C0 */
+
+ u32 reset_cntrl; /* FEC + 0x1C4 */
+ u32 xmit_fsm; /* FEC + 0x1C8 */
+
+ u32 reserved8[3]; /* FEC + 0x1CC-1D4 */
+ u32 rdes_data0; /* FEC + 0x1D8 */
+ u32 rdes_data1; /* FEC + 0x1DC */
+ u32 r_length; /* FEC + 0x1E0 */
+ u32 x_length; /* FEC + 0x1E4 */
+ u32 x_addr; /* FEC + 0x1E8 */
+ u32 cdes_data; /* FEC + 0x1EC */
+ u32 status; /* FEC + 0x1F0 */
+ u32 dma_control; /* FEC + 0x1F4 */
+ u32 des_cmnd; /* FEC + 0x1F8 */
+ u32 data; /* FEC + 0x1FC */
+
+ u32 rmon_t_drop; /* FEC + 0x200 */
+ u32 rmon_t_packets; /* FEC + 0x204 */
+ u32 rmon_t_bc_pkt; /* FEC + 0x208 */
+ u32 rmon_t_mc_pkt; /* FEC + 0x20C */
+ u32 rmon_t_crc_align; /* FEC + 0x210 */
+ u32 rmon_t_undersize; /* FEC + 0x214 */
+ u32 rmon_t_oversize; /* FEC + 0x218 */
+ u32 rmon_t_frag; /* FEC + 0x21C */
+ u32 rmon_t_jab; /* FEC + 0x220 */
+ u32 rmon_t_col; /* FEC + 0x224 */
+ u32 rmon_t_p64; /* FEC + 0x228 */
+ u32 rmon_t_p65to127; /* FEC + 0x22C */
+ u32 rmon_t_p128to255; /* FEC + 0x230 */
+ u32 rmon_t_p256to511; /* FEC + 0x234 */
+ u32 rmon_t_p512to1023; /* FEC + 0x238 */
+ u32 rmon_t_p1024to2047; /* FEC + 0x23C */
+ u32 rmon_t_p_gte2048; /* FEC + 0x240 */
+ u32 rmon_t_octets; /* FEC + 0x244 */
+ u32 ieee_t_drop; /* FEC + 0x248 */
+ u32 ieee_t_frame_ok; /* FEC + 0x24C */
+ u32 ieee_t_1col; /* FEC + 0x250 */
+ u32 ieee_t_mcol; /* FEC + 0x254 */
+ u32 ieee_t_def; /* FEC + 0x258 */
+ u32 ieee_t_lcol; /* FEC + 0x25C */
+ u32 ieee_t_excol; /* FEC + 0x260 */
+ u32 ieee_t_macerr; /* FEC + 0x264 */
+ u32 ieee_t_cserr; /* FEC + 0x268 */
+ u32 ieee_t_sqe; /* FEC + 0x26C */
+ u32 t_fdxfc; /* FEC + 0x270 */
+ u32 ieee_t_octets_ok; /* FEC + 0x274 */
+
+ u32 reserved9[2]; /* FEC + 0x278-27C */
+ u32 rmon_r_drop; /* FEC + 0x280 */
+ u32 rmon_r_packets; /* FEC + 0x284 */
+ u32 rmon_r_bc_pkt; /* FEC + 0x288 */
+ u32 rmon_r_mc_pkt; /* FEC + 0x28C */
+ u32 rmon_r_crc_align; /* FEC + 0x290 */
+ u32 rmon_r_undersize; /* FEC + 0x294 */
+ u32 rmon_r_oversize; /* FEC + 0x298 */
+ u32 rmon_r_frag; /* FEC + 0x29C */
+ u32 rmon_r_jab; /* FEC + 0x2A0 */
+
+ u32 rmon_r_resvd_0; /* FEC + 0x2A4 */
+
+ u32 rmon_r_p64; /* FEC + 0x2A8 */
+ u32 rmon_r_p65to127; /* FEC + 0x2AC */
+ u32 rmon_r_p128to255; /* FEC + 0x2B0 */
+ u32 rmon_r_p256to511; /* FEC + 0x2B4 */
+ u32 rmon_r_p512to1023; /* FEC + 0x2B8 */
+ u32 rmon_r_p1024to2047; /* FEC + 0x2BC */
+ u32 rmon_r_p_gte2048; /* FEC + 0x2C0 */
+ u32 rmon_r_octets; /* FEC + 0x2C4 */
+ u32 ieee_r_drop; /* FEC + 0x2C8 */
+ u32 ieee_r_frame_ok; /* FEC + 0x2CC */
+ u32 ieee_r_crc; /* FEC + 0x2D0 */
+ u32 ieee_r_align; /* FEC + 0x2D4 */
+ u32 r_macerr; /* FEC + 0x2D8 */
+ u32 r_fdxfc; /* FEC + 0x2DC */
+ u32 ieee_r_octets_ok; /* FEC + 0x2E0 */
+
+ u32 reserved10[7]; /* FEC + 0x2E4-2FC */
+
+ u32 reserved11[64]; /* FEC + 0x300-3FF */
+};
+
+#define FEC_MIB_DISABLE 0x80000000
+
+#define FEC_IEVENT_HBERR 0x80000000
+#define FEC_IEVENT_BABR 0x40000000
+#define FEC_IEVENT_BABT 0x20000000
+#define FEC_IEVENT_GRA 0x10000000
+#define FEC_IEVENT_TFINT 0x08000000
+#define FEC_IEVENT_MII 0x00800000
+#define FEC_IEVENT_LATE_COL 0x00200000
+#define FEC_IEVENT_COL_RETRY_LIM 0x00100000
+#define FEC_IEVENT_XFIFO_UN 0x00080000
+#define FEC_IEVENT_XFIFO_ERROR 0x00040000
+#define FEC_IEVENT_RFIFO_ERROR 0x00020000
+
+#define FEC_IMASK_HBERR 0x80000000
+#define FEC_IMASK_BABR 0x40000000
+#define FEC_IMASK_BABT 0x20000000
+#define FEC_IMASK_GRA 0x10000000
+#define FEC_IMASK_MII 0x00800000
+#define FEC_IMASK_LATE_COL 0x00200000
+#define FEC_IMASK_COL_RETRY_LIM 0x00100000
+#define FEC_IMASK_XFIFO_UN 0x00080000
+#define FEC_IMASK_XFIFO_ERROR 0x00040000
+#define FEC_IMASK_RFIFO_ERROR 0x00020000
+
+/* all but MII, which is enabled separately */
+#define FEC_IMASK_ENABLE (FEC_IMASK_HBERR | FEC_IMASK_BABR | \
+ FEC_IMASK_BABT | FEC_IMASK_GRA | FEC_IMASK_LATE_COL | \
+ FEC_IMASK_COL_RETRY_LIM | FEC_IMASK_XFIFO_UN | \
+ FEC_IMASK_XFIFO_ERROR | FEC_IMASK_RFIFO_ERROR)
+
+#define FEC_RCNTRL_MAX_FL_SHIFT 16
+#define FEC_RCNTRL_LOOP 0x01
+#define FEC_RCNTRL_DRT 0x02
+#define FEC_RCNTRL_MII_MODE 0x04
+#define FEC_RCNTRL_PROM 0x08
+#define FEC_RCNTRL_BC_REJ 0x10
+#define FEC_RCNTRL_FCE 0x20
+
+#define FEC_TCNTRL_GTS 0x00000001
+#define FEC_TCNTRL_HBC 0x00000002
+#define FEC_TCNTRL_FDEN 0x00000004
+#define FEC_TCNTRL_TFC_PAUSE 0x00000008
+#define FEC_TCNTRL_RFC_PAUSE 0x00000010
+
+#define FEC_ECNTRL_RESET 0x00000001
+#define FEC_ECNTRL_ETHER_EN 0x00000002
+
+#define FEC_MII_DATA_ST 0x40000000 /* Start frame */
+#define FEC_MII_DATA_OP_RD 0x20000000 /* Perform read */
+#define FEC_MII_DATA_OP_WR 0x10000000 /* Perform write */
+#define FEC_MII_DATA_PA_MSK 0x0f800000 /* PHY Address mask */
+#define FEC_MII_DATA_RA_MSK 0x007c0000 /* PHY Register mask */
+#define FEC_MII_DATA_TA 0x00020000 /* Turnaround */
+#define FEC_MII_DATA_DATAMSK 0x0000ffff /* PHY data mask */
+
+#define FEC_MII_READ_FRAME (FEC_MII_DATA_ST | FEC_MII_DATA_OP_RD | FEC_MII_DATA_TA)
+#define FEC_MII_WRITE_FRAME (FEC_MII_DATA_ST | FEC_MII_DATA_OP_WR | FEC_MII_DATA_TA)
+
+#define FEC_MII_DATA_RA_SHIFT 0x12 /* MII reg addr bits */
+#define FEC_MII_DATA_PA_SHIFT 0x17 /* MII PHY addr bits */
+
+#define FEC_PADDR2_TYPE 0x8808
+
+#define FEC_OP_PAUSE_OPCODE 0x00010000
+
+#define FEC_FIFO_WMRK_256B 0x3
+
+#define FEC_FIFO_STATUS_ERR 0x00400000
+#define FEC_FIFO_STATUS_UF 0x00200000
+#define FEC_FIFO_STATUS_OF 0x00100000
+
+#define FEC_FIFO_CNTRL_FRAME 0x08000000
+#define FEC_FIFO_CNTRL_LTG_7 0x07000000
+
+#define FEC_RESET_CNTRL_RESET_FIFO 0x02000000
+#define FEC_RESET_CNTRL_ENABLE_IS_RESET 0x01000000
+
+#define FEC_XMIT_FSM_APPEND_CRC 0x02000000
+#define FEC_XMIT_FSM_ENABLE_CRC 0x01000000
+
+
+extern struct platform_driver mpc52xx_fec_mdio_driver;
+
+#endif /* __DRIVERS_NET_MPC52XX_FEC_H__ */
diff --git a/kernel/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c b/kernel/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
new file mode 100644
index 000000000..1e647beaf
--- /dev/null
+++ b/kernel/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
@@ -0,0 +1,158 @@
+/*
+ * Driver for the MPC5200 Fast Ethernet Controller - MDIO bus driver
+ *
+ * Copyright (C) 2007 Domen Puncer, Telargo, Inc.
+ * Copyright (C) 2008 Wolfram Sang, Pengutronix
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/of_mdio.h>
+#include <asm/io.h>
+#include <asm/mpc52xx.h>
+#include "fec_mpc52xx.h"
+
+struct mpc52xx_fec_mdio_priv {
+ struct mpc52xx_fec __iomem *regs;
+ int mdio_irqs[PHY_MAX_ADDR];
+};
+
+static int mpc52xx_fec_mdio_transfer(struct mii_bus *bus, int phy_id,
+ int reg, u32 value)
+{
+ struct mpc52xx_fec_mdio_priv *priv = bus->priv;
+ struct mpc52xx_fec __iomem *fec = priv->regs;
+ int tries = 3;
+
+ value |= (phy_id << FEC_MII_DATA_PA_SHIFT) & FEC_MII_DATA_PA_MSK;
+ value |= (reg << FEC_MII_DATA_RA_SHIFT) & FEC_MII_DATA_RA_MSK;
+
+ out_be32(&fec->ievent, FEC_IEVENT_MII);
+ out_be32(&fec->mii_data, value);
+
+ /* wait for it to finish, this takes about 23 us on lite5200b */
+ while (!(in_be32(&fec->ievent) & FEC_IEVENT_MII) && --tries)
+ msleep(1);
+
+ if (!tries)
+ return -ETIMEDOUT;
+
+ return value & FEC_MII_DATA_OP_RD ?
+ in_be32(&fec->mii_data) & FEC_MII_DATA_DATAMSK : 0;
+}
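The read and write entry points just below only OR the PHY address, register address and (for writes) the data into a preformed frame word before handing it to this helper. A standalone sketch of that field packing, with the bit definitions copied from fec_mpc52xx.h so it builds on its own (example phy_id, reg and data values):

#include <stdint.h>
#include <stdio.h>

/* Field definitions reproduced from fec_mpc52xx.h */
#define FEC_MII_DATA_ST       0x40000000
#define FEC_MII_DATA_OP_RD    0x20000000
#define FEC_MII_DATA_OP_WR    0x10000000
#define FEC_MII_DATA_TA       0x00020000
#define FEC_MII_DATA_PA_MSK   0x0f800000
#define FEC_MII_DATA_RA_MSK   0x007c0000
#define FEC_MII_DATA_PA_SHIFT 0x17
#define FEC_MII_DATA_RA_SHIFT 0x12

int main(void)
{
	int phy_id = 1, reg = 2;          /* example addresses */
	uint16_t data = 0x1234;           /* example write payload */

	uint32_t rd = FEC_MII_DATA_ST | FEC_MII_DATA_OP_RD | FEC_MII_DATA_TA;
	uint32_t wr = FEC_MII_DATA_ST | FEC_MII_DATA_OP_WR | FEC_MII_DATA_TA | data;

	/* insert the PHY and register addresses, exactly as the transfer helper does */
	rd |= ((uint32_t)phy_id << FEC_MII_DATA_PA_SHIFT) & FEC_MII_DATA_PA_MSK;
	rd |= ((uint32_t)reg << FEC_MII_DATA_RA_SHIFT) & FEC_MII_DATA_RA_MSK;
	wr |= ((uint32_t)phy_id << FEC_MII_DATA_PA_SHIFT) & FEC_MII_DATA_PA_MSK;
	wr |= ((uint32_t)reg << FEC_MII_DATA_RA_SHIFT) & FEC_MII_DATA_RA_MSK;

	printf("read frame:  0x%08x\nwrite frame: 0x%08x\n", (unsigned)rd, (unsigned)wr);
	return 0;
}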
+
+static int mpc52xx_fec_mdio_read(struct mii_bus *bus, int phy_id, int reg)
+{
+ return mpc52xx_fec_mdio_transfer(bus, phy_id, reg, FEC_MII_READ_FRAME);
+}
+
+static int mpc52xx_fec_mdio_write(struct mii_bus *bus, int phy_id, int reg,
+ u16 data)
+{
+ return mpc52xx_fec_mdio_transfer(bus, phy_id, reg,
+ data | FEC_MII_WRITE_FRAME);
+}
+
+static int mpc52xx_fec_mdio_probe(struct platform_device *of)
+{
+ struct device *dev = &of->dev;
+ struct device_node *np = of->dev.of_node;
+ struct mii_bus *bus;
+ struct mpc52xx_fec_mdio_priv *priv;
+ struct resource res;
+ int err;
+
+ bus = mdiobus_alloc();
+ if (bus == NULL)
+ return -ENOMEM;
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (priv == NULL) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+
+ bus->name = "mpc52xx MII bus";
+ bus->read = mpc52xx_fec_mdio_read;
+ bus->write = mpc52xx_fec_mdio_write;
+
+ /* setup irqs */
+ bus->irq = priv->mdio_irqs;
+
+ /* setup registers */
+ err = of_address_to_resource(np, 0, &res);
+ if (err)
+ goto out_free;
+ priv->regs = ioremap(res.start, resource_size(&res));
+ if (priv->regs == NULL) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%x", res.start);
+ bus->priv = priv;
+
+ bus->parent = dev;
+ dev_set_drvdata(dev, bus);
+
+ /* set MII speed */
+ out_be32(&priv->regs->mii_speed,
+ ((mpc5xxx_get_bus_frequency(of->dev.of_node) >> 20) / 5) << 1);
+
+ err = of_mdiobus_register(bus, np);
+ if (err)
+ goto out_unmap;
+
+ return 0;
+
+ out_unmap:
+ iounmap(priv->regs);
+ out_free:
+ kfree(priv);
+ mdiobus_free(bus);
+
+ return err;
+}
+
+static int mpc52xx_fec_mdio_remove(struct platform_device *of)
+{
+ struct mii_bus *bus = platform_get_drvdata(of);
+ struct mpc52xx_fec_mdio_priv *priv = bus->priv;
+
+ mdiobus_unregister(bus);
+ iounmap(priv->regs);
+ kfree(priv);
+ mdiobus_free(bus);
+
+ return 0;
+}
+
+static const struct of_device_id mpc52xx_fec_mdio_match[] = {
+ { .compatible = "fsl,mpc5200b-mdio", },
+ { .compatible = "fsl,mpc5200-mdio", },
+ { .compatible = "mpc5200b-fec-phy", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mpc52xx_fec_mdio_match);
+
+struct platform_driver mpc52xx_fec_mdio_driver = {
+ .driver = {
+ .name = "mpc5200b-fec-phy",
+ .owner = THIS_MODULE,
+ .of_match_table = mpc52xx_fec_mdio_match,
+ },
+ .probe = mpc52xx_fec_mdio_probe,
+ .remove = mpc52xx_fec_mdio_remove,
+};
+
+/* exported so the fec driver can use it; this bus driver has to be registered before the fec driver */
+EXPORT_SYMBOL_GPL(mpc52xx_fec_mdio_driver);
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/kernel/drivers/net/ethernet/freescale/fec_ptp.c b/kernel/drivers/net/ethernet/freescale/fec_ptp.c
new file mode 100644
index 000000000..a583d89b1
--- /dev/null
+++ b/kernel/drivers/net/ethernet/freescale/fec_ptp.c
@@ -0,0 +1,637 @@
+/*
+ * Fast Ethernet Controller (ENET) PTP driver for MX6x.
+ *
+ * Copyright (C) 2012 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/phy.h>
+#include <linux/fec.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/of_net.h>
+
+#include "fec.h"
+
+/* FEC 1588 register bits */
+#define FEC_T_CTRL_SLAVE 0x00002000
+#define FEC_T_CTRL_CAPTURE 0x00000800
+#define FEC_T_CTRL_RESTART 0x00000200
+#define FEC_T_CTRL_PERIOD_RST 0x00000030
+#define FEC_T_CTRL_PERIOD_EN 0x00000010
+#define FEC_T_CTRL_ENABLE 0x00000001
+
+#define FEC_T_INC_MASK 0x0000007f
+#define FEC_T_INC_OFFSET 0
+#define FEC_T_INC_CORR_MASK 0x00007f00
+#define FEC_T_INC_CORR_OFFSET 8
+
+#define FEC_T_CTRL_PINPER 0x00000080
+#define FEC_T_TF0_MASK 0x00000001
+#define FEC_T_TF0_OFFSET 0
+#define FEC_T_TF1_MASK 0x00000002
+#define FEC_T_TF1_OFFSET 1
+#define FEC_T_TF2_MASK 0x00000004
+#define FEC_T_TF2_OFFSET 2
+#define FEC_T_TF3_MASK 0x00000008
+#define FEC_T_TF3_OFFSET 3
+#define FEC_T_TDRE_MASK 0x00000001
+#define FEC_T_TDRE_OFFSET 0
+#define FEC_T_TMODE_MASK 0x0000003C
+#define FEC_T_TMODE_OFFSET 2
+#define FEC_T_TIE_MASK 0x00000040
+#define FEC_T_TIE_OFFSET 6
+#define FEC_T_TF_MASK 0x00000080
+#define FEC_T_TF_OFFSET 7
+
+#define FEC_ATIME_CTRL 0x400
+#define FEC_ATIME 0x404
+#define FEC_ATIME_EVT_OFFSET 0x408
+#define FEC_ATIME_EVT_PERIOD 0x40c
+#define FEC_ATIME_CORR 0x410
+#define FEC_ATIME_INC 0x414
+#define FEC_TS_TIMESTAMP 0x418
+
+#define FEC_TGSR 0x604
+#define FEC_TCSR(n) (0x608 + n * 0x08)
+#define FEC_TCCR(n) (0x60C + n * 0x08)
+#define MAX_TIMER_CHANNEL 3
+#define FEC_TMODE_TOGGLE 0x05
+#define FEC_HIGH_PULSE 0x0F
+
+#define FEC_CC_MULT (1 << 31)
+#define FEC_COUNTER_PERIOD (1 << 31)
+#define PPS_OUPUT_RELOAD_PERIOD NSEC_PER_SEC
+#define FEC_CHANNLE_0 0
+#define DEFAULT_PPS_CHANNEL FEC_CHANNLE_0
+
+/**
+ * fec_ptp_enable_pps
+ * @fep: the fec_enet_private structure handle
+ * @enable: enable the channel pps output
+ *
+ * This function enables the PPS output on the timer channel.
+ */
+static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
+{
+ unsigned long flags;
+ u32 val, tempval;
+ int inc;
+ struct timespec ts;
+ u64 ns;
+ u32 remainder;
+ val = 0;
+
+ if (!(fep->hwts_tx_en || fep->hwts_rx_en)) {
+ dev_err(&fep->pdev->dev, "No ptp stack is running\n");
+ return -EINVAL;
+ }
+
+ if (fep->pps_enable == enable)
+ return 0;
+
+ fep->pps_channel = DEFAULT_PPS_CHANNEL;
+ fep->reload_period = PPS_OUPUT_RELOAD_PERIOD;
+ inc = fep->ptp_inc;
+
+ spin_lock_irqsave(&fep->tmreg_lock, flags);
+
+ if (enable) {
+		/* clear any pending capture or output compare interrupt status.
+		 */
+ writel(FEC_T_TF_MASK, fep->hwp + FEC_TCSR(fep->pps_channel));
+
+		/* It is recommended to double check that the TMODE field in the
+		 * TCSR register is cleared before the first compare counter
+		 * is written into the TCCR register. Just add a double check.
+ */
+ val = readl(fep->hwp + FEC_TCSR(fep->pps_channel));
+ do {
+ val &= ~(FEC_T_TMODE_MASK);
+ writel(val, fep->hwp + FEC_TCSR(fep->pps_channel));
+ val = readl(fep->hwp + FEC_TCSR(fep->pps_channel));
+ } while (val & FEC_T_TMODE_MASK);
+
+		/* Dummy read to update the timecounter */
+ timecounter_read(&fep->tc);
+		/* We want the first compare event to land on the next
+		 * second boundary. So we need to know what the ptp time
+		 * is now and how many nanoseconds remain until the next second.
+		 * The remaining nanoseconds before the next second are
+		 * NSEC_PER_SEC - ts.tv_nsec. Adding them to the current
+		 * timer value gives the next second boundary.
+		 */
+ tempval = readl(fep->hwp + FEC_ATIME_CTRL);
+ tempval |= FEC_T_CTRL_CAPTURE;
+ writel(tempval, fep->hwp + FEC_ATIME_CTRL);
+
+ tempval = readl(fep->hwp + FEC_ATIME);
+ /* Convert the ptp local counter to 1588 timestamp */
+ ns = timecounter_cyc2time(&fep->tc, tempval);
+ ts.tv_sec = div_u64_rem(ns, 1000000000ULL, &remainder);
+ ts.tv_nsec = remainder;
+
+		/* tempval is less than 3 seconds, so val is less than
+		 * 4 seconds; no overflow in the 32-bit calculation.
+		 */
+ val = NSEC_PER_SEC - (u32)ts.tv_nsec + tempval;
+
+		/* Need to consider the situation where the current time is
+		 * very close to the second boundary, which means NSEC_PER_SEC
+		 * - ts.tv_nsec is close to zero (for example 20 ns). Since the
+		 * timer is still running while we calculate the first compare
+		 * event, it is possible that the remaining nanoseconds run out
+		 * before the compare counter is calculated and written into the
+		 * TCCR register. To avoid this, set the compare event to the
+		 * second after the next one. The current setting is a 31-bit
+		 * timer that wraps around after roughly 2 seconds, so it is
+		 * okay to target the second after next.
+		 */
+ val += NSEC_PER_SEC;
+
+		/* We add (2 * NSEC_PER_SEC - (u32)ts.tv_nsec) to the current
+		 * ptp counter, which may cause a 32-bit wrap. Since
+		 * (2 * NSEC_PER_SEC - (u32)ts.tv_nsec) is less than 2 seconds,
+		 * the wrap will not cause an issue. An offset bigger than
+		 * fep->cc.mask would be an error.
+		 */
+ val &= fep->cc.mask;
+ writel(val, fep->hwp + FEC_TCCR(fep->pps_channel));
+
+		/* Calculate the second compare event timestamp */
+ fep->next_counter = (val + fep->reload_period) & fep->cc.mask;
+
+		/* Enable the compare event on overflow */
+ val = readl(fep->hwp + FEC_ATIME_CTRL);
+ val |= FEC_T_CTRL_PINPER;
+ writel(val, fep->hwp + FEC_ATIME_CTRL);
+
+ /* Compare channel setting. */
+ val = readl(fep->hwp + FEC_TCSR(fep->pps_channel));
+ val |= (1 << FEC_T_TF_OFFSET | 1 << FEC_T_TIE_OFFSET);
+ val &= ~(1 << FEC_T_TDRE_OFFSET);
+ val &= ~(FEC_T_TMODE_MASK);
+ val |= (FEC_HIGH_PULSE << FEC_T_TMODE_OFFSET);
+ writel(val, fep->hwp + FEC_TCSR(fep->pps_channel));
+
+ /* Write the second compare event timestamp and calculate
+		 * the third timestamp. Refer to the TCCR register details in the spec.
+ */
+ writel(fep->next_counter, fep->hwp + FEC_TCCR(fep->pps_channel));
+ fep->next_counter = (fep->next_counter + fep->reload_period) & fep->cc.mask;
+ } else {
+ writel(0, fep->hwp + FEC_TCSR(fep->pps_channel));
+ }
+
+ fep->pps_enable = enable;
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+
+ return 0;
+}
+
+/**
+ * fec_ptp_read - read raw cycle counter (to be used by time counter)
+ * @cc: the cyclecounter structure
+ *
+ * this function reads the cyclecounter registers and is called by the
+ * cyclecounter structure used to construct a ns counter from the
+ * arbitrary fixed point registers
+ */
+static cycle_t fec_ptp_read(const struct cyclecounter *cc)
+{
+ struct fec_enet_private *fep =
+ container_of(cc, struct fec_enet_private, cc);
+ const struct platform_device_id *id_entry =
+ platform_get_device_id(fep->pdev);
+ u32 tempval;
+
+ tempval = readl(fep->hwp + FEC_ATIME_CTRL);
+ tempval |= FEC_T_CTRL_CAPTURE;
+ writel(tempval, fep->hwp + FEC_ATIME_CTRL);
+
+ if (id_entry->driver_data & FEC_QUIRK_BUG_CAPTURE)
+ udelay(1);
+
+ return readl(fep->hwp + FEC_ATIME);
+}
+
+/**
+ * fec_ptp_start_cyclecounter - create the cycle counter from hw
+ * @ndev: network device
+ *
+ * this function initializes the timecounter and cyclecounter
+ * structures for use in generating a ns counter from the arbitrary
+ * fixed point cycles registers in the hardware.
+ */
+void fec_ptp_start_cyclecounter(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ unsigned long flags;
+ int inc;
+
+ inc = 1000000000 / fep->cycle_speed;
+
+ /* grab the ptp lock */
+ spin_lock_irqsave(&fep->tmreg_lock, flags);
+
+ /* 1ns counter */
+ writel(inc << FEC_T_INC_OFFSET, fep->hwp + FEC_ATIME_INC);
+
+ /* use 31-bit timer counter */
+ writel(FEC_COUNTER_PERIOD, fep->hwp + FEC_ATIME_EVT_PERIOD);
+
+ writel(FEC_T_CTRL_ENABLE | FEC_T_CTRL_PERIOD_RST,
+ fep->hwp + FEC_ATIME_CTRL);
+
+ memset(&fep->cc, 0, sizeof(fep->cc));
+ fep->cc.read = fec_ptp_read;
+ fep->cc.mask = CLOCKSOURCE_MASK(31);
+ fep->cc.shift = 31;
+ fep->cc.mult = FEC_CC_MULT;
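+	/* With mult = 2^31 and shift = 31 the cyclecounter conversion is
+	 * (cycles * 2^31) >> 31, i.e. a 1:1 mapping; this works because the
+	 * hardware counter above is already programmed to count nanoseconds.
+	 */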
+
+ /* reset the ns time counter */
+ timecounter_init(&fep->tc, &fep->cc, ktime_to_ns(ktime_get_real()));
+
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+}
+
+/**
+ * fec_ptp_adjfreq - adjust ptp cycle frequency
+ * @ptp: the ptp clock structure
+ * @ppb: parts per billion adjustment from base
+ *
+ * Adjust the frequency of the ptp cycle counter by the
+ * indicated ppb from the base frequency.
+ *
+ * Because the ENET hardware frequency adjustment is complex,
+ * a software method is used to do it.
+ */
+static int fec_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+ unsigned long flags;
+ int neg_adj = 0;
+ u32 i, tmp;
+ u32 corr_inc, corr_period;
+ u32 corr_ns;
+ u64 lhs, rhs;
+
+ struct fec_enet_private *fep =
+ container_of(ptp, struct fec_enet_private, ptp_caps);
+
+ if (ppb == 0)
+ return 0;
+
+ if (ppb < 0) {
+ ppb = -ppb;
+ neg_adj = 1;
+ }
+
+ /* In theory, corr_inc/corr_period = ppb/NSEC_PER_SEC;
+	 * Try to find a corr_inc between 1 and fep->ptp_inc that
+	 * meets the adjustment requirement.
+ */
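+	/* Worked example (assuming, purely for illustration, a 125 MHz ptp
+	 * clock, i.e. ptp_inc = 8): for ppb = 100, rhs = 100 * 8 = 800 and
+	 * the loop stops at corr_inc = 1 with corr_period =
+	 * NSEC_PER_SEC / 800 = 1250000. One extra (or, with neg_adj, one
+	 * missing) nanosecond is then applied every 1250000 increments;
+	 * since each increment nominally advances the clock by 8 ns, that
+	 * is 1 ns per 10 ms, i.e. a 100 ppb adjustment.
+	 */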
+ lhs = NSEC_PER_SEC;
+ rhs = (u64)ppb * (u64)fep->ptp_inc;
+ for (i = 1; i <= fep->ptp_inc; i++) {
+ if (lhs >= rhs) {
+ corr_inc = i;
+ corr_period = div_u64(lhs, rhs);
+ break;
+ }
+ lhs += NSEC_PER_SEC;
+ }
+	/* Not found? Fall back to the maximum correction:
+	 * double-speed correction in every clock step.
+	 */
+ if (i > fep->ptp_inc) {
+ corr_inc = fep->ptp_inc;
+ corr_period = 1;
+ }
+
+ if (neg_adj)
+ corr_ns = fep->ptp_inc - corr_inc;
+ else
+ corr_ns = fep->ptp_inc + corr_inc;
+
+ spin_lock_irqsave(&fep->tmreg_lock, flags);
+
+ tmp = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_MASK;
+ tmp |= corr_ns << FEC_T_INC_CORR_OFFSET;
+ writel(tmp, fep->hwp + FEC_ATIME_INC);
+ writel(corr_period, fep->hwp + FEC_ATIME_CORR);
+ /* dummy read to update the timer. */
+ timecounter_read(&fep->tc);
+
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+
+ return 0;
+}
+
+/**
+ * fec_ptp_adjtime
+ * @ptp: the ptp clock structure
+ * @delta: offset to adjust the cycle counter by
+ *
+ * adjust the timer by resetting the timecounter structure.
+ */
+static int fec_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct fec_enet_private *fep =
+ container_of(ptp, struct fec_enet_private, ptp_caps);
+ unsigned long flags;
+
+ spin_lock_irqsave(&fep->tmreg_lock, flags);
+ timecounter_adjtime(&fep->tc, delta);
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+
+ return 0;
+}
+
+/**
+ * fec_ptp_gettime
+ * @ptp: the ptp clock structure
+ * @ts: timespec structure to hold the current time value
+ *
+ * read the timecounter and return the correct value in ns,
+ * after converting it into a struct timespec64.
+ */
+static int fec_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+{
+ struct fec_enet_private *adapter =
+ container_of(ptp, struct fec_enet_private, ptp_caps);
+ u64 ns;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->tmreg_lock, flags);
+ ns = timecounter_read(&adapter->tc);
+ spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+
+ *ts = ns_to_timespec64(ns);
+
+ return 0;
+}
+
+/**
+ * fec_ptp_settime
+ * @ptp: the ptp clock structure
+ * @ts: the timespec containing the new time for the cycle counter
+ *
+ * reset the timecounter to use a new base value instead of the kernel
+ * wall timer value.
+ */
+static int fec_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct fec_enet_private *fep =
+ container_of(ptp, struct fec_enet_private, ptp_caps);
+
+ u64 ns;
+ unsigned long flags;
+ u32 counter;
+
+ mutex_lock(&fep->ptp_clk_mutex);
+ /* Check the ptp clock */
+ if (!fep->ptp_clk_on) {
+ mutex_unlock(&fep->ptp_clk_mutex);
+ return -EINVAL;
+ }
+
+ ns = timespec64_to_ns(ts);
+ /* Get the timer value based on timestamp.
+ * Update the counter with the masked value.
+ */
+ counter = ns & fep->cc.mask;
+
+ spin_lock_irqsave(&fep->tmreg_lock, flags);
+ writel(counter, fep->hwp + FEC_ATIME);
+ timecounter_init(&fep->tc, &fep->cc, ns);
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+ mutex_unlock(&fep->ptp_clk_mutex);
+ return 0;
+}
+
+/**
+ * fec_ptp_enable
+ * @ptp: the ptp clock structure
+ * @rq: the requested feature to change
+ * @on: whether to enable or disable the feature
+ *
+ */
+static int fec_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct fec_enet_private *fep =
+ container_of(ptp, struct fec_enet_private, ptp_caps);
+ int ret = 0;
+
+ if (rq->type == PTP_CLK_REQ_PPS) {
+ ret = fec_ptp_enable_pps(fep, on);
+
+ return ret;
+ }
+ return -EOPNOTSUPP;
+}
+
+/**
+ * fec_ptp_set - control hardware time stamping
+ * @ndev: pointer to net_device
+ * @ifr: ioctl data
+ */
+int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ struct hwtstamp_config config;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ /* reserved for future extensions */
+ if (config.flags)
+ return -EINVAL;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ fep->hwts_tx_en = 0;
+ break;
+ case HWTSTAMP_TX_ON:
+ fep->hwts_tx_en = 1;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ if (fep->hwts_rx_en)
+ fep->hwts_rx_en = 0;
+ config.rx_filter = HWTSTAMP_FILTER_NONE;
+ break;
+
+ default:
+ /*
+ * register RXMTRL must be set in order to do V1 packets,
+ * therefore it is not possible to time stamp both V1 Sync and
+ * Delay_Req messages and hardware does not support
+ * timestamping all packets => return error
+ */
+ fep->hwts_rx_en = 1;
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ }
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
+int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct hwtstamp_config config;
+
+ config.flags = 0;
+ config.tx_type = fep->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ config.rx_filter = (fep->hwts_rx_en ?
+ HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
+/**
+ * fec_time_keep - call timecounter_read every second to avoid timer overrun
+ * because the ENET hardware only supports a 32-bit counter, which
+ * would overflow in about 4s
+ */
+static void fec_time_keep(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct fec_enet_private *fep = container_of(dwork, struct fec_enet_private, time_keep);
+ u64 ns;
+ unsigned long flags;
+
+ mutex_lock(&fep->ptp_clk_mutex);
+ if (fep->ptp_clk_on) {
+ spin_lock_irqsave(&fep->tmreg_lock, flags);
+ ns = timecounter_read(&fep->tc);
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+ }
+ mutex_unlock(&fep->ptp_clk_mutex);
+
+ schedule_delayed_work(&fep->time_keep, HZ);
+}
+
+/**
+ * fec_ptp_init
+ * @pdev: the FEC platform device
+ *
+ * This function performs the required steps for enabling ptp
+ * support. If ptp support has already been loaded it simply calls the
+ * cyclecounter init routine and exits.
+ */
+
+void fec_ptp_init(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ fep->ptp_caps.owner = THIS_MODULE;
+ snprintf(fep->ptp_caps.name, 16, "fec ptp");
+
+ fep->ptp_caps.max_adj = 250000000;
+ fep->ptp_caps.n_alarm = 0;
+ fep->ptp_caps.n_ext_ts = 0;
+ fep->ptp_caps.n_per_out = 0;
+ fep->ptp_caps.n_pins = 0;
+ fep->ptp_caps.pps = 1;
+ fep->ptp_caps.adjfreq = fec_ptp_adjfreq;
+ fep->ptp_caps.adjtime = fec_ptp_adjtime;
+ fep->ptp_caps.gettime64 = fec_ptp_gettime;
+ fep->ptp_caps.settime64 = fec_ptp_settime;
+ fep->ptp_caps.enable = fec_ptp_enable;
+
+ fep->cycle_speed = clk_get_rate(fep->clk_ptp);
+ fep->ptp_inc = NSEC_PER_SEC / fep->cycle_speed;
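+	/* For example, a (hypothetical) 125 MHz ptp clock gives
+	 * cycle_speed = 125000000 and ptp_inc = 8, i.e. 8 ns per tick.
+	 */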
+
+ spin_lock_init(&fep->tmreg_lock);
+
+ fec_ptp_start_cyclecounter(ndev);
+
+ INIT_DELAYED_WORK(&fep->time_keep, fec_time_keep);
+
+ fep->ptp_clock = ptp_clock_register(&fep->ptp_caps, &pdev->dev);
+ if (IS_ERR(fep->ptp_clock)) {
+ fep->ptp_clock = NULL;
+ pr_err("ptp_clock_register failed\n");
+ }
+
+ schedule_delayed_work(&fep->time_keep, HZ);
+}
+
+/**
+ * fec_ptp_check_pps_event
+ * @fep: the fec_enet_private structure handle
+ *
+ * This function checks the pps event and reloads the timer compare counter.
+ */
+uint fec_ptp_check_pps_event(struct fec_enet_private *fep)
+{
+ u32 val;
+ u8 channel = fep->pps_channel;
+ struct ptp_clock_event event;
+
+ val = readl(fep->hwp + FEC_TCSR(channel));
+ if (val & FEC_T_TF_MASK) {
+		/* Write the next-next compare value (not the next one,
+		 * according to the spec) to the register
+		 */
+ writel(fep->next_counter, fep->hwp + FEC_TCCR(channel));
+ do {
+ writel(val, fep->hwp + FEC_TCSR(channel));
+ } while (readl(fep->hwp + FEC_TCSR(channel)) & FEC_T_TF_MASK);
+
+ /* Update the counter; */
+ fep->next_counter = (fep->next_counter + fep->reload_period) & fep->cc.mask;
+
+ event.type = PTP_CLOCK_PPS;
+ ptp_clock_event(fep->ptp_clock, &event);
+ return 1;
+ }
+
+ return 0;
+}
diff --git a/kernel/drivers/net/ethernet/freescale/fs_enet/Kconfig b/kernel/drivers/net/ethernet/freescale/fs_enet/Kconfig
new file mode 100644
index 000000000..be92229f2
--- /dev/null
+++ b/kernel/drivers/net/ethernet/freescale/fs_enet/Kconfig
@@ -0,0 +1,34 @@
+config FS_ENET
+ tristate "Freescale Ethernet Driver"
+ depends on NET_VENDOR_FREESCALE && (CPM1 || CPM2 || PPC_MPC512x)
+ select MII
+ select PHYLIB
+
+config FS_ENET_MPC5121_FEC
+ def_bool y if (FS_ENET && PPC_MPC512x)
+ select FS_ENET_HAS_FEC
+
+config FS_ENET_HAS_SCC
+ bool "Chip has an SCC usable for ethernet"
+ depends on FS_ENET && (CPM1 || CPM2)
+ default y
+
+config FS_ENET_HAS_FCC
+ bool "Chip has an FCC usable for ethernet"
+ depends on FS_ENET && CPM2
+ default y
+
+config FS_ENET_HAS_FEC
+ bool "Chip has an FEC usable for ethernet"
+ depends on FS_ENET && (CPM1 || FS_ENET_MPC5121_FEC)
+ select FS_ENET_MDIO_FEC
+ default y
+
+config FS_ENET_MDIO_FEC
+ tristate "MDIO driver for FEC"
+ depends on FS_ENET && (CPM1 || FS_ENET_MPC5121_FEC)
+
+config FS_ENET_MDIO_FCC
+ tristate "MDIO driver for FCC"
+ depends on FS_ENET && CPM2
+ select MDIO_BITBANG
diff --git a/kernel/drivers/net/ethernet/freescale/fs_enet/Makefile b/kernel/drivers/net/ethernet/freescale/fs_enet/Makefile
new file mode 100644
index 000000000..d4a305ee3
--- /dev/null
+++ b/kernel/drivers/net/ethernet/freescale/fs_enet/Makefile
@@ -0,0 +1,14 @@
+#
+# Makefile for the Freescale Ethernet controllers
+#
+
+obj-$(CONFIG_FS_ENET) += fs_enet.o
+
+fs_enet-$(CONFIG_FS_ENET_HAS_SCC) += mac-scc.o
+fs_enet-$(CONFIG_FS_ENET_HAS_FEC) += mac-fec.o
+fs_enet-$(CONFIG_FS_ENET_HAS_FCC) += mac-fcc.o
+
+obj-$(CONFIG_FS_ENET_MDIO_FEC) += mii-fec.o
+obj-$(CONFIG_FS_ENET_MDIO_FCC) += mii-bitbang.o
+
+fs_enet-objs := fs_enet-main.o $(fs_enet-m)
diff --git a/kernel/drivers/net/ethernet/freescale/fs_enet/fec.h b/kernel/drivers/net/ethernet/freescale/fs_enet/fec.h
new file mode 100644
index 000000000..b9fe5bde4
--- /dev/null
+++ b/kernel/drivers/net/ethernet/freescale/fs_enet/fec.h
@@ -0,0 +1,44 @@
+#ifndef FS_ENET_FEC_H
+#define FS_ENET_FEC_H
+
+/* CRC polynomial used by the FEC for multicast group filtering */
+#define FEC_CRC_POLY 0x04C11DB7
+
+#define FEC_MAX_MULTICAST_ADDRS 64
+
+/* Interrupt events/masks. */
+#define FEC_ENET_HBERR 0x80000000U /* Heartbeat error */
+#define FEC_ENET_BABR 0x40000000U /* Babbling receiver */
+#define FEC_ENET_BABT 0x20000000U /* Babbling transmitter */
+#define FEC_ENET_GRA 0x10000000U /* Graceful stop complete */
+#define FEC_ENET_TXF 0x08000000U /* Full frame transmitted */
+#define FEC_ENET_TXB 0x04000000U /* A buffer was transmitted */
+#define FEC_ENET_RXF 0x02000000U /* Full frame received */
+#define FEC_ENET_RXB 0x01000000U /* A buffer was received */
+#define FEC_ENET_MII 0x00800000U /* MII interrupt */
+#define FEC_ENET_EBERR 0x00400000U /* SDMA bus error */
+
+#define FEC_ECNTRL_PINMUX 0x00000004
+#define FEC_ECNTRL_ETHER_EN 0x00000002
+#define FEC_ECNTRL_RESET 0x00000001
+
+/* RMII mode enabled only when MII_MODE bit is set too. */
+#define FEC_RCNTRL_RMII_MODE (0x00000100 | \
+ FEC_RCNTRL_MII_MODE | FEC_RCNTRL_FCE)
+#define FEC_RCNTRL_FCE 0x00000020
+#define FEC_RCNTRL_BC_REJ 0x00000010
+#define FEC_RCNTRL_PROM 0x00000008
+#define FEC_RCNTRL_MII_MODE 0x00000004
+#define FEC_RCNTRL_DRT 0x00000002
+#define FEC_RCNTRL_LOOP 0x00000001
+
+#define FEC_TCNTRL_FDEN 0x00000004
+#define FEC_TCNTRL_HBC 0x00000002
+#define FEC_TCNTRL_GTS 0x00000001
+
+/*
+ * Delay to wait for FEC reset command to complete (in us)
+ */
+#define FEC_RESET_DELAY 50
+#endif
diff --git a/kernel/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/kernel/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
new file mode 100644
index 000000000..9b3639eae
--- /dev/null
+++ b/kernel/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -0,0 +1,1141 @@
+/*
+ * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
+ *
+ * Copyright (c) 2003 Intracom S.A.
+ * by Pantelis Antoniou <panto@intracom.gr>
+ *
+ * 2005 (c) MontaVista Software, Inc.
+ * Vitaly Bordug <vbordug@ru.mvista.com>
+ *
+ * Heavily based on original FEC driver by Dan Malek <dan@embeddededge.com>
+ * and modifications by Joakim Tjernlund <joakim.tjernlund@lumentis.se>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/bitops.h>
+#include <linux/fs.h>
+#include <linux/platform_device.h>
+#include <linux/phy.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/of_platform.h>
+#include <linux/of_gpio.h>
+#include <linux/of_net.h>
+
+#include <linux/vmalloc.h>
+#include <asm/pgtable.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+
+#include "fs_enet.h"
+
+/*************************************************/
+
+MODULE_AUTHOR("Pantelis Antoniou <panto@intracom.gr>");
+MODULE_DESCRIPTION("Freescale Ethernet Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
+static int fs_enet_debug = -1; /* -1 == use FS_ENET_DEF_MSG_ENABLE as value */
+module_param(fs_enet_debug, int, 0);
+MODULE_PARM_DESC(fs_enet_debug,
+ "Freescale bitmapped debugging message enable value");
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void fs_enet_netpoll(struct net_device *dev);
+#endif
+
+static void fs_set_multicast_list(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ (*fep->ops->set_multicast_list)(dev);
+}
+
+static void skb_align(struct sk_buff *skb, int align)
+{
+ int off = ((unsigned long)skb->data) & (align - 1);
+
+ if (off)
+ skb_reserve(skb, align - off);
+}
+
+/* NAPI receive function */
+static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
+{
+ struct fs_enet_private *fep = container_of(napi, struct fs_enet_private, napi);
+ struct net_device *dev = fep->ndev;
+ const struct fs_platform_info *fpi = fep->fpi;
+ cbd_t __iomem *bdp;
+ struct sk_buff *skb, *skbn, *skbt;
+ int received = 0;
+ u16 pkt_len, sc;
+ int curidx;
+
+ if (budget <= 0)
+ return received;
+
+ /*
+ * First, grab all of the stats for the incoming packet.
+ * These get messed up if we get called due to a busy condition.
+ */
+ bdp = fep->cur_rx;
+
+	/* clear RX status bits for napi */
+ (*fep->ops->napi_clear_rx_event)(dev);
+
+ while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {
+ curidx = bdp - fep->rx_bd_base;
+
+ /*
+ * Since we have allocated space to hold a complete frame,
+ * the last indicator should be set.
+ */
+ if ((sc & BD_ENET_RX_LAST) == 0)
+ dev_warn(fep->dev, "rcv is not +last\n");
+
+ /*
+ * Check for errors.
+ */
+ if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
+ BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
+ fep->stats.rx_errors++;
+ /* Frame too long or too short. */
+ if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
+ fep->stats.rx_length_errors++;
+ /* Frame alignment */
+ if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
+ fep->stats.rx_frame_errors++;
+ /* CRC Error */
+ if (sc & BD_ENET_RX_CR)
+ fep->stats.rx_crc_errors++;
+ /* FIFO overrun */
+ if (sc & BD_ENET_RX_OV)
+ fep->stats.rx_crc_errors++;
+
+ skb = fep->rx_skbuff[curidx];
+
+ dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
+ L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
+ DMA_FROM_DEVICE);
+
+ skbn = skb;
+
+ } else {
+ skb = fep->rx_skbuff[curidx];
+
+ dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
+ L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
+ DMA_FROM_DEVICE);
+
+ /*
+ * Process the incoming frame.
+ */
+ fep->stats.rx_packets++;
+ pkt_len = CBDR_DATLEN(bdp) - 4; /* remove CRC */
+ fep->stats.rx_bytes += pkt_len + 4;
+
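+			/* Small frames (up to fpi->rx_copybreak bytes, 240 in
+			 * this driver's probe defaults) are copied into a
+			 * freshly allocated skb so the original ring buffer
+			 * can be recycled immediately; larger frames get a
+			 * new full-size replacement buffer instead.
+			 */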
+ if (pkt_len <= fpi->rx_copybreak) {
+ /* +2 to make IP header L1 cache aligned */
+ skbn = netdev_alloc_skb(dev, pkt_len + 2);
+ if (skbn != NULL) {
+ skb_reserve(skbn, 2); /* align IP header */
+ skb_copy_from_linear_data(skb,
+ skbn->data, pkt_len);
+ /* swap */
+ skbt = skb;
+ skb = skbn;
+ skbn = skbt;
+ }
+ } else {
+ skbn = netdev_alloc_skb(dev, ENET_RX_FRSIZE);
+
+ if (skbn)
+ skb_align(skbn, ENET_RX_ALIGN);
+ }
+
+ if (skbn != NULL) {
+ skb_put(skb, pkt_len); /* Make room */
+ skb->protocol = eth_type_trans(skb, dev);
+ received++;
+ netif_receive_skb(skb);
+ } else {
+ fep->stats.rx_dropped++;
+ skbn = skb;
+ }
+ }
+
+ fep->rx_skbuff[curidx] = skbn;
+ CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data,
+ L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
+ DMA_FROM_DEVICE));
+ CBDW_DATLEN(bdp, 0);
+ CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);
+
+ /*
+ * Update BD pointer to next entry.
+ */
+ if ((sc & BD_ENET_RX_WRAP) == 0)
+ bdp++;
+ else
+ bdp = fep->rx_bd_base;
+
+ (*fep->ops->rx_bd_done)(dev);
+
+ if (received >= budget)
+ break;
+ }
+
+ fep->cur_rx = bdp;
+
+ if (received < budget) {
+ /* done */
+ napi_complete(napi);
+ (*fep->ops->napi_enable_rx)(dev);
+ }
+ return received;
+}
+
+static int fs_enet_tx_napi(struct napi_struct *napi, int budget)
+{
+ struct fs_enet_private *fep = container_of(napi, struct fs_enet_private,
+ napi_tx);
+ struct net_device *dev = fep->ndev;
+ cbd_t __iomem *bdp;
+ struct sk_buff *skb;
+ int dirtyidx, do_wake, do_restart;
+ u16 sc;
+ int has_tx_work = 0;
+
+ spin_lock(&fep->tx_lock);
+ bdp = fep->dirty_tx;
+
+	/* clear TX status bits for napi */
+ (*fep->ops->napi_clear_tx_event)(dev);
+
+ do_wake = do_restart = 0;
+ while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0) {
+ dirtyidx = bdp - fep->tx_bd_base;
+
+ if (fep->tx_free == fep->tx_ring)
+ break;
+
+ skb = fep->tx_skbuff[dirtyidx];
+
+ /*
+ * Check for errors.
+ */
+ if (sc & (BD_ENET_TX_HB | BD_ENET_TX_LC |
+ BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) {
+
+ if (sc & BD_ENET_TX_HB) /* No heartbeat */
+ fep->stats.tx_heartbeat_errors++;
+ if (sc & BD_ENET_TX_LC) /* Late collision */
+ fep->stats.tx_window_errors++;
+ if (sc & BD_ENET_TX_RL) /* Retrans limit */
+ fep->stats.tx_aborted_errors++;
+ if (sc & BD_ENET_TX_UN) /* Underrun */
+ fep->stats.tx_fifo_errors++;
+ if (sc & BD_ENET_TX_CSL) /* Carrier lost */
+ fep->stats.tx_carrier_errors++;
+
+ if (sc & (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) {
+ fep->stats.tx_errors++;
+ do_restart = 1;
+ }
+ } else
+ fep->stats.tx_packets++;
+
+ if (sc & BD_ENET_TX_READY) {
+ dev_warn(fep->dev,
+ "HEY! Enet xmit interrupt and TX_READY.\n");
+ }
+
+ /*
+ * Deferred means some collisions occurred during transmit,
+ * but we eventually sent the packet OK.
+ */
+ if (sc & BD_ENET_TX_DEF)
+ fep->stats.collisions++;
+
+ /* unmap */
+ if (fep->mapped_as_page[dirtyidx])
+ dma_unmap_page(fep->dev, CBDR_BUFADDR(bdp),
+ CBDR_DATLEN(bdp), DMA_TO_DEVICE);
+ else
+ dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
+ CBDR_DATLEN(bdp), DMA_TO_DEVICE);
+
+ /*
+ * Free the sk buffer associated with this last transmit.
+ */
+ if (skb) {
+ dev_kfree_skb(skb);
+ fep->tx_skbuff[dirtyidx] = NULL;
+ }
+
+ /*
+ * Update pointer to next buffer descriptor to be transmitted.
+ */
+ if ((sc & BD_ENET_TX_WRAP) == 0)
+ bdp++;
+ else
+ bdp = fep->tx_bd_base;
+
+ /*
+ * Since we have freed up a buffer, the ring is no longer
+ * full.
+ */
+ if (++fep->tx_free >= MAX_SKB_FRAGS)
+ do_wake = 1;
+ has_tx_work = 1;
+ }
+
+ fep->dirty_tx = bdp;
+
+ if (do_restart)
+ (*fep->ops->tx_restart)(dev);
+
+ if (!has_tx_work) {
+ napi_complete(napi);
+ (*fep->ops->napi_enable_tx)(dev);
+ }
+
+ spin_unlock(&fep->tx_lock);
+
+ if (do_wake)
+ netif_wake_queue(dev);
+
+ if (has_tx_work)
+ return budget;
+ return 0;
+}
+
+/*
+ * The interrupt handler.
+ * This is called from the MPC core interrupt.
+ */
+static irqreturn_t
+fs_enet_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct fs_enet_private *fep;
+ const struct fs_platform_info *fpi;
+ u32 int_events;
+ u32 int_clr_events;
+ int nr, napi_ok;
+ int handled;
+
+ fep = netdev_priv(dev);
+ fpi = fep->fpi;
+
+ nr = 0;
+ while ((int_events = (*fep->ops->get_int_events)(dev)) != 0) {
+ nr++;
+
+ int_clr_events = int_events;
+ int_clr_events &= ~fep->ev_napi_rx;
+
+ (*fep->ops->clear_int_events)(dev, int_clr_events);
+
+ if (int_events & fep->ev_err)
+ (*fep->ops->ev_error)(dev, int_events);
+
+ if (int_events & fep->ev_rx) {
+ napi_ok = napi_schedule_prep(&fep->napi);
+
+ (*fep->ops->napi_disable_rx)(dev);
+ (*fep->ops->clear_int_events)(dev, fep->ev_napi_rx);
+
+ /* NOTE: it is possible for FCCs in NAPI mode */
+ /* to submit a spurious interrupt while in poll */
+ if (napi_ok)
+ __napi_schedule(&fep->napi);
+ }
+
+ if (int_events & fep->ev_tx) {
+ napi_ok = napi_schedule_prep(&fep->napi_tx);
+
+ (*fep->ops->napi_disable_tx)(dev);
+ (*fep->ops->clear_int_events)(dev, fep->ev_napi_tx);
+
+ /* NOTE: it is possible for FCCs in NAPI mode */
+ /* to submit a spurious interrupt while in poll */
+ if (napi_ok)
+ __napi_schedule(&fep->napi_tx);
+ }
+ }
+
+ handled = nr > 0;
+ return IRQ_RETVAL(handled);
+}
+
+void fs_init_bds(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ cbd_t __iomem *bdp;
+ struct sk_buff *skb;
+ int i;
+
+ fs_cleanup_bds(dev);
+
+ fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
+ fep->tx_free = fep->tx_ring;
+ fep->cur_rx = fep->rx_bd_base;
+
+ /*
+ * Initialize the receive buffer descriptors.
+ */
+ for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
+ skb = netdev_alloc_skb(dev, ENET_RX_FRSIZE);
+ if (skb == NULL)
+ break;
+
+ skb_align(skb, ENET_RX_ALIGN);
+ fep->rx_skbuff[i] = skb;
+ CBDW_BUFADDR(bdp,
+ dma_map_single(fep->dev, skb->data,
+ L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
+ DMA_FROM_DEVICE));
+ CBDW_DATLEN(bdp, 0); /* zero */
+ CBDW_SC(bdp, BD_ENET_RX_EMPTY |
+ ((i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP));
+ }
+ /*
+	 * if we failed, fill up the remainder
+ */
+ for (; i < fep->rx_ring; i++, bdp++) {
+ fep->rx_skbuff[i] = NULL;
+ CBDW_SC(bdp, (i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP);
+ }
+
+ /*
+ * ...and the same for transmit.
+ */
+ for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
+ fep->tx_skbuff[i] = NULL;
+ CBDW_BUFADDR(bdp, 0);
+ CBDW_DATLEN(bdp, 0);
+ CBDW_SC(bdp, (i < fep->tx_ring - 1) ? 0 : BD_SC_WRAP);
+ }
+}
+
+void fs_cleanup_bds(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ struct sk_buff *skb;
+ cbd_t __iomem *bdp;
+ int i;
+
+ /*
+ * Reset SKB transmit buffers.
+ */
+ for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
+ if ((skb = fep->tx_skbuff[i]) == NULL)
+ continue;
+
+ /* unmap */
+ dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
+ skb->len, DMA_TO_DEVICE);
+
+ fep->tx_skbuff[i] = NULL;
+ dev_kfree_skb(skb);
+ }
+
+ /*
+ * Reset SKB receive buffers
+ */
+ for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
+ if ((skb = fep->rx_skbuff[i]) == NULL)
+ continue;
+
+ /* unmap */
+ dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
+ L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
+ DMA_FROM_DEVICE);
+
+ fep->rx_skbuff[i] = NULL;
+
+ dev_kfree_skb(skb);
+ }
+}
+
+/**********************************************************************************/
+
+#ifdef CONFIG_FS_ENET_MPC5121_FEC
+/*
+ * MPC5121 FEC requires 4-byte alignment for the TX data buffer!
+ */
+static struct sk_buff *tx_skb_align_workaround(struct net_device *dev,
+ struct sk_buff *skb)
+{
+ struct sk_buff *new_skb;
+
+ /* Alloc new skb */
+ new_skb = netdev_alloc_skb(dev, skb->len + 4);
+ if (!new_skb)
+ return NULL;
+
+ /* Make sure new skb is properly aligned */
+ skb_align(new_skb, 4);
+
+ /* Copy data to new skb ... */
+ skb_copy_from_linear_data(skb, new_skb->data, skb->len);
+ skb_put(new_skb, skb->len);
+
+ /* ... and free an old one */
+ dev_kfree_skb_any(skb);
+
+ return new_skb;
+}
+#endif
+
+static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ cbd_t __iomem *bdp;
+ int curidx;
+ u16 sc;
+ int nr_frags = skb_shinfo(skb)->nr_frags;
+ skb_frag_t *frag;
+ int len;
+
+#ifdef CONFIG_FS_ENET_MPC5121_FEC
+ if (((unsigned long)skb->data) & 0x3) {
+ skb = tx_skb_align_workaround(dev, skb);
+ if (!skb) {
+ /*
+			 * The packet could not be realigned due to a memory
+			 * allocation error in tx_skb_align_workaround(). The
+			 * original skb is still valid, so try to transmit it later.
+ */
+ return NETDEV_TX_BUSY;
+ }
+ }
+#endif
+ spin_lock(&fep->tx_lock);
+
+ /*
+ * Fill in a Tx ring entry
+ */
+ bdp = fep->cur_tx;
+
+ if (fep->tx_free <= nr_frags || (CBDR_SC(bdp) & BD_ENET_TX_READY)) {
+ netif_stop_queue(dev);
+ spin_unlock(&fep->tx_lock);
+
+ /*
+ * Ooops. All transmit buffers are full. Bail out.
+ * This should not happen, since the tx queue should be stopped.
+ */
+ dev_warn(fep->dev, "tx queue full!.\n");
+ return NETDEV_TX_BUSY;
+ }
+
+ curidx = bdp - fep->tx_bd_base;
+
+ len = skb->len;
+ fep->stats.tx_bytes += len;
+ if (nr_frags)
+ len -= skb->data_len;
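+	/* One buffer descriptor is consumed for the linear head and one per
+	 * page fragment, hence nr_frags + 1.
+	 */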
+ fep->tx_free -= nr_frags + 1;
+ /*
+ * Push the data cache so the CPM does not get stale memory data.
+ */
+ CBDW_BUFADDR(bdp, dma_map_single(fep->dev,
+ skb->data, len, DMA_TO_DEVICE));
+ CBDW_DATLEN(bdp, len);
+
+ fep->mapped_as_page[curidx] = 0;
+ frag = skb_shinfo(skb)->frags;
+ while (nr_frags) {
+ CBDC_SC(bdp,
+ BD_ENET_TX_STATS | BD_ENET_TX_LAST | BD_ENET_TX_TC);
+ CBDS_SC(bdp, BD_ENET_TX_READY);
+
+ if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
+ bdp++, curidx++;
+ else
+ bdp = fep->tx_bd_base, curidx = 0;
+
+ len = skb_frag_size(frag);
+ CBDW_BUFADDR(bdp, skb_frag_dma_map(fep->dev, frag, 0, len,
+ DMA_TO_DEVICE));
+ CBDW_DATLEN(bdp, len);
+
+ fep->tx_skbuff[curidx] = NULL;
+ fep->mapped_as_page[curidx] = 1;
+
+ frag++;
+ nr_frags--;
+ }
+
+ /* Trigger transmission start */
+ sc = BD_ENET_TX_READY | BD_ENET_TX_INTR |
+ BD_ENET_TX_LAST | BD_ENET_TX_TC;
+
+ /* note that while FEC does not have this bit
+ * it marks it as available for software use
+ * yay for hw reuse :) */
+ if (skb->len <= 60)
+ sc |= BD_ENET_TX_PAD;
+ CBDC_SC(bdp, BD_ENET_TX_STATS);
+ CBDS_SC(bdp, sc);
+
+ /* Save skb pointer. */
+ fep->tx_skbuff[curidx] = skb;
+
+ /* If this was the last BD in the ring, start at the beginning again. */
+ if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
+ bdp++;
+ else
+ bdp = fep->tx_bd_base;
+ fep->cur_tx = bdp;
+
+ if (fep->tx_free < MAX_SKB_FRAGS)
+ netif_stop_queue(dev);
+
+ skb_tx_timestamp(skb);
+
+ (*fep->ops->tx_kickstart)(dev);
+
+ spin_unlock(&fep->tx_lock);
+
+ return NETDEV_TX_OK;
+}
+
+static void fs_timeout(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ unsigned long flags;
+ int wake = 0;
+
+ fep->stats.tx_errors++;
+
+ spin_lock_irqsave(&fep->lock, flags);
+
+ if (dev->flags & IFF_UP) {
+ phy_stop(fep->phydev);
+ (*fep->ops->stop)(dev);
+ (*fep->ops->restart)(dev);
+ phy_start(fep->phydev);
+ }
+
+ phy_start(fep->phydev);
+ wake = fep->tx_free && !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY);
+ spin_unlock_irqrestore(&fep->lock, flags);
+
+ if (wake)
+ netif_wake_queue(dev);
+}
+
+/*-----------------------------------------------------------------------------
+ * generic link-change handler - should be sufficient for most cases
+ *-----------------------------------------------------------------------------*/
+static void generic_adjust_link(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ struct phy_device *phydev = fep->phydev;
+ int new_state = 0;
+
+ if (phydev->link) {
+ /* adjust to duplex mode */
+ if (phydev->duplex != fep->oldduplex) {
+ new_state = 1;
+ fep->oldduplex = phydev->duplex;
+ }
+
+ if (phydev->speed != fep->oldspeed) {
+ new_state = 1;
+ fep->oldspeed = phydev->speed;
+ }
+
+ if (!fep->oldlink) {
+ new_state = 1;
+ fep->oldlink = 1;
+ }
+
+ if (new_state)
+ fep->ops->restart(dev);
+ } else if (fep->oldlink) {
+ new_state = 1;
+ fep->oldlink = 0;
+ fep->oldspeed = 0;
+ fep->oldduplex = -1;
+ }
+
+ if (new_state && netif_msg_link(fep))
+ phy_print_status(phydev);
+}
+
+
+static void fs_adjust_link(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&fep->lock, flags);
+
+ if(fep->ops->adjust_link)
+ fep->ops->adjust_link(dev);
+ else
+ generic_adjust_link(dev);
+
+ spin_unlock_irqrestore(&fep->lock, flags);
+}
+
+static int fs_init_phy(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ struct phy_device *phydev;
+ phy_interface_t iface;
+
+ fep->oldlink = 0;
+ fep->oldspeed = 0;
+ fep->oldduplex = -1;
+
+ iface = fep->fpi->use_rmii ?
+ PHY_INTERFACE_MODE_RMII : PHY_INTERFACE_MODE_MII;
+
+ phydev = of_phy_connect(dev, fep->fpi->phy_node, &fs_adjust_link, 0,
+ iface);
+ if (!phydev) {
+ dev_err(&dev->dev, "Could not attach to PHY\n");
+ return -ENODEV;
+ }
+
+ fep->phydev = phydev;
+
+ return 0;
+}
+
+static int fs_enet_open(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ int r;
+ int err;
+
+ /* to initialize the fep->cur_rx,... */
+ /* not doing this, will cause a crash in fs_enet_rx_napi */
+ fs_init_bds(fep->ndev);
+
+ napi_enable(&fep->napi);
+ napi_enable(&fep->napi_tx);
+
+ /* Install our interrupt handler. */
+ r = request_irq(fep->interrupt, fs_enet_interrupt, IRQF_SHARED,
+ "fs_enet-mac", dev);
+ if (r != 0) {
+ dev_err(fep->dev, "Could not allocate FS_ENET IRQ!");
+ napi_disable(&fep->napi);
+ napi_disable(&fep->napi_tx);
+ return -EINVAL;
+ }
+
+ err = fs_init_phy(dev);
+ if (err) {
+ free_irq(fep->interrupt, dev);
+ napi_disable(&fep->napi);
+ napi_disable(&fep->napi_tx);
+ return err;
+ }
+ phy_start(fep->phydev);
+
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+static int fs_enet_close(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ unsigned long flags;
+
+ netif_stop_queue(dev);
+ netif_carrier_off(dev);
+ napi_disable(&fep->napi);
+ napi_disable(&fep->napi_tx);
+ phy_stop(fep->phydev);
+
+ spin_lock_irqsave(&fep->lock, flags);
+ spin_lock(&fep->tx_lock);
+ (*fep->ops->stop)(dev);
+ spin_unlock(&fep->tx_lock);
+ spin_unlock_irqrestore(&fep->lock, flags);
+
+ /* release any irqs */
+ phy_disconnect(fep->phydev);
+ fep->phydev = NULL;
+ free_irq(fep->interrupt, dev);
+
+ return 0;
+}
+
+static struct net_device_stats *fs_enet_get_stats(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ return &fep->stats;
+}
+
+/*************************************************************************/
+
+static void fs_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+}
+
+static int fs_get_regs_len(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ return (*fep->ops->get_regs_len)(dev);
+}
+
+static void fs_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *p)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ unsigned long flags;
+ int r, len;
+
+ len = regs->len;
+
+ spin_lock_irqsave(&fep->lock, flags);
+ r = (*fep->ops->get_regs)(dev, p, &len);
+ spin_unlock_irqrestore(&fep->lock, flags);
+
+ if (r == 0)
+ regs->version = 0;
+}
+
+static int fs_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ if (!fep->phydev)
+ return -ENODEV;
+
+ return phy_ethtool_gset(fep->phydev, cmd);
+}
+
+static int fs_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ if (!fep->phydev)
+ return -ENODEV;
+
+ return phy_ethtool_sset(fep->phydev, cmd);
+}
+
+static int fs_nway_reset(struct net_device *dev)
+{
+ return 0;
+}
+
+static u32 fs_get_msglevel(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ return fep->msg_enable;
+}
+
+static void fs_set_msglevel(struct net_device *dev, u32 value)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ fep->msg_enable = value;
+}
+
+static const struct ethtool_ops fs_ethtool_ops = {
+ .get_drvinfo = fs_get_drvinfo,
+ .get_regs_len = fs_get_regs_len,
+ .get_settings = fs_get_settings,
+ .set_settings = fs_set_settings,
+ .nway_reset = fs_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_msglevel = fs_get_msglevel,
+ .set_msglevel = fs_set_msglevel,
+ .get_regs = fs_get_regs,
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
+static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ return phy_mii_ioctl(fep->phydev, rq, cmd);
+}
+
+extern int fs_mii_connect(struct net_device *dev);
+extern void fs_mii_disconnect(struct net_device *dev);
+
+/**************************************************************************************/
+
+#ifdef CONFIG_FS_ENET_HAS_FEC
+#define IS_FEC(match) ((match)->data == &fs_fec_ops)
+#else
+#define IS_FEC(match) 0
+#endif
+
+static const struct net_device_ops fs_enet_netdev_ops = {
+ .ndo_open = fs_enet_open,
+ .ndo_stop = fs_enet_close,
+ .ndo_get_stats = fs_enet_get_stats,
+ .ndo_start_xmit = fs_enet_start_xmit,
+ .ndo_tx_timeout = fs_timeout,
+ .ndo_set_rx_mode = fs_set_multicast_list,
+ .ndo_do_ioctl = fs_ioctl,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_change_mtu = eth_change_mtu,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = fs_enet_netpoll,
+#endif
+};
+
+static const struct of_device_id fs_enet_match[];
+static int fs_enet_probe(struct platform_device *ofdev)
+{
+ const struct of_device_id *match;
+ struct net_device *ndev;
+ struct fs_enet_private *fep;
+ struct fs_platform_info *fpi;
+ const u32 *data;
+ struct clk *clk;
+ int err;
+ const u8 *mac_addr;
+ const char *phy_connection_type;
+ int privsize, len, ret = -ENODEV;
+
+ match = of_match_device(fs_enet_match, &ofdev->dev);
+ if (!match)
+ return -EINVAL;
+
+ fpi = kzalloc(sizeof(*fpi), GFP_KERNEL);
+ if (!fpi)
+ return -ENOMEM;
+
+ if (!IS_FEC(match)) {
+ data = of_get_property(ofdev->dev.of_node, "fsl,cpm-command", &len);
+ if (!data || len != 4)
+ goto out_free_fpi;
+
+ fpi->cp_command = *data;
+ }
+
+ fpi->rx_ring = 32;
+ fpi->tx_ring = 64;
+ fpi->rx_copybreak = 240;
+ fpi->napi_weight = 17;
+ fpi->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0);
+ if (!fpi->phy_node && of_phy_is_fixed_link(ofdev->dev.of_node)) {
+ err = of_phy_register_fixed_link(ofdev->dev.of_node);
+ if (err)
+ goto out_free_fpi;
+
+ /* In the case of a fixed PHY, the DT node associated
+ * to the PHY is the Ethernet MAC DT node.
+ */
+ fpi->phy_node = of_node_get(ofdev->dev.of_node);
+ }
+
+ if (of_device_is_compatible(ofdev->dev.of_node, "fsl,mpc5125-fec")) {
+ phy_connection_type = of_get_property(ofdev->dev.of_node,
+ "phy-connection-type", NULL);
+ if (phy_connection_type && !strcmp("rmii", phy_connection_type))
+ fpi->use_rmii = 1;
+ }
+
+	/* Make the clock lookup non-fatal (the driver is shared among
+	 * platforms), but require the enable to succeed when a clock was
+	 * specified/found, and keep a reference to the clock upon
+	 * successful acquisition.
+	 */
+ clk = devm_clk_get(&ofdev->dev, "per");
+ if (!IS_ERR(clk)) {
+ err = clk_prepare_enable(clk);
+ if (err) {
+ ret = err;
+ goto out_free_fpi;
+ }
+ fpi->clk_per = clk;
+ }
+
+ privsize = sizeof(*fep) +
+ sizeof(struct sk_buff **) *
+ (fpi->rx_ring + fpi->tx_ring) +
+ sizeof(char) * fpi->tx_ring;
+
+ ndev = alloc_etherdev(privsize);
+ if (!ndev) {
+ ret = -ENOMEM;
+ goto out_put;
+ }
+
+ SET_NETDEV_DEV(ndev, &ofdev->dev);
+ platform_set_drvdata(ofdev, ndev);
+
+ fep = netdev_priv(ndev);
+ fep->dev = &ofdev->dev;
+ fep->ndev = ndev;
+ fep->fpi = fpi;
+ fep->ops = match->data;
+
+ ret = fep->ops->setup_data(ndev);
+ if (ret)
+ goto out_free_dev;
+
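+	/* Carve the per-ring bookkeeping out of the single allocation made
+	 * above (see privsize): rx_ring sk_buff pointers, then tx_ring
+	 * sk_buff pointers, then one mapped_as_page flag byte per tx ring
+	 * entry, all placed directly after the private structure.
+	 */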
+ fep->rx_skbuff = (struct sk_buff **)&fep[1];
+ fep->tx_skbuff = fep->rx_skbuff + fpi->rx_ring;
+ fep->mapped_as_page = (char *)(fep->rx_skbuff + fpi->rx_ring +
+ fpi->tx_ring);
+
+ spin_lock_init(&fep->lock);
+ spin_lock_init(&fep->tx_lock);
+
+ mac_addr = of_get_mac_address(ofdev->dev.of_node);
+ if (mac_addr)
+ memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
+
+ ret = fep->ops->allocate_bd(ndev);
+ if (ret)
+ goto out_cleanup_data;
+
+ fep->rx_bd_base = fep->ring_base;
+ fep->tx_bd_base = fep->rx_bd_base + fpi->rx_ring;
+
+ fep->tx_ring = fpi->tx_ring;
+ fep->rx_ring = fpi->rx_ring;
+
+ ndev->netdev_ops = &fs_enet_netdev_ops;
+ ndev->watchdog_timeo = 2 * HZ;
+ netif_napi_add(ndev, &fep->napi, fs_enet_rx_napi, fpi->napi_weight);
+ netif_napi_add(ndev, &fep->napi_tx, fs_enet_tx_napi, 2);
+
+ ndev->ethtool_ops = &fs_ethtool_ops;
+
+ init_timer(&fep->phy_timer_list);
+
+ netif_carrier_off(ndev);
+
+ ndev->features |= NETIF_F_SG;
+
+ ret = register_netdev(ndev);
+ if (ret)
+ goto out_free_bd;
+
+ pr_info("%s: fs_enet: %pM\n", ndev->name, ndev->dev_addr);
+
+ return 0;
+
+out_free_bd:
+ fep->ops->free_bd(ndev);
+out_cleanup_data:
+ fep->ops->cleanup_data(ndev);
+out_free_dev:
+ free_netdev(ndev);
+out_put:
+ of_node_put(fpi->phy_node);
+ if (fpi->clk_per)
+ clk_disable_unprepare(fpi->clk_per);
+out_free_fpi:
+ kfree(fpi);
+ return ret;
+}
+
+static int fs_enet_remove(struct platform_device *ofdev)
+{
+ struct net_device *ndev = platform_get_drvdata(ofdev);
+ struct fs_enet_private *fep = netdev_priv(ndev);
+
+ unregister_netdev(ndev);
+
+ fep->ops->free_bd(ndev);
+ fep->ops->cleanup_data(ndev);
+ dev_set_drvdata(fep->dev, NULL);
+ of_node_put(fep->fpi->phy_node);
+ if (fep->fpi->clk_per)
+ clk_disable_unprepare(fep->fpi->clk_per);
+ free_netdev(ndev);
+ return 0;
+}
+
+static const struct of_device_id fs_enet_match[] = {
+#ifdef CONFIG_FS_ENET_HAS_SCC
+ {
+ .compatible = "fsl,cpm1-scc-enet",
+ .data = (void *)&fs_scc_ops,
+ },
+ {
+ .compatible = "fsl,cpm2-scc-enet",
+ .data = (void *)&fs_scc_ops,
+ },
+#endif
+#ifdef CONFIG_FS_ENET_HAS_FCC
+ {
+ .compatible = "fsl,cpm2-fcc-enet",
+ .data = (void *)&fs_fcc_ops,
+ },
+#endif
+#ifdef CONFIG_FS_ENET_HAS_FEC
+#ifdef CONFIG_FS_ENET_MPC5121_FEC
+ {
+ .compatible = "fsl,mpc5121-fec",
+ .data = (void *)&fs_fec_ops,
+ },
+ {
+ .compatible = "fsl,mpc5125-fec",
+ .data = (void *)&fs_fec_ops,
+ },
+#else
+ {
+ .compatible = "fsl,pq1-fec-enet",
+ .data = (void *)&fs_fec_ops,
+ },
+#endif
+#endif
+ {}
+};
+MODULE_DEVICE_TABLE(of, fs_enet_match);
+
+static struct platform_driver fs_enet_driver = {
+ .driver = {
+ .name = "fs_enet",
+ .of_match_table = fs_enet_match,
+ },
+ .probe = fs_enet_probe,
+ .remove = fs_enet_remove,
+};
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void fs_enet_netpoll(struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ fs_enet_interrupt(dev->irq, dev);
+ enable_irq(dev->irq);
+}
+#endif
+
+module_platform_driver(fs_enet_driver);
diff --git a/kernel/drivers/net/ethernet/freescale/fs_enet/fs_enet.h b/kernel/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
new file mode 100644
index 000000000..f184d8f95
--- /dev/null
+++ b/kernel/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
@@ -0,0 +1,250 @@
+#ifndef FS_ENET_H
+#define FS_ENET_H
+
+#include <linux/mii.h>
+#include <linux/netdevice.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/phy.h>
+#include <linux/dma-mapping.h>
+
+#include <linux/fs_enet_pd.h>
+#include <asm/fs_pd.h>
+
+#ifdef CONFIG_CPM1
+#include <asm/cpm1.h>
+#endif
+
+#if defined(CONFIG_FS_ENET_HAS_FEC)
+#include <asm/cpm.h>
+
+#if defined(CONFIG_FS_ENET_MPC5121_FEC)
+/* MPC5121 FEC has different register layout */
+struct fec {
+ u32 fec_reserved0;
+ u32 fec_ievent; /* Interrupt event reg */
+ u32 fec_imask; /* Interrupt mask reg */
+ u32 fec_reserved1;
+ u32 fec_r_des_active; /* Receive descriptor reg */
+ u32 fec_x_des_active; /* Transmit descriptor reg */
+ u32 fec_reserved2[3];
+ u32 fec_ecntrl; /* Ethernet control reg */
+ u32 fec_reserved3[6];
+ u32 fec_mii_data; /* MII manage frame reg */
+ u32 fec_mii_speed; /* MII speed control reg */
+ u32 fec_reserved4[7];
+ u32 fec_mib_ctrlstat; /* MIB control/status reg */
+ u32 fec_reserved5[7];
+ u32 fec_r_cntrl; /* Receive control reg */
+ u32 fec_reserved6[15];
+ u32 fec_x_cntrl; /* Transmit Control reg */
+ u32 fec_reserved7[7];
+ u32 fec_addr_low; /* Low 32bits MAC address */
+ u32 fec_addr_high; /* High 16bits MAC address */
+ u32 fec_opd; /* Opcode + Pause duration */
+ u32 fec_reserved8[10];
+ u32 fec_hash_table_high; /* High 32bits hash table */
+ u32 fec_hash_table_low; /* Low 32bits hash table */
+ u32 fec_grp_hash_table_high; /* High 32bits hash table */
+ u32 fec_grp_hash_table_low; /* Low 32bits hash table */
+ u32 fec_reserved9[7];
+ u32 fec_x_wmrk; /* FIFO transmit water mark */
+ u32 fec_reserved10;
+ u32 fec_r_bound; /* FIFO receive bound reg */
+ u32 fec_r_fstart; /* FIFO receive start reg */
+ u32 fec_reserved11[11];
+ u32 fec_r_des_start; /* Receive descriptor ring */
+ u32 fec_x_des_start; /* Transmit descriptor ring */
+ u32 fec_r_buff_size; /* Maximum receive buff size */
+ u32 fec_reserved12[26];
+ u32 fec_dma_control; /* DMA Endian and other ctrl */
+};
+#endif
+
+struct fec_info {
+ struct fec __iomem *fecp;
+ u32 mii_speed;
+};
+#endif
+
+#ifdef CONFIG_CPM2
+#include <asm/cpm2.h>
+#endif
+
+/* hw driver ops */
+struct fs_ops {
+ int (*setup_data)(struct net_device *dev);
+ int (*allocate_bd)(struct net_device *dev);
+ void (*free_bd)(struct net_device *dev);
+ void (*cleanup_data)(struct net_device *dev);
+ void (*set_multicast_list)(struct net_device *dev);
+ void (*adjust_link)(struct net_device *dev);
+ void (*restart)(struct net_device *dev);
+ void (*stop)(struct net_device *dev);
+ void (*napi_clear_rx_event)(struct net_device *dev);
+ void (*napi_enable_rx)(struct net_device *dev);
+ void (*napi_disable_rx)(struct net_device *dev);
+ void (*napi_clear_tx_event)(struct net_device *dev);
+ void (*napi_enable_tx)(struct net_device *dev);
+ void (*napi_disable_tx)(struct net_device *dev);
+ void (*rx_bd_done)(struct net_device *dev);
+ void (*tx_kickstart)(struct net_device *dev);
+ u32 (*get_int_events)(struct net_device *dev);
+ void (*clear_int_events)(struct net_device *dev, u32 int_events);
+ void (*ev_error)(struct net_device *dev, u32 int_events);
+ int (*get_regs)(struct net_device *dev, void *p, int *sizep);
+ int (*get_regs_len)(struct net_device *dev);
+ void (*tx_restart)(struct net_device *dev);
+};
+
+struct phy_info {
+ unsigned int id;
+ const char *name;
+ void (*startup) (struct net_device * dev);
+ void (*shutdown) (struct net_device * dev);
+ void (*ack_int) (struct net_device * dev);
+};
+
+/* The FEC stores dest/src/type, data, and checksum for receive packets.
+ */
+#define MAX_MTU 1508 /* Allow fullsized pppoe packets over VLAN */
+#define MIN_MTU 46 /* this is data size */
+#define CRC_LEN 4
+
+#define PKT_MAXBUF_SIZE (MAX_MTU+ETH_HLEN+CRC_LEN)
+#define PKT_MINBUF_SIZE (MIN_MTU+ETH_HLEN+CRC_LEN)
+
+/* Must be a multiple of 32 (to cover both FEC & FCC) */
+#define PKT_MAXBLR_SIZE ((PKT_MAXBUF_SIZE + 31) & ~31)
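+/* For example, with the sizes above PKT_MAXBUF_SIZE is 1508 + 14 + 4 = 1526,
+ * and (1526 + 31) & ~31 rounds it up to 1536, the next multiple of 32.
+ */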
+/* This is needed so that invalidate_xxx won't invalidate too much */
+#define ENET_RX_ALIGN 16
+#define ENET_RX_FRSIZE L1_CACHE_ALIGN(PKT_MAXBUF_SIZE + ENET_RX_ALIGN - 1)
+
+struct fs_enet_private {
+ struct napi_struct napi;
+ struct napi_struct napi_tx;
+ struct device *dev; /* pointer back to the device (must be initialized first) */
+ struct net_device *ndev;
+ spinlock_t lock; /* during all ops except TX pckt processing */
+ spinlock_t tx_lock; /* during fs_start_xmit and fs_tx */
+ struct fs_platform_info *fpi;
+ const struct fs_ops *ops;
+ int rx_ring, tx_ring;
+ dma_addr_t ring_mem_addr;
+ void __iomem *ring_base;
+ struct sk_buff **rx_skbuff;
+ struct sk_buff **tx_skbuff;
+ char *mapped_as_page;
+ cbd_t __iomem *rx_bd_base; /* Address of Rx and Tx buffers. */
+ cbd_t __iomem *tx_bd_base;
+ cbd_t __iomem *dirty_tx; /* ring entries to be free()ed. */
+ cbd_t __iomem *cur_rx;
+ cbd_t __iomem *cur_tx;
+ int tx_free;
+ struct net_device_stats stats;
+ struct timer_list phy_timer_list;
+ const struct phy_info *phy;
+ u32 msg_enable;
+ struct mii_if_info mii_if;
+ unsigned int last_mii_status;
+ int interrupt;
+
+ struct phy_device *phydev;
+ int oldduplex, oldspeed, oldlink; /* current settings */
+
+ /* event masks */
+ u32 ev_napi_rx; /* mask of NAPI rx events */
+	u32 ev_napi_tx;		/* mask of NAPI tx events */
+ u32 ev_rx; /* rx event mask */
+ u32 ev_tx; /* tx event mask */
+ u32 ev_err; /* error event mask */
+
+ u16 bd_rx_empty; /* mask of BD rx empty */
+ u16 bd_rx_err; /* mask of BD rx errors */
+
+ union {
+ struct {
+ int idx; /* FEC1 = 0, FEC2 = 1 */
+ void __iomem *fecp; /* hw registers */
+ u32 hthi, htlo; /* state for multicast */
+ } fec;
+
+ struct {
+ int idx; /* FCC1-3 = 0-2 */
+ void __iomem *fccp; /* hw registers */
+ void __iomem *ep; /* parameter ram */
+ void __iomem *fcccp; /* hw registers cont. */
+ void __iomem *mem; /* FCC DPRAM */
+ u32 gaddrh, gaddrl; /* group address */
+ } fcc;
+
+ struct {
+ int idx; /* FEC1 = 0, FEC2 = 1 */
+ void __iomem *sccp; /* hw registers */
+ void __iomem *ep; /* parameter ram */
+ u32 hthi, htlo; /* state for multicast */
+ } scc;
+
+ };
+};
+
+/***************************************************************************/
+
+void fs_init_bds(struct net_device *dev);
+void fs_cleanup_bds(struct net_device *dev);
+
+/***************************************************************************/
+
+#define DRV_MODULE_NAME "fs_enet"
+#define PFX DRV_MODULE_NAME ": "
+#define DRV_MODULE_VERSION "1.1"
+#define DRV_MODULE_RELDATE "Sep 22, 2014"
+
+/***************************************************************************/
+
+int fs_enet_platform_init(void);
+void fs_enet_platform_cleanup(void);
+
+/***************************************************************************/
+/* buffer descriptor access macros */
+
+/* access macros */
+#if defined(CONFIG_CPM1)
+/* for a CPM1 the __raw_xxx accessors are sufficient */
+#define __cbd_out32(addr, x) __raw_writel(x, addr)
+#define __cbd_out16(addr, x) __raw_writew(x, addr)
+#define __cbd_in32(addr) __raw_readl(addr)
+#define __cbd_in16(addr) __raw_readw(addr)
+#else
+/* for others play it safe */
+#define __cbd_out32(addr, x) out_be32(addr, x)
+#define __cbd_out16(addr, x) out_be16(addr, x)
+#define __cbd_in32(addr) in_be32(addr)
+#define __cbd_in16(addr) in_be16(addr)
+#endif
+
+/* write */
+#define CBDW_SC(_cbd, _sc) __cbd_out16(&(_cbd)->cbd_sc, (_sc))
+#define CBDW_DATLEN(_cbd, _datlen) __cbd_out16(&(_cbd)->cbd_datlen, (_datlen))
+#define CBDW_BUFADDR(_cbd, _bufaddr) __cbd_out32(&(_cbd)->cbd_bufaddr, (_bufaddr))
+
+/* read */
+#define CBDR_SC(_cbd) __cbd_in16(&(_cbd)->cbd_sc)
+#define CBDR_DATLEN(_cbd) __cbd_in16(&(_cbd)->cbd_datlen)
+#define CBDR_BUFADDR(_cbd) __cbd_in32(&(_cbd)->cbd_bufaddr)
+
+/* set bits */
+#define CBDS_SC(_cbd, _sc) CBDW_SC(_cbd, CBDR_SC(_cbd) | (_sc))
+
+/* clear bits */
+#define CBDC_SC(_cbd, _sc) CBDW_SC(_cbd, CBDR_SC(_cbd) & ~(_sc))
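+
+/* Usage sketch: the driver manipulates descriptor status with these macros,
+ * e.g. CBDS_SC(bdp, BD_ENET_TX_READY) does a read-modify-write that sets the
+ * READY bit, while CBDC_SC(bdp, BD_ENET_TX_STATS) clears the status bits,
+ * using the endian-safe accessors selected above.
+ */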
+
+/*******************************************************************/
+
+extern const struct fs_ops fs_fec_ops;
+extern const struct fs_ops fs_fcc_ops;
+extern const struct fs_ops fs_scc_ops;
+
+/*******************************************************************/
+
+#endif
diff --git a/kernel/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c b/kernel/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
new file mode 100644
index 000000000..08f5b911d
--- /dev/null
+++ b/kernel/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
@@ -0,0 +1,614 @@
+/*
+ * FCC driver for Motorola MPC82xx (PQ2).
+ *
+ * Copyright (c) 2003 Intracom S.A.
+ * by Pantelis Antoniou <panto@intracom.gr>
+ *
+ * 2005 (c) MontaVista Software, Inc.
+ * Vitaly Bordug <vbordug@ru.mvista.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/bitops.h>
+#include <linux/fs.h>
+#include <linux/platform_device.h>
+#include <linux/phy.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/gfp.h>
+
+#include <asm/immap_cpm2.h>
+#include <asm/mpc8260.h>
+#include <asm/cpm2.h>
+
+#include <asm/pgtable.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+
+#include "fs_enet.h"
+
+/*************************************************/
+
+/* FCC access macros */
+
+/* write, read, set bits, clear bits */
+#define W32(_p, _m, _v) out_be32(&(_p)->_m, (_v))
+#define R32(_p, _m) in_be32(&(_p)->_m)
+#define S32(_p, _m, _v) W32(_p, _m, R32(_p, _m) | (_v))
+#define C32(_p, _m, _v) W32(_p, _m, R32(_p, _m) & ~(_v))
+
+#define W16(_p, _m, _v) out_be16(&(_p)->_m, (_v))
+#define R16(_p, _m) in_be16(&(_p)->_m)
+#define S16(_p, _m, _v) W16(_p, _m, R16(_p, _m) | (_v))
+#define C16(_p, _m, _v) W16(_p, _m, R16(_p, _m) & ~(_v))
+
+#define W8(_p, _m, _v) out_8(&(_p)->_m, (_v))
+#define R8(_p, _m) in_8(&(_p)->_m)
+#define S8(_p, _m, _v) W8(_p, _m, R8(_p, _m) | (_v))
+#define C8(_p, _m, _v) W8(_p, _m, R8(_p, _m) & ~(_v))
+
+/*************************************************/
+
+#define FCC_MAX_MULTICAST_ADDRS 64
+
+#define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18))
+#define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | (VAL & 0xffff))
+#define mk_mii_end 0
+
+#define MAX_CR_CMD_LOOPS 10000
+
+static inline int fcc_cr_cmd(struct fs_enet_private *fep, u32 op)
+{
+ const struct fs_platform_info *fpi = fep->fpi;
+
+ return cpm_command(fpi->cp_command, op);
+}
+
+static int do_pd_setup(struct fs_enet_private *fep)
+{
+ struct platform_device *ofdev = to_platform_device(fep->dev);
+ struct fs_platform_info *fpi = fep->fpi;
+ int ret = -EINVAL;
+
+ fep->interrupt = irq_of_parse_and_map(ofdev->dev.of_node, 0);
+ if (fep->interrupt == NO_IRQ)
+ goto out;
+
+ fep->fcc.fccp = of_iomap(ofdev->dev.of_node, 0);
+ if (!fep->fcc.fccp)
+ goto out;
+
+ fep->fcc.ep = of_iomap(ofdev->dev.of_node, 1);
+ if (!fep->fcc.ep)
+ goto out_fccp;
+
+ fep->fcc.fcccp = of_iomap(ofdev->dev.of_node, 2);
+ if (!fep->fcc.fcccp)
+ goto out_ep;
+
+ fep->fcc.mem = (void __iomem *)cpm2_immr;
+ fpi->dpram_offset = cpm_dpalloc(128, 32);
+ if (IS_ERR_VALUE(fpi->dpram_offset)) {
+ ret = fpi->dpram_offset;
+ goto out_fcccp;
+ }
+
+ return 0;
+
+out_fcccp:
+ iounmap(fep->fcc.fcccp);
+out_ep:
+ iounmap(fep->fcc.ep);
+out_fccp:
+ iounmap(fep->fcc.fccp);
+out:
+ return ret;
+}
+
+#define FCC_NAPI_RX_EVENT_MSK (FCC_ENET_RXF | FCC_ENET_RXB)
+#define FCC_NAPI_TX_EVENT_MSK (FCC_ENET_TXB)
+#define FCC_RX_EVENT (FCC_ENET_RXF)
+#define FCC_TX_EVENT (FCC_ENET_TXB)
+#define FCC_ERR_EVENT_MSK (FCC_ENET_TXE)
+
+static int setup_data(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ if (do_pd_setup(fep) != 0)
+ return -EINVAL;
+
+ fep->ev_napi_rx = FCC_NAPI_RX_EVENT_MSK;
+ fep->ev_napi_tx = FCC_NAPI_TX_EVENT_MSK;
+ fep->ev_rx = FCC_RX_EVENT;
+ fep->ev_tx = FCC_TX_EVENT;
+ fep->ev_err = FCC_ERR_EVENT_MSK;
+
+ return 0;
+}
+
+static int allocate_bd(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ const struct fs_platform_info *fpi = fep->fpi;
+
+ fep->ring_base = (void __iomem __force *)dma_alloc_coherent(fep->dev,
+ (fpi->tx_ring + fpi->rx_ring) *
+ sizeof(cbd_t), &fep->ring_mem_addr,
+ GFP_KERNEL);
+ if (fep->ring_base == NULL)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void free_bd(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ const struct fs_platform_info *fpi = fep->fpi;
+
+ if (fep->ring_base)
+ dma_free_coherent(fep->dev,
+ (fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t),
+ (void __force *)fep->ring_base, fep->ring_mem_addr);
+}
+
+static void cleanup_data(struct net_device *dev)
+{
+ /* nothing */
+}
+
+static void set_promiscuous_mode(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ fcc_t __iomem *fccp = fep->fcc.fccp;
+
+ S32(fccp, fcc_fpsmr, FCC_PSMR_PRO);
+}
+
+static void set_multicast_start(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ fcc_enet_t __iomem *ep = fep->fcc.ep;
+
+ W32(ep, fen_gaddrh, 0);
+ W32(ep, fen_gaddrl, 0);
+}
+
+static void set_multicast_one(struct net_device *dev, const u8 *mac)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ fcc_enet_t __iomem *ep = fep->fcc.ep;
+ u16 taddrh, taddrm, taddrl;
+
+ taddrh = ((u16)mac[5] << 8) | mac[4];
+ taddrm = ((u16)mac[3] << 8) | mac[2];
+ taddrl = ((u16)mac[1] << 8) | mac[0];
+
+ W16(ep, fen_taddrh, taddrh);
+ W16(ep, fen_taddrm, taddrm);
+ W16(ep, fen_taddrl, taddrl);
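+	/* The SET GROUP ADDRESS command hashes the address loaded into
+	 * TADDR above and sets the matching bit in the GADDR filter.
+	 */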
+ fcc_cr_cmd(fep, CPM_CR_SET_GADDR);
+}
+
+static void set_multicast_finish(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ fcc_t __iomem *fccp = fep->fcc.fccp;
+ fcc_enet_t __iomem *ep = fep->fcc.ep;
+
+ /* clear promiscuous always */
+ C32(fccp, fcc_fpsmr, FCC_PSMR_PRO);
+
+ /* if all multi or too many multicasts; just enable all */
+ if ((dev->flags & IFF_ALLMULTI) != 0 ||
+ netdev_mc_count(dev) > FCC_MAX_MULTICAST_ADDRS) {
+
+ W32(ep, fen_gaddrh, 0xffffffff);
+ W32(ep, fen_gaddrl, 0xffffffff);
+ }
+
+ /* read back */
+ fep->fcc.gaddrh = R32(ep, fen_gaddrh);
+ fep->fcc.gaddrl = R32(ep, fen_gaddrl);
+}
+
+static void set_multicast_list(struct net_device *dev)
+{
+ struct netdev_hw_addr *ha;
+
+ if ((dev->flags & IFF_PROMISC) == 0) {
+ set_multicast_start(dev);
+ netdev_for_each_mc_addr(ha, dev)
+ set_multicast_one(dev, ha->addr);
+ set_multicast_finish(dev);
+ } else
+ set_promiscuous_mode(dev);
+}
+
+static void restart(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ const struct fs_platform_info *fpi = fep->fpi;
+ fcc_t __iomem *fccp = fep->fcc.fccp;
+ fcc_c_t __iomem *fcccp = fep->fcc.fcccp;
+ fcc_enet_t __iomem *ep = fep->fcc.ep;
+ dma_addr_t rx_bd_base_phys, tx_bd_base_phys;
+ u16 paddrh, paddrm, paddrl;
+ const unsigned char *mac;
+ int i;
+
+ C32(fccp, fcc_gfmr, FCC_GFMR_ENR | FCC_GFMR_ENT);
+
+ /* clear everything (slow & steady does it) */
+ for (i = 0; i < sizeof(*ep); i++)
+ out_8((u8 __iomem *)ep + i, 0);
+
+ /* get physical address */
+ rx_bd_base_phys = fep->ring_mem_addr;
+ tx_bd_base_phys = rx_bd_base_phys + sizeof(cbd_t) * fpi->rx_ring;
+
+ /* point to bds */
+ W32(ep, fen_genfcc.fcc_rbase, rx_bd_base_phys);
+ W32(ep, fen_genfcc.fcc_tbase, tx_bd_base_phys);
+
+ /* Set maximum bytes per receive buffer.
+ * It must be a multiple of 32.
+ */
+ W16(ep, fen_genfcc.fcc_mrblr, PKT_MAXBLR_SIZE);
+
+ W32(ep, fen_genfcc.fcc_rstate, (CPMFCR_GBL | CPMFCR_EB) << 24);
+ W32(ep, fen_genfcc.fcc_tstate, (CPMFCR_GBL | CPMFCR_EB) << 24);
+
+ /* Allocate space in the reserved FCC area of DPRAM for the
+ * internal buffers. No one uses this space (yet), so we
+ * can do this. Later, we will add resource management for
+ * this area.
+ */
+
+ W16(ep, fen_genfcc.fcc_riptr, fpi->dpram_offset);
+ W16(ep, fen_genfcc.fcc_tiptr, fpi->dpram_offset + 32);
+
+ W16(ep, fen_padptr, fpi->dpram_offset + 64);
+
+ /* fill with special symbol... */
+ memset_io(fep->fcc.mem + fpi->dpram_offset + 64, 0x88, 32);
+
+ W32(ep, fen_genfcc.fcc_rbptr, 0);
+ W32(ep, fen_genfcc.fcc_tbptr, 0);
+ W32(ep, fen_genfcc.fcc_rcrc, 0);
+ W32(ep, fen_genfcc.fcc_tcrc, 0);
+ W16(ep, fen_genfcc.fcc_res1, 0);
+ W32(ep, fen_genfcc.fcc_res2, 0);
+
+ /* no CAM */
+ W32(ep, fen_camptr, 0);
+
+ /* Set CRC preset and mask */
+ W32(ep, fen_cmask, 0xdebb20e3);
+ W32(ep, fen_cpres, 0xffffffff);
+
+ W32(ep, fen_crcec, 0); /* CRC Error counter */
+ W32(ep, fen_alec, 0); /* alignment error counter */
+ W32(ep, fen_disfc, 0); /* discard frame counter */
+ W16(ep, fen_retlim, 15); /* Retry limit threshold */
+ W16(ep, fen_pper, 0); /* Normal persistence */
+
+ /* set group address */
+ W32(ep, fen_gaddrh, fep->fcc.gaddrh);
+	W32(ep, fen_gaddrl, fep->fcc.gaddrl);
+
+ /* Clear hash filter tables */
+ W32(ep, fen_iaddrh, 0);
+ W32(ep, fen_iaddrl, 0);
+
+ /* Clear the Out-of-sequence TxBD */
+ W16(ep, fen_tfcstat, 0);
+ W16(ep, fen_tfclen, 0);
+ W32(ep, fen_tfcptr, 0);
+
+ W16(ep, fen_mflr, PKT_MAXBUF_SIZE); /* maximum frame length register */
+ W16(ep, fen_minflr, PKT_MINBUF_SIZE); /* minimum frame length register */
+
+ /* set address */
+ mac = dev->dev_addr;
+ paddrh = ((u16)mac[5] << 8) | mac[4];
+ paddrm = ((u16)mac[3] << 8) | mac[2];
+ paddrl = ((u16)mac[1] << 8) | mac[0];
+
+ W16(ep, fen_paddrh, paddrh);
+ W16(ep, fen_paddrm, paddrm);
+ W16(ep, fen_paddrl, paddrl);
+
+ W16(ep, fen_taddrh, 0);
+ W16(ep, fen_taddrm, 0);
+ W16(ep, fen_taddrl, 0);
+
+ W16(ep, fen_maxd1, 1520); /* maximum DMA1 length */
+ W16(ep, fen_maxd2, 1520); /* maximum DMA2 length */
+
+ /* Clear stat counters, in case we ever enable RMON */
+ W32(ep, fen_octc, 0);
+ W32(ep, fen_colc, 0);
+ W32(ep, fen_broc, 0);
+ W32(ep, fen_mulc, 0);
+ W32(ep, fen_uspc, 0);
+ W32(ep, fen_frgc, 0);
+ W32(ep, fen_ospc, 0);
+ W32(ep, fen_jbrc, 0);
+ W32(ep, fen_p64c, 0);
+ W32(ep, fen_p65c, 0);
+ W32(ep, fen_p128c, 0);
+ W32(ep, fen_p256c, 0);
+ W32(ep, fen_p512c, 0);
+ W32(ep, fen_p1024c, 0);
+
+ W16(ep, fen_rfthr, 0); /* Suggested by manual */
+ W16(ep, fen_rfcnt, 0);
+ W16(ep, fen_cftype, 0);
+
+ fs_init_bds(dev);
+
+ /* adjust to speed (for RMII mode) */
+ if (fpi->use_rmii) {
+ if (fep->phydev->speed == 100)
+ C8(fcccp, fcc_gfemr, 0x20);
+ else
+ S8(fcccp, fcc_gfemr, 0x20);
+ }
+
+ fcc_cr_cmd(fep, CPM_CR_INIT_TRX);
+
+ /* clear events */
+ W16(fccp, fcc_fcce, 0xffff);
+
+ /* Enable interrupts we wish to service */
+ W16(fccp, fcc_fccm, FCC_ENET_TXE | FCC_ENET_RXF | FCC_ENET_TXB);
+
+ /* Set GFMR to enable Ethernet operating mode */
+ W32(fccp, fcc_gfmr, FCC_GFMR_TCI | FCC_GFMR_MODE_ENET);
+
+ /* set sync/delimiters */
+ W16(fccp, fcc_fdsr, 0xd555);
+
+ W32(fccp, fcc_fpsmr, FCC_PSMR_ENCRC);
+
+ if (fpi->use_rmii)
+ S32(fccp, fcc_fpsmr, FCC_PSMR_RMII);
+
+ /* adjust to duplex mode */
+ if (fep->phydev->duplex)
+ S32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB);
+ else
+ C32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB);
+
+ /* Restore multicast and promiscuous settings */
+ set_multicast_list(dev);
+
+ S32(fccp, fcc_gfmr, FCC_GFMR_ENR | FCC_GFMR_ENT);
+}
+
+static void stop(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ fcc_t __iomem *fccp = fep->fcc.fccp;
+
+ /* stop ethernet */
+ C32(fccp, fcc_gfmr, FCC_GFMR_ENR | FCC_GFMR_ENT);
+
+ /* clear events */
+ W16(fccp, fcc_fcce, 0xffff);
+
+ /* clear interrupt mask */
+ W16(fccp, fcc_fccm, 0);
+
+ fs_cleanup_bds(dev);
+}
+
+static void napi_clear_rx_event(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ fcc_t __iomem *fccp = fep->fcc.fccp;
+
+ W16(fccp, fcc_fcce, FCC_NAPI_RX_EVENT_MSK);
+}
+
+static void napi_enable_rx(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ fcc_t __iomem *fccp = fep->fcc.fccp;
+
+ S16(fccp, fcc_fccm, FCC_NAPI_RX_EVENT_MSK);
+}
+
+static void napi_disable_rx(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ fcc_t __iomem *fccp = fep->fcc.fccp;
+
+ C16(fccp, fcc_fccm, FCC_NAPI_RX_EVENT_MSK);
+}
+
+static void napi_clear_tx_event(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ fcc_t __iomem *fccp = fep->fcc.fccp;
+
+ W16(fccp, fcc_fcce, FCC_NAPI_TX_EVENT_MSK);
+}
+
+static void napi_enable_tx(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ fcc_t __iomem *fccp = fep->fcc.fccp;
+
+ S16(fccp, fcc_fccm, FCC_NAPI_TX_EVENT_MSK);
+}
+
+static void napi_disable_tx(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ fcc_t __iomem *fccp = fep->fcc.fccp;
+
+ C16(fccp, fcc_fccm, FCC_NAPI_TX_EVENT_MSK);
+}
+
+static void rx_bd_done(struct net_device *dev)
+{
+ /* nothing */
+}
+
+static void tx_kickstart(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ fcc_t __iomem *fccp = fep->fcc.fccp;
+
+ S16(fccp, fcc_ftodr, 0x8000);
+}
+
+static u32 get_int_events(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ fcc_t __iomem *fccp = fep->fcc.fccp;
+
+ return (u32)R16(fccp, fcc_fcce);
+}
+
+static void clear_int_events(struct net_device *dev, u32 int_events)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ fcc_t __iomem *fccp = fep->fcc.fccp;
+
+ W16(fccp, fcc_fcce, int_events & 0xffff);
+}
+
+static void ev_error(struct net_device *dev, u32 int_events)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ dev_warn(fep->dev, "FS_ENET ERROR(s) 0x%x\n", int_events);
+}
+
+static int get_regs(struct net_device *dev, void *p, int *sizep)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ if (*sizep < sizeof(fcc_t) + sizeof(fcc_enet_t) + 1)
+ return -EINVAL;
+
+ memcpy_fromio(p, fep->fcc.fccp, sizeof(fcc_t));
+ p = (char *)p + sizeof(fcc_t);
+
+ memcpy_fromio(p, fep->fcc.ep, sizeof(fcc_enet_t));
+ p = (char *)p + sizeof(fcc_enet_t);
+
+ memcpy_fromio(p, fep->fcc.fcccp, 1);
+ return 0;
+}
+
+static int get_regs_len(struct net_device *dev)
+{
+ return sizeof(fcc_t) + sizeof(fcc_enet_t) + 1;
+}
+
+/* Some transmit errors cause the transmitter to shut
+ * down.  We now issue a restart transmit.
+ * Also, to work around 8260 device erratum CPM37, we must
+ * disable and then re-enable the transmitter following a
+ * Late Collision, Underrun, or Retry Limit error.
+ * In addition, tbptr may point past BDs that are still marked
+ * as ready due to internal pipelining, so we need to look back
+ * through the BDs and adjust tbptr to point to the last BD
+ * marked as ready.  This may result in some buffers being
+ * retransmitted.
+ */
+static void tx_restart(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ fcc_t __iomem *fccp = fep->fcc.fccp;
+ const struct fs_platform_info *fpi = fep->fpi;
+ fcc_enet_t __iomem *ep = fep->fcc.ep;
+ cbd_t __iomem *curr_tbptr;
+ cbd_t __iomem *recheck_bd;
+ cbd_t __iomem *prev_bd;
+ cbd_t __iomem *last_tx_bd;
+
+	last_tx_bd = fep->tx_bd_base + (fpi->tx_ring - 1);
+
+ /* get the current bd held in TBPTR and scan back from this point */
+ recheck_bd = curr_tbptr = (cbd_t __iomem *)
+ ((R32(ep, fen_genfcc.fcc_tbptr) - fep->ring_mem_addr) +
+ fep->ring_base);
+
+ prev_bd = (recheck_bd == fep->tx_bd_base) ? last_tx_bd : recheck_bd - 1;
+
+ /* Move through the bds in reverse, look for the earliest buffer
+ * that is not ready. Adjust TBPTR to the following buffer */
+ while ((CBDR_SC(prev_bd) & BD_ENET_TX_READY) != 0) {
+ /* Go back one buffer */
+ recheck_bd = prev_bd;
+
+ /* update the previous buffer */
+ prev_bd = (prev_bd == fep->tx_bd_base) ? last_tx_bd : prev_bd - 1;
+
+ /* We should never see all bds marked as ready, check anyway */
+ if (recheck_bd == curr_tbptr)
+ break;
+ }
+ /* Now update the TBPTR and dirty flag to the current buffer */
+ W32(ep, fen_genfcc.fcc_tbptr,
+ (uint) (((void *)recheck_bd - fep->ring_base) +
+ fep->ring_mem_addr));
+ fep->dirty_tx = recheck_bd;
+
+ C32(fccp, fcc_gfmr, FCC_GFMR_ENT);
+ udelay(10);
+ S32(fccp, fcc_gfmr, FCC_GFMR_ENT);
+
+ fcc_cr_cmd(fep, CPM_CR_RESTART_TX);
+}
+
+/*************************************************************************/
+
+const struct fs_ops fs_fcc_ops = {
+ .setup_data = setup_data,
+ .cleanup_data = cleanup_data,
+ .set_multicast_list = set_multicast_list,
+ .restart = restart,
+ .stop = stop,
+ .napi_clear_rx_event = napi_clear_rx_event,
+ .napi_enable_rx = napi_enable_rx,
+ .napi_disable_rx = napi_disable_rx,
+ .napi_clear_tx_event = napi_clear_tx_event,
+ .napi_enable_tx = napi_enable_tx,
+ .napi_disable_tx = napi_disable_tx,
+ .rx_bd_done = rx_bd_done,
+ .tx_kickstart = tx_kickstart,
+ .get_int_events = get_int_events,
+ .clear_int_events = clear_int_events,
+ .ev_error = ev_error,
+ .get_regs = get_regs,
+ .get_regs_len = get_regs_len,
+ .tx_restart = tx_restart,
+ .allocate_bd = allocate_bd,
+ .free_bd = free_bd,
+};
diff --git a/kernel/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/kernel/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
new file mode 100644
index 000000000..b34214e2d
--- /dev/null
+++ b/kernel/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
@@ -0,0 +1,533 @@
+/*
+ * Freescale Ethernet controllers
+ *
+ * Copyright (c) 2005 Intracom S.A.
+ * by Pantelis Antoniou <panto@intracom.gr>
+ *
+ * 2005 (c) MontaVista Software, Inc.
+ * Vitaly Bordug <vbordug@ru.mvista.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/bitops.h>
+#include <linux/fs.h>
+#include <linux/platform_device.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/gfp.h>
+
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+
+#ifdef CONFIG_8xx
+#include <asm/8xx_immap.h>
+#include <asm/pgtable.h>
+#include <asm/cpm1.h>
+#endif
+
+#include "fs_enet.h"
+#include "fec.h"
+
+/*************************************************/
+
+#if defined(CONFIG_CPM1)
+/* for a CPM1 __raw_xxx's are sufficient */
+#define __fs_out32(addr, x) __raw_writel(x, addr)
+#define __fs_out16(addr, x) __raw_writew(x, addr)
+#define __fs_in32(addr) __raw_readl(addr)
+#define __fs_in16(addr) __raw_readw(addr)
+#else
+/* for others play it safe */
+#define __fs_out32(addr, x) out_be32(addr, x)
+#define __fs_out16(addr, x) out_be16(addr, x)
+#define __fs_in32(addr) in_be32(addr)
+#define __fs_in16(addr) in_be16(addr)
+#endif
+
+/* write */
+#define FW(_fecp, _reg, _v) __fs_out32(&(_fecp)->fec_ ## _reg, (_v))
+
+/* read */
+#define FR(_fecp, _reg) __fs_in32(&(_fecp)->fec_ ## _reg)
+
+/* set bits */
+#define FS(_fecp, _reg, _v) FW(_fecp, _reg, FR(_fecp, _reg) | (_v))
+
+/* clear bits */
+#define FC(_fecp, _reg, _v) FW(_fecp, _reg, FR(_fecp, _reg) & ~(_v))
+
+/*
+ * Delay to wait for FEC reset command to complete (in us)
+ */
+#define FEC_RESET_DELAY 50
+
+static int whack_reset(struct fec __iomem *fecp)
+{
+ int i;
+
+ FW(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_RESET);
+ for (i = 0; i < FEC_RESET_DELAY; i++) {
+ if ((FR(fecp, ecntrl) & FEC_ECNTRL_RESET) == 0)
+ return 0; /* OK */
+ udelay(1);
+ }
+
+ return -1;
+}
+
+static int do_pd_setup(struct fs_enet_private *fep)
+{
+ struct platform_device *ofdev = to_platform_device(fep->dev);
+
+ fep->interrupt = irq_of_parse_and_map(ofdev->dev.of_node, 0);
+ if (fep->interrupt == NO_IRQ)
+ return -EINVAL;
+
+ fep->fec.fecp = of_iomap(ofdev->dev.of_node, 0);
+	if (!fep->fec.fecp)
+ return -EINVAL;
+
+ return 0;
+}
+
+#define FEC_NAPI_RX_EVENT_MSK (FEC_ENET_RXF | FEC_ENET_RXB)
+#define FEC_NAPI_TX_EVENT_MSK (FEC_ENET_TXF | FEC_ENET_TXB)
+#define FEC_RX_EVENT (FEC_ENET_RXF)
+#define FEC_TX_EVENT (FEC_ENET_TXF)
+#define FEC_ERR_EVENT_MSK (FEC_ENET_HBERR | FEC_ENET_BABR | \
+ FEC_ENET_BABT | FEC_ENET_EBERR)
+
+static int setup_data(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ if (do_pd_setup(fep) != 0)
+ return -EINVAL;
+
+ fep->fec.hthi = 0;
+ fep->fec.htlo = 0;
+
+ fep->ev_napi_rx = FEC_NAPI_RX_EVENT_MSK;
+ fep->ev_napi_tx = FEC_NAPI_TX_EVENT_MSK;
+ fep->ev_rx = FEC_RX_EVENT;
+ fep->ev_tx = FEC_TX_EVENT;
+ fep->ev_err = FEC_ERR_EVENT_MSK;
+
+ return 0;
+}
+
+static int allocate_bd(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ const struct fs_platform_info *fpi = fep->fpi;
+
+ fep->ring_base = (void __force __iomem *)dma_alloc_coherent(fep->dev,
+ (fpi->tx_ring + fpi->rx_ring) *
+ sizeof(cbd_t), &fep->ring_mem_addr,
+ GFP_KERNEL);
+ if (fep->ring_base == NULL)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void free_bd(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ const struct fs_platform_info *fpi = fep->fpi;
+
+	if (fep->ring_base)
+ dma_free_coherent(fep->dev, (fpi->tx_ring + fpi->rx_ring)
+ * sizeof(cbd_t),
+ (void __force *)fep->ring_base,
+ fep->ring_mem_addr);
+}
+
+static void cleanup_data(struct net_device *dev)
+{
+ /* nothing */
+}
+
+static void set_promiscuous_mode(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ struct fec __iomem *fecp = fep->fec.fecp;
+
+ FS(fecp, r_cntrl, FEC_RCNTRL_PROM);
+}
+
+static void set_multicast_start(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ fep->fec.hthi = 0;
+ fep->fec.htlo = 0;
+}
+
+static void set_multicast_one(struct net_device *dev, const u8 *mac)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ int temp, hash_index, i, j;
+ u32 crc, csrVal;
+ u8 byte, msb;
+
+ crc = 0xffffffff;
+ for (i = 0; i < 6; i++) {
+ byte = mac[i];
+ for (j = 0; j < 8; j++) {
+ msb = crc >> 31;
+ crc <<= 1;
+ if (msb ^ (byte & 0x1))
+ crc ^= FEC_CRC_POLY;
+ byte >>= 1;
+ }
+ }
+
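+	/* The low six CRC bits form the hash index: bit 0 selects the
+	 * high or low hash register, and bits 1-5 (bit-reversed below)
+	 * pick the bit within it.
+	 */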
+ temp = (crc & 0x3f) >> 1;
+ hash_index = ((temp & 0x01) << 4) |
+ ((temp & 0x02) << 2) |
+ ((temp & 0x04)) |
+ ((temp & 0x08) >> 2) |
+ ((temp & 0x10) >> 4);
+ csrVal = 1 << hash_index;
+ if (crc & 1)
+ fep->fec.hthi |= csrVal;
+ else
+ fep->fec.htlo |= csrVal;
+}
+
+static void set_multicast_finish(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ struct fec __iomem *fecp = fep->fec.fecp;
+
+ /* if all multi or too many multicasts; just enable all */
+ if ((dev->flags & IFF_ALLMULTI) != 0 ||
+ netdev_mc_count(dev) > FEC_MAX_MULTICAST_ADDRS) {
+ fep->fec.hthi = 0xffffffffU;
+ fep->fec.htlo = 0xffffffffU;
+ }
+
+ FC(fecp, r_cntrl, FEC_RCNTRL_PROM);
+ FW(fecp, grp_hash_table_high, fep->fec.hthi);
+ FW(fecp, grp_hash_table_low, fep->fec.htlo);
+}
+
+static void set_multicast_list(struct net_device *dev)
+{
+ struct netdev_hw_addr *ha;
+
+ if ((dev->flags & IFF_PROMISC) == 0) {
+ set_multicast_start(dev);
+ netdev_for_each_mc_addr(ha, dev)
+ set_multicast_one(dev, ha->addr);
+ set_multicast_finish(dev);
+ } else
+ set_promiscuous_mode(dev);
+}
+
+static void restart(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ struct fec __iomem *fecp = fep->fec.fecp;
+ const struct fs_platform_info *fpi = fep->fpi;
+ dma_addr_t rx_bd_base_phys, tx_bd_base_phys;
+ int r;
+ u32 addrhi, addrlo;
+
+	struct mii_bus *mii = fep->phydev->bus;
+	struct fec_info *fec_inf = mii->priv;
+
+ r = whack_reset(fep->fec.fecp);
+ if (r != 0)
+ dev_err(fep->dev, "FEC Reset FAILED!\n");
+ /*
+ * Set station address.
+ */
+ addrhi = ((u32) dev->dev_addr[0] << 24) |
+ ((u32) dev->dev_addr[1] << 16) |
+ ((u32) dev->dev_addr[2] << 8) |
+ (u32) dev->dev_addr[3];
+ addrlo = ((u32) dev->dev_addr[4] << 24) |
+ ((u32) dev->dev_addr[5] << 16);
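+	/* Despite the register names, the FEC's "addr_low" (PALR) holds
+	 * MAC bytes 0-3 and "addr_high" (PAUR) holds bytes 4-5.
+	 */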
+ FW(fecp, addr_low, addrhi);
+ FW(fecp, addr_high, addrlo);
+
+ /*
+ * Reset all multicast.
+ */
+ FW(fecp, grp_hash_table_high, fep->fec.hthi);
+ FW(fecp, grp_hash_table_low, fep->fec.htlo);
+
+ /*
+ * Set maximum receive buffer size.
+ */
+ FW(fecp, r_buff_size, PKT_MAXBLR_SIZE);
+#ifdef CONFIG_FS_ENET_MPC5121_FEC
+ FW(fecp, r_cntrl, PKT_MAXBUF_SIZE << 16);
+#else
+ FW(fecp, r_hash, PKT_MAXBUF_SIZE);
+#endif
+
+ /* get physical address */
+ rx_bd_base_phys = fep->ring_mem_addr;
+ tx_bd_base_phys = rx_bd_base_phys + sizeof(cbd_t) * fpi->rx_ring;
+
+ /*
+ * Set receive and transmit descriptor base.
+ */
+ FW(fecp, r_des_start, rx_bd_base_phys);
+ FW(fecp, x_des_start, tx_bd_base_phys);
+
+ fs_init_bds(dev);
+
+ /*
+ * Enable big endian and don't care about SDMA FC.
+ */
+#ifdef CONFIG_FS_ENET_MPC5121_FEC
+ FS(fecp, dma_control, 0xC0000000);
+#else
+ FW(fecp, fun_code, 0x78000000);
+#endif
+
+ /*
+ * Set MII speed.
+ */
+ FW(fecp, mii_speed, fec_inf->mii_speed);
+
+ /*
+ * Clear any outstanding interrupt.
+ */
+ FW(fecp, ievent, 0xffc0);
+#ifndef CONFIG_FS_ENET_MPC5121_FEC
+ FW(fecp, ivec, (virq_to_hw(fep->interrupt) / 2) << 29);
+
+ FW(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */
+#else
+ /*
+ * Only set MII/RMII mode - do not touch maximum frame length
+ * configured before.
+ */
+ FS(fecp, r_cntrl, fpi->use_rmii ?
+ FEC_RCNTRL_RMII_MODE : FEC_RCNTRL_MII_MODE);
+#endif
+ /*
+ * adjust to duplex mode
+ */
+ if (fep->phydev->duplex) {
+ FC(fecp, r_cntrl, FEC_RCNTRL_DRT);
+ FS(fecp, x_cntrl, FEC_TCNTRL_FDEN); /* FD enable */
+ } else {
+ FS(fecp, r_cntrl, FEC_RCNTRL_DRT);
+ FC(fecp, x_cntrl, FEC_TCNTRL_FDEN); /* FD disable */
+ }
+
+ /* Restore multicast and promiscuous settings */
+ set_multicast_list(dev);
+
+ /*
+ * Enable interrupts we wish to service.
+ */
+ FW(fecp, imask, FEC_ENET_TXF | FEC_ENET_TXB |
+ FEC_ENET_RXF | FEC_ENET_RXB);
+
+ /*
+ * And last, enable the transmit and receive processing.
+ */
+ FW(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN);
+ FW(fecp, r_des_active, 0x01000000);
+}
+
+static void stop(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ const struct fs_platform_info *fpi = fep->fpi;
+ struct fec __iomem *fecp = fep->fec.fecp;
+
+	struct fec_info *feci = fep->phydev->bus->priv;
+
+ int i;
+
+ if ((FR(fecp, ecntrl) & FEC_ECNTRL_ETHER_EN) == 0)
+ return; /* already down */
+
+ FW(fecp, x_cntrl, 0x01); /* Graceful transmit stop */
+ for (i = 0; ((FR(fecp, ievent) & 0x10000000) == 0) &&
+ i < FEC_RESET_DELAY; i++)
+ udelay(1);
+
+ if (i == FEC_RESET_DELAY)
+ dev_warn(fep->dev, "FEC timeout on graceful transmit stop\n");
+	/*
+	 * Disable the FEC and mask its interrupts; MII interrupts are
+	 * re-enabled below if this controller also hosts the MDIO bus.
+	 */
+ FW(fecp, imask, 0);
+ FC(fecp, ecntrl, FEC_ECNTRL_ETHER_EN);
+
+ fs_cleanup_bds(dev);
+
+ /* shut down FEC1? that's where the mii bus is */
+ if (fpi->has_phy) {
+ FS(fecp, r_cntrl, fpi->use_rmii ?
+ FEC_RCNTRL_RMII_MODE :
+ FEC_RCNTRL_MII_MODE); /* MII/RMII enable */
+ FS(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN);
+ FW(fecp, ievent, FEC_ENET_MII);
+ FW(fecp, mii_speed, feci->mii_speed);
+ }
+}
+
+static void napi_clear_rx_event(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ struct fec __iomem *fecp = fep->fec.fecp;
+
+ FW(fecp, ievent, FEC_NAPI_RX_EVENT_MSK);
+}
+
+static void napi_enable_rx(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ struct fec __iomem *fecp = fep->fec.fecp;
+
+ FS(fecp, imask, FEC_NAPI_RX_EVENT_MSK);
+}
+
+static void napi_disable_rx(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ struct fec __iomem *fecp = fep->fec.fecp;
+
+ FC(fecp, imask, FEC_NAPI_RX_EVENT_MSK);
+}
+
+static void napi_clear_tx_event(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ struct fec __iomem *fecp = fep->fec.fecp;
+
+ FW(fecp, ievent, FEC_NAPI_TX_EVENT_MSK);
+}
+
+static void napi_enable_tx(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ struct fec __iomem *fecp = fep->fec.fecp;
+
+ FS(fecp, imask, FEC_NAPI_TX_EVENT_MSK);
+}
+
+static void napi_disable_tx(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ struct fec __iomem *fecp = fep->fec.fecp;
+
+ FC(fecp, imask, FEC_NAPI_TX_EVENT_MSK);
+}
+
+static void rx_bd_done(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ struct fec __iomem *fecp = fep->fec.fecp;
+
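+	/* Poke the "receive descriptors active" register so the FEC
+	 * rescans the ring for freshly cleaned BDs.
+	 */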
+ FW(fecp, r_des_active, 0x01000000);
+}
+
+static void tx_kickstart(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ struct fec __iomem *fecp = fep->fec.fecp;
+
+ FW(fecp, x_des_active, 0x01000000);
+}
+
+static u32 get_int_events(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ struct fec __iomem *fecp = fep->fec.fecp;
+
+ return FR(fecp, ievent) & FR(fecp, imask);
+}
+
+static void clear_int_events(struct net_device *dev, u32 int_events)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ struct fec __iomem *fecp = fep->fec.fecp;
+
+ FW(fecp, ievent, int_events);
+}
+
+static void ev_error(struct net_device *dev, u32 int_events)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ dev_warn(fep->dev, "FEC ERROR(s) 0x%x\n", int_events);
+}
+
+static int get_regs(struct net_device *dev, void *p, int *sizep)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ if (*sizep < sizeof(struct fec))
+ return -EINVAL;
+
+ memcpy_fromio(p, fep->fec.fecp, sizeof(struct fec));
+
+ return 0;
+}
+
+static int get_regs_len(struct net_device *dev)
+{
+ return sizeof(struct fec);
+}
+
+static void tx_restart(struct net_device *dev)
+{
+ /* nothing */
+}
+
+/*************************************************************************/
+
+const struct fs_ops fs_fec_ops = {
+ .setup_data = setup_data,
+ .cleanup_data = cleanup_data,
+ .set_multicast_list = set_multicast_list,
+ .restart = restart,
+ .stop = stop,
+ .napi_clear_rx_event = napi_clear_rx_event,
+ .napi_enable_rx = napi_enable_rx,
+ .napi_disable_rx = napi_disable_rx,
+ .napi_clear_tx_event = napi_clear_tx_event,
+ .napi_enable_tx = napi_enable_tx,
+ .napi_disable_tx = napi_disable_tx,
+ .rx_bd_done = rx_bd_done,
+ .tx_kickstart = tx_kickstart,
+ .get_int_events = get_int_events,
+ .clear_int_events = clear_int_events,
+ .ev_error = ev_error,
+ .get_regs = get_regs,
+ .get_regs_len = get_regs_len,
+ .tx_restart = tx_restart,
+ .allocate_bd = allocate_bd,
+ .free_bd = free_bd,
+};
+
diff --git a/kernel/drivers/net/ethernet/freescale/fs_enet/mac-scc.c b/kernel/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
new file mode 100644
index 000000000..7a184e881
--- /dev/null
+++ b/kernel/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
@@ -0,0 +1,516 @@
+/*
+ * Ethernet on Serial Communications Controller (SCC) driver for Motorola MPC8xx and MPC82xx.
+ *
+ * Copyright (c) 2003 Intracom S.A.
+ * by Pantelis Antoniou <panto@intracom.gr>
+ *
+ * 2005 (c) MontaVista Software, Inc.
+ * Vitaly Bordug <vbordug@ru.mvista.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/bitops.h>
+#include <linux/fs.h>
+#include <linux/platform_device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+
+#ifdef CONFIG_8xx
+#include <asm/8xx_immap.h>
+#include <asm/pgtable.h>
+#include <asm/cpm1.h>
+#endif
+
+#include "fs_enet.h"
+
+/*************************************************/
+#if defined(CONFIG_CPM1)
+/* for a 8xx __raw_xxx's are sufficient */
+#define __fs_out32(addr, x) __raw_writel(x, addr)
+#define __fs_out16(addr, x) __raw_writew(x, addr)
+#define __fs_out8(addr, x) __raw_writeb(x, addr)
+#define __fs_in32(addr) __raw_readl(addr)
+#define __fs_in16(addr) __raw_readw(addr)
+#define __fs_in8(addr) __raw_readb(addr)
+#else
+/* for others play it safe */
+#define __fs_out32(addr, x) out_be32(addr, x)
+#define __fs_out16(addr, x) out_be16(addr, x)
+#define __fs_in32(addr) in_be32(addr)
+#define __fs_in16(addr) in_be16(addr)
+#define __fs_out8(addr, x) out_8(addr, x)
+#define __fs_in8(addr) in_8(addr)
+#endif
+
+/* write, read, set bits, clear bits */
+#define W32(_p, _m, _v) __fs_out32(&(_p)->_m, (_v))
+#define R32(_p, _m) __fs_in32(&(_p)->_m)
+#define S32(_p, _m, _v) W32(_p, _m, R32(_p, _m) | (_v))
+#define C32(_p, _m, _v) W32(_p, _m, R32(_p, _m) & ~(_v))
+
+#define W16(_p, _m, _v) __fs_out16(&(_p)->_m, (_v))
+#define R16(_p, _m) __fs_in16(&(_p)->_m)
+#define S16(_p, _m, _v) W16(_p, _m, R16(_p, _m) | (_v))
+#define C16(_p, _m, _v) W16(_p, _m, R16(_p, _m) & ~(_v))
+
+#define W8(_p, _m, _v) __fs_out8(&(_p)->_m, (_v))
+#define R8(_p, _m) __fs_in8(&(_p)->_m)
+#define S8(_p, _m, _v) W8(_p, _m, R8(_p, _m) | (_v))
+#define C8(_p, _m, _v) W8(_p, _m, R8(_p, _m) & ~(_v))
+
+#define SCC_MAX_MULTICAST_ADDRS 64
+
+/*
+ * Delay to wait for SCC reset command to complete (in us)
+ */
+#define SCC_RESET_DELAY 50
+
+static inline int scc_cr_cmd(struct fs_enet_private *fep, u32 op)
+{
+ const struct fs_platform_info *fpi = fep->fpi;
+
+ return cpm_command(fpi->cp_command, op);
+}
+
+static int do_pd_setup(struct fs_enet_private *fep)
+{
+ struct platform_device *ofdev = to_platform_device(fep->dev);
+
+ fep->interrupt = irq_of_parse_and_map(ofdev->dev.of_node, 0);
+ if (fep->interrupt == NO_IRQ)
+ return -EINVAL;
+
+ fep->scc.sccp = of_iomap(ofdev->dev.of_node, 0);
+ if (!fep->scc.sccp)
+ return -EINVAL;
+
+ fep->scc.ep = of_iomap(ofdev->dev.of_node, 1);
+ if (!fep->scc.ep) {
+ iounmap(fep->scc.sccp);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#define SCC_NAPI_RX_EVENT_MSK (SCCE_ENET_RXF | SCCE_ENET_RXB)
+#define SCC_NAPI_TX_EVENT_MSK (SCCE_ENET_TXB)
+#define SCC_RX_EVENT (SCCE_ENET_RXF)
+#define SCC_TX_EVENT (SCCE_ENET_TXB)
+#define SCC_ERR_EVENT_MSK (SCCE_ENET_TXE | SCCE_ENET_BSY)
+
+static int setup_data(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ do_pd_setup(fep);
+
+ fep->scc.hthi = 0;
+ fep->scc.htlo = 0;
+
+ fep->ev_napi_rx = SCC_NAPI_RX_EVENT_MSK;
+ fep->ev_napi_tx = SCC_NAPI_TX_EVENT_MSK;
+ fep->ev_rx = SCC_RX_EVENT;
+ fep->ev_tx = SCC_TX_EVENT | SCCE_ENET_TXE;
+ fep->ev_err = SCC_ERR_EVENT_MSK;
+
+ return 0;
+}
+
+static int allocate_bd(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ const struct fs_platform_info *fpi = fep->fpi;
+
+ fep->ring_mem_addr = cpm_dpalloc((fpi->tx_ring + fpi->rx_ring) *
+ sizeof(cbd_t), 8);
+ if (IS_ERR_VALUE(fep->ring_mem_addr))
+ return -ENOMEM;
+
+ fep->ring_base = (void __iomem __force*)
+ cpm_dpram_addr(fep->ring_mem_addr);
+
+ return 0;
+}
+
+static void free_bd(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ if (fep->ring_base)
+ cpm_dpfree(fep->ring_mem_addr);
+}
+
+static void cleanup_data(struct net_device *dev)
+{
+ /* nothing */
+}
+
+static void set_promiscuous_mode(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ scc_t __iomem *sccp = fep->scc.sccp;
+
+ S16(sccp, scc_psmr, SCC_PSMR_PRO);
+}
+
+static void set_multicast_start(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ scc_enet_t __iomem *ep = fep->scc.ep;
+
+ W16(ep, sen_gaddr1, 0);
+ W16(ep, sen_gaddr2, 0);
+ W16(ep, sen_gaddr3, 0);
+ W16(ep, sen_gaddr4, 0);
+}
+
+static void set_multicast_one(struct net_device *dev, const u8 * mac)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ scc_enet_t __iomem *ep = fep->scc.ep;
+ u16 taddrh, taddrm, taddrl;
+
+ taddrh = ((u16) mac[5] << 8) | mac[4];
+ taddrm = ((u16) mac[3] << 8) | mac[2];
+ taddrl = ((u16) mac[1] << 8) | mac[0];
+
+ W16(ep, sen_taddrh, taddrh);
+ W16(ep, sen_taddrm, taddrm);
+ W16(ep, sen_taddrl, taddrl);
+ scc_cr_cmd(fep, CPM_CR_SET_GADDR);
+}
+
+static void set_multicast_finish(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ scc_t __iomem *sccp = fep->scc.sccp;
+ scc_enet_t __iomem *ep = fep->scc.ep;
+
+ /* clear promiscuous always */
+ C16(sccp, scc_psmr, SCC_PSMR_PRO);
+
+ /* if all multi or too many multicasts; just enable all */
+ if ((dev->flags & IFF_ALLMULTI) != 0 ||
+ netdev_mc_count(dev) > SCC_MAX_MULTICAST_ADDRS) {
+
+ W16(ep, sen_gaddr1, 0xffff);
+ W16(ep, sen_gaddr2, 0xffff);
+ W16(ep, sen_gaddr3, 0xffff);
+ W16(ep, sen_gaddr4, 0xffff);
+ }
+}
+
+static void set_multicast_list(struct net_device *dev)
+{
+ struct netdev_hw_addr *ha;
+
+ if ((dev->flags & IFF_PROMISC) == 0) {
+ set_multicast_start(dev);
+ netdev_for_each_mc_addr(ha, dev)
+ set_multicast_one(dev, ha->addr);
+ set_multicast_finish(dev);
+ } else
+ set_promiscuous_mode(dev);
+}
+
+/*
+ * This function is called to start or restart the SCC during a link
+ * change.  This only happens when switching between half and full
+ * duplex.
+ */
+static void restart(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ scc_t __iomem *sccp = fep->scc.sccp;
+ scc_enet_t __iomem *ep = fep->scc.ep;
+ const struct fs_platform_info *fpi = fep->fpi;
+ u16 paddrh, paddrm, paddrl;
+ const unsigned char *mac;
+ int i;
+
+ C32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
+
+ /* clear everything (slow & steady does it) */
+ for (i = 0; i < sizeof(*ep); i++)
+ __fs_out8((u8 __iomem *)ep + i, 0);
+
+ /* point to bds */
+ W16(ep, sen_genscc.scc_rbase, fep->ring_mem_addr);
+ W16(ep, sen_genscc.scc_tbase,
+ fep->ring_mem_addr + sizeof(cbd_t) * fpi->rx_ring);
+
+ /* Initialize function code registers for big-endian.
+ */
+#ifndef CONFIG_NOT_COHERENT_CACHE
+ W8(ep, sen_genscc.scc_rfcr, SCC_EB | SCC_GBL);
+ W8(ep, sen_genscc.scc_tfcr, SCC_EB | SCC_GBL);
+#else
+ W8(ep, sen_genscc.scc_rfcr, SCC_EB);
+ W8(ep, sen_genscc.scc_tfcr, SCC_EB);
+#endif
+
+ /* Set maximum bytes per receive buffer.
+ * This appears to be an Ethernet frame size, not the buffer
+ * fragment size. It must be a multiple of four.
+ */
+ W16(ep, sen_genscc.scc_mrblr, 0x5f0);
+
+ /* Set CRC preset and mask.
+ */
+ W32(ep, sen_cpres, 0xffffffff);
+ W32(ep, sen_cmask, 0xdebb20e3);
+
+ W32(ep, sen_crcec, 0); /* CRC Error counter */
+ W32(ep, sen_alec, 0); /* alignment error counter */
+ W32(ep, sen_disfc, 0); /* discard frame counter */
+
+ W16(ep, sen_pads, 0x8888); /* Tx short frame pad character */
+ W16(ep, sen_retlim, 15); /* Retry limit threshold */
+
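+	/* 0x5ee = 1518, the maximum Ethernet frame length including CRC */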
+ W16(ep, sen_maxflr, 0x5ee); /* maximum frame length register */
+
+ W16(ep, sen_minflr, PKT_MINBUF_SIZE); /* minimum frame length register */
+
+ W16(ep, sen_maxd1, 0x000005f0); /* maximum DMA1 length */
+ W16(ep, sen_maxd2, 0x000005f0); /* maximum DMA2 length */
+
+ /* Clear hash tables.
+ */
+ W16(ep, sen_gaddr1, 0);
+ W16(ep, sen_gaddr2, 0);
+ W16(ep, sen_gaddr3, 0);
+ W16(ep, sen_gaddr4, 0);
+ W16(ep, sen_iaddr1, 0);
+ W16(ep, sen_iaddr2, 0);
+ W16(ep, sen_iaddr3, 0);
+ W16(ep, sen_iaddr4, 0);
+
+ /* set address
+ */
+ mac = dev->dev_addr;
+ paddrh = ((u16) mac[5] << 8) | mac[4];
+ paddrm = ((u16) mac[3] << 8) | mac[2];
+ paddrl = ((u16) mac[1] << 8) | mac[0];
+
+ W16(ep, sen_paddrh, paddrh);
+ W16(ep, sen_paddrm, paddrm);
+ W16(ep, sen_paddrl, paddrl);
+
+ W16(ep, sen_pper, 0);
+ W16(ep, sen_taddrl, 0);
+ W16(ep, sen_taddrm, 0);
+ W16(ep, sen_taddrh, 0);
+
+ fs_init_bds(dev);
+
+ scc_cr_cmd(fep, CPM_CR_INIT_TRX);
+
+ W16(sccp, scc_scce, 0xffff);
+
+ /* Enable interrupts we wish to service.
+ */
+ W16(sccp, scc_sccm, SCCE_ENET_TXE | SCCE_ENET_RXF | SCCE_ENET_TXB);
+
+ /* Set GSMR_H to enable all normal operating modes.
+ * Set GSMR_L to enable Ethernet to MC68160.
+ */
+ W32(sccp, scc_gsmrh, 0);
+ W32(sccp, scc_gsmrl,
+ SCC_GSMRL_TCI | SCC_GSMRL_TPL_48 | SCC_GSMRL_TPP_10 |
+ SCC_GSMRL_MODE_ENET);
+
+ /* Set sync/delimiters.
+ */
+ W16(sccp, scc_dsr, 0xd555);
+
+ /* Set processing mode. Use Ethernet CRC, catch broadcast, and
+ * start frame search 22 bit times after RENA.
+ */
+ W16(sccp, scc_psmr, SCC_PSMR_ENCRC | SCC_PSMR_NIB22);
+
+ /* Set full duplex mode if needed */
+ if (fep->phydev->duplex)
+ S16(sccp, scc_psmr, SCC_PSMR_LPB | SCC_PSMR_FDE);
+
+ /* Restore multicast and promiscuous settings */
+ set_multicast_list(dev);
+
+ S32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
+}
+
+static void stop(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ scc_t __iomem *sccp = fep->scc.sccp;
+ int i;
+
+ for (i = 0; (R16(sccp, scc_sccm) == 0) && i < SCC_RESET_DELAY; i++)
+ udelay(1);
+
+ if (i == SCC_RESET_DELAY)
+ dev_warn(fep->dev, "SCC timeout on graceful transmit stop\n");
+
+ W16(sccp, scc_sccm, 0);
+ C32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
+
+ fs_cleanup_bds(dev);
+}
+
+static void napi_clear_rx_event(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ scc_t __iomem *sccp = fep->scc.sccp;
+
+ W16(sccp, scc_scce, SCC_NAPI_RX_EVENT_MSK);
+}
+
+static void napi_enable_rx(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ scc_t __iomem *sccp = fep->scc.sccp;
+
+ S16(sccp, scc_sccm, SCC_NAPI_RX_EVENT_MSK);
+}
+
+static void napi_disable_rx(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ scc_t __iomem *sccp = fep->scc.sccp;
+
+ C16(sccp, scc_sccm, SCC_NAPI_RX_EVENT_MSK);
+}
+
+static void napi_clear_tx_event(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ scc_t __iomem *sccp = fep->scc.sccp;
+
+ W16(sccp, scc_scce, SCC_NAPI_TX_EVENT_MSK);
+}
+
+static void napi_enable_tx(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ scc_t __iomem *sccp = fep->scc.sccp;
+
+ S16(sccp, scc_sccm, SCC_NAPI_TX_EVENT_MSK);
+}
+
+static void napi_disable_tx(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ scc_t __iomem *sccp = fep->scc.sccp;
+
+ C16(sccp, scc_sccm, SCC_NAPI_TX_EVENT_MSK);
+}
+
+static void rx_bd_done(struct net_device *dev)
+{
+ /* nothing */
+}
+
+static void tx_kickstart(struct net_device *dev)
+{
+ /* nothing */
+}
+
+static u32 get_int_events(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ scc_t __iomem *sccp = fep->scc.sccp;
+
+ return (u32) R16(sccp, scc_scce);
+}
+
+static void clear_int_events(struct net_device *dev, u32 int_events)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ scc_t __iomem *sccp = fep->scc.sccp;
+
+ W16(sccp, scc_scce, int_events & 0xffff);
+}
+
+static void ev_error(struct net_device *dev, u32 int_events)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ dev_warn(fep->dev, "SCC ERROR(s) 0x%x\n", int_events);
+}
+
+static int get_regs(struct net_device *dev, void *p, int *sizep)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ if (*sizep < sizeof(scc_t) + sizeof(scc_enet_t __iomem *))
+ return -EINVAL;
+
+ memcpy_fromio(p, fep->scc.sccp, sizeof(scc_t));
+ p = (char *)p + sizeof(scc_t);
+
+ memcpy_fromio(p, fep->scc.ep, sizeof(scc_enet_t __iomem *));
+
+ return 0;
+}
+
+static int get_regs_len(struct net_device *dev)
+{
+ return sizeof(scc_t) + sizeof(scc_enet_t __iomem *);
+}
+
+static void tx_restart(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ scc_cr_cmd(fep, CPM_CR_RESTART_TX);
+}
+
+
+/*************************************************************************/
+
+const struct fs_ops fs_scc_ops = {
+ .setup_data = setup_data,
+ .cleanup_data = cleanup_data,
+ .set_multicast_list = set_multicast_list,
+ .restart = restart,
+ .stop = stop,
+ .napi_clear_rx_event = napi_clear_rx_event,
+ .napi_enable_rx = napi_enable_rx,
+ .napi_disable_rx = napi_disable_rx,
+ .napi_clear_tx_event = napi_clear_tx_event,
+ .napi_enable_tx = napi_enable_tx,
+ .napi_disable_tx = napi_disable_tx,
+ .rx_bd_done = rx_bd_done,
+ .tx_kickstart = tx_kickstart,
+ .get_int_events = get_int_events,
+ .clear_int_events = clear_int_events,
+ .ev_error = ev_error,
+ .get_regs = get_regs,
+ .get_regs_len = get_regs_len,
+ .tx_restart = tx_restart,
+ .allocate_bd = allocate_bd,
+ .free_bd = free_bd,
+};
diff --git a/kernel/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/kernel/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
new file mode 100644
index 000000000..68a428de0
--- /dev/null
+++ b/kernel/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
@@ -0,0 +1,233 @@
+/*
+ * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
+ *
+ * Copyright (c) 2003 Intracom S.A.
+ * by Pantelis Antoniou <panto@intracom.gr>
+ *
+ * 2005 (c) MontaVista Software, Inc.
+ * Vitaly Bordug <vbordug@ru.mvista.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/mii.h>
+#include <linux/platform_device.h>
+#include <linux/mdio-bitbang.h>
+#include <linux/of_address.h>
+#include <linux/of_mdio.h>
+#include <linux/of_platform.h>
+
+#include "fs_enet.h"
+
+struct bb_info {
+ struct mdiobb_ctrl ctrl;
+ __be32 __iomem *dir;
+ __be32 __iomem *dat;
+ u32 mdio_msk;
+ u32 mdc_msk;
+};
+
+/* FIXME: If any other users of GPIO crop up, then these will have to
+ * have some sort of global synchronization to avoid races with other
+ * pins on the same port. The ideal solution would probably be to
+ * bind the ports to a GPIO driver, and have this be a client of it.
+ */
+static inline void bb_set(u32 __iomem *p, u32 m)
+{
+ out_be32(p, in_be32(p) | m);
+}
+
+static inline void bb_clr(u32 __iomem *p, u32 m)
+{
+ out_be32(p, in_be32(p) & ~m);
+}
+
+static inline int bb_read(u32 __iomem *p, u32 m)
+{
+ return (in_be32(p) & m) != 0;
+}
+
+static inline void mdio_dir(struct mdiobb_ctrl *ctrl, int dir)
+{
+ struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
+
+ if (dir)
+ bb_set(bitbang->dir, bitbang->mdio_msk);
+ else
+ bb_clr(bitbang->dir, bitbang->mdio_msk);
+
+ /* Read back to flush the write. */
+ in_be32(bitbang->dir);
+}
+
+static inline int mdio_read(struct mdiobb_ctrl *ctrl)
+{
+ struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
+ return bb_read(bitbang->dat, bitbang->mdio_msk);
+}
+
+static inline void mdio(struct mdiobb_ctrl *ctrl, int what)
+{
+ struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
+
+ if (what)
+ bb_set(bitbang->dat, bitbang->mdio_msk);
+ else
+ bb_clr(bitbang->dat, bitbang->mdio_msk);
+
+ /* Read back to flush the write. */
+ in_be32(bitbang->dat);
+}
+
+static inline void mdc(struct mdiobb_ctrl *ctrl, int what)
+{
+ struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
+
+ if (what)
+ bb_set(bitbang->dat, bitbang->mdc_msk);
+ else
+ bb_clr(bitbang->dat, bitbang->mdc_msk);
+
+ /* Read back to flush the write. */
+ in_be32(bitbang->dat);
+}
+
+static struct mdiobb_ops bb_ops = {
+ .owner = THIS_MODULE,
+ .set_mdc = mdc,
+ .set_mdio_dir = mdio_dir,
+ .set_mdio_data = mdio,
+ .get_mdio_data = mdio_read,
+};
+
+static int fs_mii_bitbang_init(struct mii_bus *bus, struct device_node *np)
+{
+ struct resource res;
+ const u32 *data;
+ int mdio_pin, mdc_pin, len;
+ struct bb_info *bitbang = bus->priv;
+
+ int ret = of_address_to_resource(np, 0, &res);
+ if (ret)
+ return ret;
+
+ if (resource_size(&res) <= 13)
+ return -ENODEV;
+
+ /* This should really encode the pin number as well, but all
+ * we get is an int, and the odds of multiple bitbang mdio buses
+ * is low enough that it's not worth going too crazy.
+ */
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%x", res.start);
+
+ data = of_get_property(np, "fsl,mdio-pin", &len);
+ if (!data || len != 4)
+ return -ENODEV;
+ mdio_pin = *data;
+
+ data = of_get_property(np, "fsl,mdc-pin", &len);
+ if (!data || len != 4)
+ return -ENODEV;
+ mdc_pin = *data;
+
+ bitbang->dir = ioremap(res.start, resource_size(&res));
+ if (!bitbang->dir)
+ return -ENOMEM;
+
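+	/* On the CPM2 parallel port the data register sits four 32-bit
+	 * words (16 bytes) past the direction register, hence "+ 4" on a
+	 * __be32 pointer.
+	 */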
+ bitbang->dat = bitbang->dir + 4;
+ bitbang->mdio_msk = 1 << (31 - mdio_pin);
+ bitbang->mdc_msk = 1 << (31 - mdc_pin);
+
+ return 0;
+}
+
+static int fs_enet_mdio_probe(struct platform_device *ofdev)
+{
+ struct mii_bus *new_bus;
+ struct bb_info *bitbang;
+ int ret = -ENOMEM;
+
+ bitbang = kzalloc(sizeof(struct bb_info), GFP_KERNEL);
+ if (!bitbang)
+ goto out;
+
+ bitbang->ctrl.ops = &bb_ops;
+
+ new_bus = alloc_mdio_bitbang(&bitbang->ctrl);
+ if (!new_bus)
+ goto out_free_priv;
+
+	new_bus->name = "CPM2 Bitbanged MII";
+
+ ret = fs_mii_bitbang_init(new_bus, ofdev->dev.of_node);
+ if (ret)
+ goto out_free_bus;
+
+ new_bus->phy_mask = ~0;
+ new_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+ if (!new_bus->irq) {
+ ret = -ENOMEM;
+ goto out_unmap_regs;
+ }
+
+ new_bus->parent = &ofdev->dev;
+ platform_set_drvdata(ofdev, new_bus);
+
+ ret = of_mdiobus_register(new_bus, ofdev->dev.of_node);
+ if (ret)
+ goto out_free_irqs;
+
+ return 0;
+
+out_free_irqs:
+ kfree(new_bus->irq);
+out_unmap_regs:
+ iounmap(bitbang->dir);
+out_free_bus:
+ free_mdio_bitbang(new_bus);
+out_free_priv:
+ kfree(bitbang);
+out:
+ return ret;
+}
+
+static int fs_enet_mdio_remove(struct platform_device *ofdev)
+{
+ struct mii_bus *bus = platform_get_drvdata(ofdev);
+ struct bb_info *bitbang = bus->priv;
+
+ mdiobus_unregister(bus);
+ kfree(bus->irq);
+ free_mdio_bitbang(bus);
+ iounmap(bitbang->dir);
+ kfree(bitbang);
+
+ return 0;
+}
+
+static const struct of_device_id fs_enet_mdio_bb_match[] = {
+ {
+ .compatible = "fsl,cpm2-mdio-bitbang",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, fs_enet_mdio_bb_match);
+
+static struct platform_driver fs_enet_bb_mdio_driver = {
+ .driver = {
+ .name = "fsl-bb-mdio",
+ .of_match_table = fs_enet_mdio_bb_match,
+ },
+ .probe = fs_enet_mdio_probe,
+ .remove = fs_enet_mdio_remove,
+};
+
+module_platform_driver(fs_enet_bb_mdio_driver);
diff --git a/kernel/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/kernel/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
new file mode 100644
index 000000000..2be383e6d
--- /dev/null
+++ b/kernel/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
@@ -0,0 +1,234 @@
+/*
+ * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
+ *
+ * Copyright (c) 2003 Intracom S.A.
+ * by Pantelis Antoniou <panto@intracom.gr>
+ *
+ * 2005 (c) MontaVista Software, Inc.
+ * Vitaly Bordug <vbordug@ru.mvista.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/bitops.h>
+#include <linux/platform_device.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+
+#include <asm/pgtable.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+#include <asm/mpc5xxx.h>
+
+#include "fs_enet.h"
+#include "fec.h"
+
+/* Make MII read/write commands for the FEC. */
+#define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18))
+#define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | (VAL & 0xffff))
+#define mk_mii_end 0
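+/* Each command word is a clause-22 MDIO frame: start (01) and opcode
+ * (10 = read, 01 = write) in bits 31:28, PHY address in bits 27:23,
+ * register in bits 22:18, turnaround (10) in bits 17:16 and the data
+ * in bits 15:0.
+ */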
+
+#define FEC_MII_LOOPS 10000
+
+static int fs_enet_fec_mii_read(struct mii_bus *bus, int phy_id, int location)
+{
+ struct fec_info* fec = bus->priv;
+ struct fec __iomem *fecp = fec->fecp;
+ int i, ret = -1;
+
+ BUG_ON((in_be32(&fecp->fec_r_cntrl) & FEC_RCNTRL_MII_MODE) == 0);
+
+ /* Add PHY address to register command. */
+ out_be32(&fecp->fec_mii_data, (phy_id << 23) | mk_mii_read(location));
+
+ for (i = 0; i < FEC_MII_LOOPS; i++)
+ if ((in_be32(&fecp->fec_ievent) & FEC_ENET_MII) != 0)
+ break;
+
+ if (i < FEC_MII_LOOPS) {
+ out_be32(&fecp->fec_ievent, FEC_ENET_MII);
+ ret = in_be32(&fecp->fec_mii_data) & 0xffff;
+ }
+
+ return ret;
+}
+
+static int fs_enet_fec_mii_write(struct mii_bus *bus, int phy_id, int location, u16 val)
+{
+ struct fec_info* fec = bus->priv;
+ struct fec __iomem *fecp = fec->fecp;
+ int i;
+
+ /* this must never happen */
+ BUG_ON((in_be32(&fecp->fec_r_cntrl) & FEC_RCNTRL_MII_MODE) == 0);
+
+ /* Add PHY address to register command. */
+ out_be32(&fecp->fec_mii_data, (phy_id << 23) | mk_mii_write(location, val));
+
+ for (i = 0; i < FEC_MII_LOOPS; i++)
+ if ((in_be32(&fecp->fec_ievent) & FEC_ENET_MII) != 0)
+ break;
+
+ if (i < FEC_MII_LOOPS)
+ out_be32(&fecp->fec_ievent, FEC_ENET_MII);
+
+	return 0;
+}
+
+static const struct of_device_id fs_enet_mdio_fec_match[];
+static int fs_enet_mdio_probe(struct platform_device *ofdev)
+{
+ const struct of_device_id *match;
+ struct resource res;
+ struct mii_bus *new_bus;
+ struct fec_info *fec;
+ int (*get_bus_freq)(struct device_node *);
+ int ret = -ENOMEM, clock, speed;
+
+ match = of_match_device(fs_enet_mdio_fec_match, &ofdev->dev);
+ if (!match)
+ return -EINVAL;
+ get_bus_freq = match->data;
+
+ new_bus = mdiobus_alloc();
+ if (!new_bus)
+ goto out;
+
+ fec = kzalloc(sizeof(struct fec_info), GFP_KERNEL);
+ if (!fec)
+ goto out_mii;
+
+ new_bus->priv = fec;
+ new_bus->name = "FEC MII Bus";
+ new_bus->read = &fs_enet_fec_mii_read;
+ new_bus->write = &fs_enet_fec_mii_write;
+
+ ret = of_address_to_resource(ofdev->dev.of_node, 0, &res);
+ if (ret)
+ goto out_res;
+
+ snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", res.start);
+
+ fec->fecp = ioremap(res.start, resource_size(&res));
+ if (!fec->fecp) {
+ ret = -ENOMEM;
+ goto out_fec;
+ }
+
+ if (get_bus_freq) {
+ clock = get_bus_freq(ofdev->dev.of_node);
+ if (!clock) {
+ /* Use maximum divider if clock is unknown */
+ dev_warn(&ofdev->dev, "could not determine IPS clock\n");
+ clock = 0x3F * 5000000;
+ }
+ } else
+ clock = ppc_proc_freq;
+
+ /*
+ * Scale for a MII clock <= 2.5 MHz
+ * Note that only 6 bits (25:30) are available for MII speed.
+ */
+ speed = (clock + 4999999) / 5000000;
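+	/* For example, a 66 MHz bus clock gives speed = 14, i.e. an MDC of
+	 * roughly 2.36 MHz (assuming MDC = bus clock / (2 * MII_SPEED)).
+	 */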
+ if (speed > 0x3F) {
+ speed = 0x3F;
+ dev_err(&ofdev->dev,
+ "MII clock (%d Hz) exceeds max (2.5 MHz)\n",
+ clock / speed);
+ }
+
+ fec->mii_speed = speed << 1;
+
+ setbits32(&fec->fecp->fec_r_cntrl, FEC_RCNTRL_MII_MODE);
+ setbits32(&fec->fecp->fec_ecntrl, FEC_ECNTRL_PINMUX |
+ FEC_ECNTRL_ETHER_EN);
+ out_be32(&fec->fecp->fec_ievent, FEC_ENET_MII);
+ clrsetbits_be32(&fec->fecp->fec_mii_speed, 0x7E, fec->mii_speed);
+
+ new_bus->phy_mask = ~0;
+ new_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+ if (!new_bus->irq) {
+ ret = -ENOMEM;
+ goto out_unmap_regs;
+ }
+
+ new_bus->parent = &ofdev->dev;
+ platform_set_drvdata(ofdev, new_bus);
+
+ ret = of_mdiobus_register(new_bus, ofdev->dev.of_node);
+ if (ret)
+ goto out_free_irqs;
+
+ return 0;
+
+out_free_irqs:
+ kfree(new_bus->irq);
+out_unmap_regs:
+ iounmap(fec->fecp);
+out_res:
+out_fec:
+ kfree(fec);
+out_mii:
+ mdiobus_free(new_bus);
+out:
+ return ret;
+}
+
+static int fs_enet_mdio_remove(struct platform_device *ofdev)
+{
+ struct mii_bus *bus = platform_get_drvdata(ofdev);
+ struct fec_info *fec = bus->priv;
+
+ mdiobus_unregister(bus);
+ kfree(bus->irq);
+ iounmap(fec->fecp);
+ kfree(fec);
+ mdiobus_free(bus);
+
+ return 0;
+}
+
+static const struct of_device_id fs_enet_mdio_fec_match[] = {
+ {
+ .compatible = "fsl,pq1-fec-mdio",
+ },
+#if defined(CONFIG_PPC_MPC512x)
+ {
+ .compatible = "fsl,mpc5121-fec-mdio",
+ .data = mpc5xxx_get_bus_frequency,
+ },
+#endif
+ {},
+};
+MODULE_DEVICE_TABLE(of, fs_enet_mdio_fec_match);
+
+static struct platform_driver fs_enet_fec_mdio_driver = {
+ .driver = {
+ .name = "fsl-fec-mdio",
+ .of_match_table = fs_enet_mdio_fec_match,
+ },
+ .probe = fs_enet_mdio_probe,
+ .remove = fs_enet_mdio_remove,
+};
+
+module_platform_driver(fs_enet_fec_mdio_driver);
diff --git a/kernel/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/kernel/drivers/net/ethernet/freescale/fsl_pq_mdio.c
new file mode 100644
index 000000000..3c40f6b99
--- /dev/null
+++ b/kernel/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -0,0 +1,499 @@
+/*
+ * Freescale PowerQUICC Ethernet Driver -- MIIM bus implementation
+ * Provides Bus interface for MIIM regs
+ *
+ * Author: Andy Fleming <afleming@freescale.com>
+ * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
+ *
+ * Copyright 2002-2004, 2008-2009 Freescale Semiconductor, Inc.
+ *
+ * Based on gianfar_mii.c and ucc_geth_mii.c (Li Yang, Kim Phillips)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/mii.h>
+#include <linux/of_address.h>
+#include <linux/of_mdio.h>
+#include <linux/of_device.h>
+
+#include <asm/io.h>
+#if IS_ENABLED(CONFIG_UCC_GETH)
+#include <asm/ucc.h> /* for ucc_set_qe_mux_mii_mng() */
+#endif
+
+#include "gianfar.h"
+
+#define MIIMIND_BUSY 0x00000001
+#define MIIMIND_NOTVALID 0x00000004
+#define MIIMCFG_INIT_VALUE 0x00000007
+#define MIIMCFG_RESET 0x80000000
+
+#define MII_READ_COMMAND 0x00000001
+
+struct fsl_pq_mii {
+ u32 miimcfg; /* MII management configuration reg */
+ u32 miimcom; /* MII management command reg */
+ u32 miimadd; /* MII management address reg */
+ u32 miimcon; /* MII management control reg */
+ u32 miimstat; /* MII management status reg */
+ u32 miimind; /* MII management indication reg */
+};
+
+struct fsl_pq_mdio {
+ u8 res1[16];
+ u32 ieventm; /* MDIO Interrupt event register (for etsec2)*/
+ u32 imaskm; /* MDIO Interrupt mask register (for etsec2)*/
+ u8 res2[4];
+ u32 emapm; /* MDIO Event mapping register (for etsec2)*/
+ u8 res3[1280];
+ struct fsl_pq_mii mii;
+ u8 res4[28];
+ u32 utbipar; /* TBI phy address reg (only on UCC) */
+ u8 res5[2728];
+} __packed;
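+/* The reserved fields pad this layout out to 4096 bytes; the actual offset
+ * of the MII block within a given device's register map is selected via the
+ * per-device mii_offset below.
+ */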
+
+/* Number of microseconds to wait for an MII register to respond */
+#define MII_TIMEOUT 1000
+
+struct fsl_pq_mdio_priv {
+ void __iomem *map;
+ struct fsl_pq_mii __iomem *regs;
+ int irqs[PHY_MAX_ADDR];
+};
+
+/*
+ * Per-device-type data. Each type of device tree node that we support gets
+ * one of these.
+ *
+ * @mii_offset: the offset of the MII registers within the memory map of the
+ * node. Some nodes define only the MII registers, and some define the whole
+ * MAC (which includes the MII registers).
+ *
+ * @get_tbipa: determines the address of the TBIPA register
+ *
+ * @ucc_configure: a special function for extra QE configuration
+ */
+struct fsl_pq_mdio_data {
+ unsigned int mii_offset; /* offset of the MII registers */
+ uint32_t __iomem * (*get_tbipa)(void __iomem *p);
+ void (*ucc_configure)(phys_addr_t start, phys_addr_t end);
+};
+
+/*
+ * Write value to the PHY at mii_id at register regnum, on the bus attached
+ * to the local interface, which may be different from the generic mdio bus
+ * (tied to a single interface), waiting until the write is done before
+ * returning. This is helpful in programming interfaces like the TBI which
+ * control interfaces like onchip SERDES and are always tied to the local
+ * mdio pins, which may not be the same as system mdio bus, used for
+ * controlling the external PHYs, for example.
+ */
+static int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
+ u16 value)
+{
+ struct fsl_pq_mdio_priv *priv = bus->priv;
+ struct fsl_pq_mii __iomem *regs = priv->regs;
+ unsigned int timeout;
+
+ /* Set the PHY address and the register address we want to write */
+ iowrite32be((mii_id << 8) | regnum, &regs->miimadd);
+
+ /* Write out the value we want */
+ iowrite32be(value, &regs->miimcon);
+
+ /* Wait for the transaction to finish */
+ timeout = MII_TIMEOUT;
+ while ((ioread32be(&regs->miimind) & MIIMIND_BUSY) && timeout) {
+ cpu_relax();
+ timeout--;
+ }
+
+ return timeout ? 0 : -ETIMEDOUT;
+}
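+
+/*
+ * Worked example of the MIIMADD encoding used above: writing register 0
+ * (BMCR) of the PHY at address 3 places (3 << 8) | 0 = 0x0300 into MIIMADD
+ * before the value is written to MIIMCON; the PHY address sits in bits 12:8
+ * and the register number in bits 4:0.
+ */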
+
+/*
+ * Read the bus for PHY at addr mii_id, register regnum, and return the value.
+ * Clears miimcom first.
+ *
+ * All PHY operations are done on the bus attached to the local interface,
+ * which may be different from the generic mdio bus. This is helpful when
+ * programming interfaces like the TBI which, in turn, control interfaces
+ * like on-chip SERDES and are always tied to the local mdio pins, which may
+ * not be the same as the system mdio bus used for controlling the external
+ * PHYs, for example.
+ */
+static int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
+{
+ struct fsl_pq_mdio_priv *priv = bus->priv;
+ struct fsl_pq_mii __iomem *regs = priv->regs;
+ unsigned int timeout;
+ u16 value;
+
+ /* Set the PHY address and the register address we want to read */
+ iowrite32be((mii_id << 8) | regnum, &regs->miimadd);
+
+ /* Clear miimcom, and then initiate a read */
+ iowrite32be(0, &regs->miimcom);
+ iowrite32be(MII_READ_COMMAND, &regs->miimcom);
+
+ /* Wait for the transaction to finish, normally less than 100us */
+ timeout = MII_TIMEOUT;
+ while ((ioread32be(&regs->miimind) &
+ (MIIMIND_NOTVALID | MIIMIND_BUSY)) && timeout) {
+ cpu_relax();
+ timeout--;
+ }
+
+ if (!timeout)
+ return -ETIMEDOUT;
+
+ /* Grab the value of the register from miimstat */
+ value = ioread32be(&regs->miimstat);
+
+ dev_dbg(&bus->dev, "read %04x from address %x/%x\n", value, mii_id, regnum);
+ return value;
+}
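+
+/*
+ * These accessors are not called directly; phylib reaches them through the
+ * generic MDIO API. As a minimal sketch, assuming a registered bus and a
+ * known PHY address, a caller could poll link status with:
+ *
+ *	int bmsr = mdiobus_read(bus, phy_addr, MII_BMSR);
+ *	if (bmsr >= 0 && (bmsr & BMSR_LSTATUS))
+ *		// link is up
+ */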
+
+/* Reset the MIIM registers, and wait for the bus to free */
+static int fsl_pq_mdio_reset(struct mii_bus *bus)
+{
+ struct fsl_pq_mdio_priv *priv = bus->priv;
+ struct fsl_pq_mii __iomem *regs = priv->regs;
+ unsigned int timeout;
+
+ mutex_lock(&bus->mdio_lock);
+
+ /* Reset the management interface */
+ iowrite32be(MIIMCFG_RESET, &regs->miimcfg);
+
+ /* Setup the MII Mgmt clock speed */
+ iowrite32be(MIIMCFG_INIT_VALUE, &regs->miimcfg);
+
+ /* Wait until the bus is free */
+ timeout = MII_TIMEOUT;
+ while ((ioread32be(&regs->miimind) & MIIMIND_BUSY) && timeout) {
+ cpu_relax();
+ timeout--;
+ }
+
+ mutex_unlock(&bus->mdio_lock);
+
+ if (!timeout) {
+ dev_err(&bus->dev, "timeout waiting for MII bus\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
+/*
+ * This is mildly evil, but so is our hardware for doing this.
+ * Also, we have to cast back to struct gfar because of
+ * definition weirdness done in gianfar.h.
+ */
+static uint32_t __iomem *get_gfar_tbipa(void __iomem *p)
+{
+ struct gfar __iomem *enet_regs = p;
+
+ return &enet_regs->tbipa;
+}
+
+/*
+ * Return the TBIPAR address for an eTSEC2 node
+ */
+static uint32_t __iomem *get_etsec_tbipa(void __iomem *p)
+{
+ return p;
+}
+#endif
+
+#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
+/*
+ * Return the TBIPAR address for a QE MDIO node
+ */
+static uint32_t __iomem *get_ucc_tbipa(void __iomem *p)
+{
+ struct fsl_pq_mdio __iomem *mdio = p;
+
+ return &mdio->utbipar;
+}
+
+/*
+ * Find the UCC node that controls the given MDIO node
+ *
+ * For some reason, the QE MDIO nodes are not children of the UCC devices
+ * that control them. Therefore, we need to scan all UCC nodes looking for
+ * the one that encompasses the given MDIO node. We do this by comparing
+ * physical addresses. The 'start' and 'end' addresses of the MDIO node are
+ * passed, and the correct UCC node will cover the entire address range.
+ *
+ * This assumes that there is only one QE MDIO node in the entire device tree.
+ */
+static void ucc_configure(phys_addr_t start, phys_addr_t end)
+{
+ static bool found_mii_master;
+ struct device_node *np = NULL;
+
+ if (found_mii_master)
+ return;
+
+ for_each_compatible_node(np, NULL, "ucc_geth") {
+ struct resource res;
+ const uint32_t *iprop;
+ uint32_t id;
+ int ret;
+
+ ret = of_address_to_resource(np, 0, &res);
+ if (ret < 0) {
+ pr_debug("fsl-pq-mdio: no address range in node %s\n",
+ np->full_name);
+ continue;
+ }
+
+ /* skip this UCC unless our mdio regs fall within its register range */
+ if ((start < res.start) || (end > res.end))
+ continue;
+
+ iprop = of_get_property(np, "cell-index", NULL);
+ if (!iprop) {
+ iprop = of_get_property(np, "device-id", NULL);
+ if (!iprop) {
+ pr_debug("fsl-pq-mdio: no UCC ID in node %s\n",
+ np->full_name);
+ continue;
+ }
+ }
+
+ id = be32_to_cpup(iprop);
+
+ /*
+ * cell-index and device-id for QE nodes are
+ * numbered from 1, not 0.
+ */
+ if (ucc_set_qe_mux_mii_mng(id - 1) < 0) {
+ pr_debug("fsl-pq-mdio: invalid UCC ID in node %s\n",
+ np->full_name);
+ continue;
+ }
+
+ pr_debug("fsl-pq-mdio: setting node UCC%u to MII master\n", id);
+ found_mii_master = true;
+ }
+}
+
+#endif
+
+static const struct of_device_id fsl_pq_mdio_match[] = {
+#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
+ {
+ .compatible = "fsl,gianfar-tbi",
+ .data = &(struct fsl_pq_mdio_data) {
+ .mii_offset = 0,
+ .get_tbipa = get_gfar_tbipa,
+ },
+ },
+ {
+ .compatible = "fsl,gianfar-mdio",
+ .data = &(struct fsl_pq_mdio_data) {
+ .mii_offset = 0,
+ .get_tbipa = get_gfar_tbipa,
+ },
+ },
+ {
+ .type = "mdio",
+ .compatible = "gianfar",
+ .data = &(struct fsl_pq_mdio_data) {
+ .mii_offset = offsetof(struct fsl_pq_mdio, mii),
+ .get_tbipa = get_gfar_tbipa,
+ },
+ },
+ {
+ .compatible = "fsl,etsec2-tbi",
+ .data = &(struct fsl_pq_mdio_data) {
+ .mii_offset = offsetof(struct fsl_pq_mdio, mii),
+ .get_tbipa = get_etsec_tbipa,
+ },
+ },
+ {
+ .compatible = "fsl,etsec2-mdio",
+ .data = &(struct fsl_pq_mdio_data) {
+ .mii_offset = offsetof(struct fsl_pq_mdio, mii),
+ .get_tbipa = get_etsec_tbipa,
+ },
+ },
+#endif
+#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
+ {
+ .compatible = "fsl,ucc-mdio",
+ .data = &(struct fsl_pq_mdio_data) {
+ .mii_offset = 0,
+ .get_tbipa = get_ucc_tbipa,
+ .ucc_configure = ucc_configure,
+ },
+ },
+ {
+ /* Legacy UCC MDIO node */
+ .type = "mdio",
+ .compatible = "ucc_geth_phy",
+ .data = &(struct fsl_pq_mdio_data) {
+ .mii_offset = 0,
+ .get_tbipa = get_ucc_tbipa,
+ .ucc_configure = ucc_configure,
+ },
+ },
+#endif
+ /* No Kconfig option for Fman support yet */
+ {
+ .compatible = "fsl,fman-mdio",
+ .data = &(struct fsl_pq_mdio_data) {
+ .mii_offset = 0,
+ /* Fman TBI operations are handled elsewhere */
+ },
+ },
+
+ {},
+};
+MODULE_DEVICE_TABLE(of, fsl_pq_mdio_match);
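+
+/*
+ * A typical device tree node matched by this table looks roughly like the
+ * sketch below (offsets vary by SoC, child PHY nodes omitted):
+ *
+ *	mdio@24520 {
+ *		compatible = "fsl,gianfar-mdio";
+ *		reg = <0x24520 0x20>;
+ *	};
+ */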
+
+static int fsl_pq_mdio_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *id =
+ of_match_device(fsl_pq_mdio_match, &pdev->dev);
+ const struct fsl_pq_mdio_data *data = id->data;
+ struct device_node *np = pdev->dev.of_node;
+ struct resource res;
+ struct device_node *tbi;
+ struct fsl_pq_mdio_priv *priv;
+ struct mii_bus *new_bus;
+ int err;
+
+ dev_dbg(&pdev->dev, "found %s compatible node\n", id->compatible);
+
+ new_bus = mdiobus_alloc_size(sizeof(*priv));
+ if (!new_bus)
+ return -ENOMEM;
+
+ priv = new_bus->priv;
+ new_bus->name = "Freescale PowerQUICC MII Bus";
+ new_bus->read = &fsl_pq_mdio_read;
+ new_bus->write = &fsl_pq_mdio_write;
+ new_bus->reset = &fsl_pq_mdio_reset;
+ new_bus->irq = priv->irqs;
+
+ err = of_address_to_resource(np, 0, &res);
+ if (err < 0) {
+ dev_err(&pdev->dev, "could not obtain address information\n");
+ goto error;
+ }
+
+ snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s@%llx", np->name,
+ (unsigned long long)res.start);
+
+ priv->map = of_iomap(np, 0);
+ if (!priv->map) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ /*
+ * Some device tree nodes represent only the MII registers, and
+ * others represent the MAC and MII registers. The 'mii_offset' field
+ * contains the offset of the MII registers inside the mapped register
+ * space.
+ */
+ if (data->mii_offset > resource_size(&res)) {
+ dev_err(&pdev->dev, "invalid register map\n");
+ err = -EINVAL;
+ goto error;
+ }
+ priv->regs = priv->map + data->mii_offset;
+
+ new_bus->parent = &pdev->dev;
+ platform_set_drvdata(pdev, new_bus);
+
+ if (data->get_tbipa) {
+ for_each_child_of_node(np, tbi) {
+ if (strcmp(tbi->type, "tbi-phy") == 0) {
+ dev_dbg(&pdev->dev, "found TBI PHY node %s\n",
+ strrchr(tbi->full_name, '/') + 1);
+ break;
+ }
+ }
+
+ if (tbi) {
+ const u32 *prop = of_get_property(tbi, "reg", NULL);
+ uint32_t __iomem *tbipa;
+
+ if (!prop) {
+ dev_err(&pdev->dev,
+ "missing 'reg' property in node %s\n",
+ tbi->full_name);
+ err = -EBUSY;
+ goto error;
+ }
+
+ tbipa = data->get_tbipa(priv->map);
+
+ iowrite32be(be32_to_cpup(prop), tbipa);
+ }
+ }
+
+ if (data->ucc_configure)
+ data->ucc_configure(res.start, res.end);
+
+ err = of_mdiobus_register(new_bus, np);
+ if (err) {
+ dev_err(&pdev->dev, "cannot register %s as MDIO bus\n",
+ new_bus->name);
+ goto error;
+ }
+
+ return 0;
+
+error:
+ if (priv->map)
+ iounmap(priv->map);
+
+ kfree(new_bus);
+
+ return err;
+}
+
+
+static int fsl_pq_mdio_remove(struct platform_device *pdev)
+{
+ struct device *device = &pdev->dev;
+ struct mii_bus *bus = dev_get_drvdata(device);
+ struct fsl_pq_mdio_priv *priv = bus->priv;
+
+ mdiobus_unregister(bus);
+
+ iounmap(priv->map);
+ mdiobus_free(bus);
+
+ return 0;
+}
+
+static struct platform_driver fsl_pq_mdio_driver = {
+ .driver = {
+ .name = "fsl-pq_mdio",
+ .of_match_table = fsl_pq_mdio_match,
+ },
+ .probe = fsl_pq_mdio_probe,
+ .remove = fsl_pq_mdio_remove,
+};
+
+module_platform_driver(fsl_pq_mdio_driver);
+
+MODULE_LICENSE("GPL");
diff --git a/kernel/drivers/net/ethernet/freescale/gianfar.c b/kernel/drivers/net/ethernet/freescale/gianfar.c
new file mode 100644
index 000000000..e616b71d5
--- /dev/null
+++ b/kernel/drivers/net/ethernet/freescale/gianfar.c
@@ -0,0 +1,3621 @@
+/* drivers/net/ethernet/freescale/gianfar.c
+ *
+ * Gianfar Ethernet Driver
+ * This driver is designed for the non-CPM ethernet controllers
+ * on the 85xx and 83xx family of integrated processors
+ * Based on 8260_io/fcc_enet.c
+ *
+ * Author: Andy Fleming
+ * Maintainer: Kumar Gala
+ * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
+ *
+ * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
+ * Copyright 2007 MontaVista Software, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * Gianfar: AKA Lambda Draconis, "Dragon"
+ * RA 11 31 24.2
+ * Dec +69 19 52
+ * V 3.84
+ * B-V +1.62
+ *
+ * Theory of operation
+ *
+ * The driver is initialized through of_device. Configuration information
+ * is therefore conveyed through an OF-style device tree.
+ *
+ * The Gianfar Ethernet Controller uses a ring of buffer
+ * descriptors. The beginning is indicated by a register
+ * pointing to the physical address of the start of the ring.
+ * The end is determined by a "wrap" bit being set in the
+ * last descriptor of the ring.
+ *
+ * When a packet is received, the RXF bit in the
+ * IEVENT register is set, triggering an interrupt when the
+ * corresponding bit in the IMASK register is also set (if
+ * interrupt coalescing is active, then the interrupt may not
+ * happen immediately, but will wait until either a set number
+ * of frames or amount of time have passed). In NAPI, the
+ * interrupt handler will signal there is work to be done, and
+ * exit. This method will start at the last known empty
+ * descriptor, and process every subsequent descriptor until there
+ * are none left with data (NAPI will stop after a set number of
+ * packets to give time to other tasks, but will eventually
+ * process all the packets). The data arrives inside a
+ * pre-allocated skb, and so after the skb is passed up to the
+ * stack, a new skb must be allocated, and the address field in
+ * the buffer descriptor must be updated to indicate this new
+ * skb.
+ *
+ * When the kernel requests that a packet be transmitted, the
+ * driver starts where it left off last time, and points the
+ * descriptor at the buffer which was passed in. The driver
+ * then informs the DMA engine that there are packets ready to
+ * be transmitted. Once the controller is finished transmitting
+ * the packet, an interrupt may be triggered (under the same
+ * conditions as for reception, but depending on the TXF bit).
+ * The driver then cleans up the buffer.
+ */
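+
+/*
+ * As a concrete illustration of the ring described above: with the default
+ * of 256 descriptors, entries 0..254 are laid out contiguously and entry 255
+ * carries the WRAP flag, so the controller returns to entry 0 after
+ * processing it. Software only advances its own cur_rx/cur_tx and dirty_tx
+ * indices; the ring itself never moves.
+ */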
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#define DEBUG
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/unistd.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_mdio.h>
+#include <linux/of_platform.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/in.h>
+#include <linux/net_tstamp.h>
+
+#include <asm/io.h>
+#ifdef CONFIG_PPC
+#include <asm/reg.h>
+#include <asm/mpc85xx.h>
+#endif
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/crc32.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/phy_fixed.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#include "gianfar.h"
+
+#define TX_TIMEOUT (1*HZ)
+
+const char gfar_driver_version[] = "1.3";
+
+static int gfar_enet_open(struct net_device *dev);
+static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static void gfar_reset_task(struct work_struct *work);
+static void gfar_timeout(struct net_device *dev);
+static int gfar_close(struct net_device *dev);
+static struct sk_buff *gfar_new_skb(struct net_device *dev,
+ dma_addr_t *bufaddr);
+static int gfar_set_mac_address(struct net_device *dev);
+static int gfar_change_mtu(struct net_device *dev, int new_mtu);
+static irqreturn_t gfar_error(int irq, void *dev_id);
+static irqreturn_t gfar_transmit(int irq, void *dev_id);
+static irqreturn_t gfar_interrupt(int irq, void *dev_id);
+static void adjust_link(struct net_device *dev);
+static noinline void gfar_update_link_state(struct gfar_private *priv);
+static int init_phy(struct net_device *dev);
+static int gfar_probe(struct platform_device *ofdev);
+static int gfar_remove(struct platform_device *ofdev);
+static void free_skb_resources(struct gfar_private *priv);
+static void gfar_set_multi(struct net_device *dev);
+static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
+static void gfar_configure_serdes(struct net_device *dev);
+static int gfar_poll_rx(struct napi_struct *napi, int budget);
+static int gfar_poll_tx(struct napi_struct *napi, int budget);
+static int gfar_poll_rx_sq(struct napi_struct *napi, int budget);
+static int gfar_poll_tx_sq(struct napi_struct *napi, int budget);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void gfar_netpoll(struct net_device *dev);
+#endif
+int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
+static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
+static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
+ int amount_pull, struct napi_struct *napi);
+static void gfar_halt_nodisable(struct gfar_private *priv);
+static void gfar_clear_exact_match(struct net_device *dev);
+static void gfar_set_mac_for_addr(struct net_device *dev, int num,
+ const u8 *addr);
+static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc");
+MODULE_DESCRIPTION("Gianfar Ethernet Driver");
+MODULE_LICENSE("GPL");
+
+static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
+ dma_addr_t buf)
+{
+ u32 lstatus;
+
+ bdp->bufPtr = cpu_to_be32(buf);
+
+ lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
+ if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
+ lstatus |= BD_LFLAG(RXBD_WRAP);
+
+ gfar_wmb();
+
+ bdp->lstatus = cpu_to_be32(lstatus);
+}
+
+static int gfar_init_bds(struct net_device *ndev)
+{
+ struct gfar_private *priv = netdev_priv(ndev);
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ struct gfar_priv_tx_q *tx_queue = NULL;
+ struct gfar_priv_rx_q *rx_queue = NULL;
+ struct txbd8 *txbdp;
+ struct rxbd8 *rxbdp;
+ u32 __iomem *rfbptr;
+ int i, j;
+ dma_addr_t bufaddr;
+
+ for (i = 0; i < priv->num_tx_queues; i++) {
+ tx_queue = priv->tx_queue[i];
+ /* Initialize some variables in our dev structure */
+ tx_queue->num_txbdfree = tx_queue->tx_ring_size;
+ tx_queue->dirty_tx = tx_queue->tx_bd_base;
+ tx_queue->cur_tx = tx_queue->tx_bd_base;
+ tx_queue->skb_curtx = 0;
+ tx_queue->skb_dirtytx = 0;
+
+ /* Initialize Transmit Descriptor Ring */
+ txbdp = tx_queue->tx_bd_base;
+ for (j = 0; j < tx_queue->tx_ring_size; j++) {
+ txbdp->lstatus = 0;
+ txbdp->bufPtr = 0;
+ txbdp++;
+ }
+
+ /* Set the last descriptor in the ring to indicate wrap */
+ txbdp--;
+ txbdp->status = cpu_to_be16(be16_to_cpu(txbdp->status) |
+ TXBD_WRAP);
+ }
+
+ rfbptr = &regs->rfbptr0;
+ for (i = 0; i < priv->num_rx_queues; i++) {
+ rx_queue = priv->rx_queue[i];
+ rx_queue->cur_rx = rx_queue->rx_bd_base;
+ rx_queue->skb_currx = 0;
+ rxbdp = rx_queue->rx_bd_base;
+
+ for (j = 0; j < rx_queue->rx_ring_size; j++) {
+ struct sk_buff *skb = rx_queue->rx_skbuff[j];
+
+ if (skb) {
+ bufaddr = be32_to_cpu(rxbdp->bufPtr);
+ } else {
+ skb = gfar_new_skb(ndev, &bufaddr);
+ if (!skb) {
+ netdev_err(ndev, "Can't allocate RX buffers\n");
+ return -ENOMEM;
+ }
+ rx_queue->rx_skbuff[j] = skb;
+ }
+
+ gfar_init_rxbdp(rx_queue, rxbdp, bufaddr);
+ rxbdp++;
+ }
+
+ rx_queue->rfbptr = rfbptr;
+ rfbptr += 2;
+ }
+
+ return 0;
+}
+
+static int gfar_alloc_skb_resources(struct net_device *ndev)
+{
+ void *vaddr;
+ dma_addr_t addr;
+ int i, j, k;
+ struct gfar_private *priv = netdev_priv(ndev);
+ struct device *dev = priv->dev;
+ struct gfar_priv_tx_q *tx_queue = NULL;
+ struct gfar_priv_rx_q *rx_queue = NULL;
+
+ priv->total_tx_ring_size = 0;
+ for (i = 0; i < priv->num_tx_queues; i++)
+ priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;
+
+ priv->total_rx_ring_size = 0;
+ for (i = 0; i < priv->num_rx_queues; i++)
+ priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;
+
+ /* Allocate memory for the buffer descriptors */
+ vaddr = dma_alloc_coherent(dev,
+ (priv->total_tx_ring_size *
+ sizeof(struct txbd8)) +
+ (priv->total_rx_ring_size *
+ sizeof(struct rxbd8)),
+ &addr, GFP_KERNEL);
+ if (!vaddr)
+ return -ENOMEM;
+
+ for (i = 0; i < priv->num_tx_queues; i++) {
+ tx_queue = priv->tx_queue[i];
+ tx_queue->tx_bd_base = vaddr;
+ tx_queue->tx_bd_dma_base = addr;
+ tx_queue->dev = ndev;
+ /* enet DMA only understands physical addresses */
+ addr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
+ vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
+ }
+
+ /* Start the rx descriptor ring where the tx ring leaves off */
+ for (i = 0; i < priv->num_rx_queues; i++) {
+ rx_queue = priv->rx_queue[i];
+ rx_queue->rx_bd_base = vaddr;
+ rx_queue->rx_bd_dma_base = addr;
+ rx_queue->dev = ndev;
+ addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
+ vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
+ }
+
+ /* Setup the skbuff rings */
+ for (i = 0; i < priv->num_tx_queues; i++) {
+ tx_queue = priv->tx_queue[i];
+ tx_queue->tx_skbuff =
+ kmalloc_array(tx_queue->tx_ring_size,
+ sizeof(*tx_queue->tx_skbuff),
+ GFP_KERNEL);
+ if (!tx_queue->tx_skbuff)
+ goto cleanup;
+
+ for (k = 0; k < tx_queue->tx_ring_size; k++)
+ tx_queue->tx_skbuff[k] = NULL;
+ }
+
+ for (i = 0; i < priv->num_rx_queues; i++) {
+ rx_queue = priv->rx_queue[i];
+ rx_queue->rx_skbuff =
+ kmalloc_array(rx_queue->rx_ring_size,
+ sizeof(*rx_queue->rx_skbuff),
+ GFP_KERNEL);
+ if (!rx_queue->rx_skbuff)
+ goto cleanup;
+
+ for (j = 0; j < rx_queue->rx_ring_size; j++)
+ rx_queue->rx_skbuff[j] = NULL;
+ }
+
+ if (gfar_init_bds(ndev))
+ goto cleanup;
+
+ return 0;
+
+cleanup:
+ free_skb_resources(priv);
+ return -ENOMEM;
+}
+
+static void gfar_init_tx_rx_base(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 __iomem *baddr;
+ int i;
+
+ baddr = &regs->tbase0;
+ for (i = 0; i < priv->num_tx_queues; i++) {
+ gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
+ baddr += 2;
+ }
+
+ baddr = &regs->rbase0;
+ for (i = 0; i < priv->num_rx_queues; i++) {
+ gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
+ baddr += 2;
+ }
+}
+
+static void gfar_init_rqprm(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 __iomem *baddr;
+ int i;
+
+ baddr = &regs->rqprm0;
+ for (i = 0; i < priv->num_rx_queues; i++) {
+ gfar_write(baddr, priv->rx_queue[i]->rx_ring_size |
+ (DEFAULT_RX_LFC_THR << FBTHR_SHIFT));
+ baddr++;
+ }
+}
+
+static void gfar_rx_buff_size_config(struct gfar_private *priv)
+{
+ int frame_size = priv->ndev->mtu + ETH_HLEN + ETH_FCS_LEN;
+
+ /* set this when rx hw offload (TOE) functions are being used */
+ priv->uses_rxfcb = 0;
+
+ if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX))
+ priv->uses_rxfcb = 1;
+
+ if (priv->hwts_rx_en)
+ priv->uses_rxfcb = 1;
+
+ if (priv->uses_rxfcb)
+ frame_size += GMAC_FCB_LEN;
+
+ frame_size += priv->padding;
+
+ frame_size = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
+ INCREMENTAL_BUFFER_SIZE;
+
+ priv->rx_buffer_size = frame_size;
+}
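+
+/*
+ * Worked example, assuming INCREMENTAL_BUFFER_SIZE is 512, GMAC_FCB_LEN is 8
+ * and 8 bytes of padding (as set up in gfar_probe() when timestamping is
+ * available): an MTU of 1500 gives 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) =
+ * 1518 bytes; adding the FCB and padding yields 1534, which the rounding
+ * step brings up to 1536, the default rx_buffer_size.
+ */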
+
+static void gfar_mac_rx_config(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 rctrl = 0;
+
+ if (priv->rx_filer_enable) {
+ rctrl |= RCTRL_FILREN;
+ /* Program the RIR0 reg with the required distribution */
+ if (priv->poll_mode == GFAR_SQ_POLLING)
+ gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0);
+ else /* GFAR_MQ_POLLING */
+ gfar_write(&regs->rir0, DEFAULT_8RXQ_RIR0);
+ }
+
+ /* Restore PROMISC mode */
+ if (priv->ndev->flags & IFF_PROMISC)
+ rctrl |= RCTRL_PROM;
+
+ if (priv->ndev->features & NETIF_F_RXCSUM)
+ rctrl |= RCTRL_CHECKSUMMING;
+
+ if (priv->extended_hash)
+ rctrl |= RCTRL_EXTHASH | RCTRL_EMEN;
+
+ if (priv->padding) {
+ rctrl &= ~RCTRL_PAL_MASK;
+ rctrl |= RCTRL_PADDING(priv->padding);
+ }
+
+ /* Enable HW time stamping if requested from user space */
+ if (priv->hwts_rx_en)
+ rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;
+
+ if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
+ rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
+
+ /* Clear the LFC bit */
+ gfar_write(&regs->rctrl, rctrl);
+ /* Init flow control threshold values */
+ gfar_init_rqprm(priv);
+ gfar_write(&regs->ptv, DEFAULT_LFC_PTVVAL);
+ rctrl |= RCTRL_LFC;
+
+ /* Init rctrl based on our settings */
+ gfar_write(&regs->rctrl, rctrl);
+}
+
+static void gfar_mac_tx_config(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 tctrl = 0;
+
+ if (priv->ndev->features & NETIF_F_IP_CSUM)
+ tctrl |= TCTRL_INIT_CSUM;
+
+ if (priv->prio_sched_en)
+ tctrl |= TCTRL_TXSCHED_PRIO;
+ else {
+ tctrl |= TCTRL_TXSCHED_WRRS;
+ gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT);
+ gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
+ }
+
+ if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
+ tctrl |= TCTRL_VLINS;
+
+ gfar_write(&regs->tctrl, tctrl);
+}
+
+static void gfar_configure_coalescing(struct gfar_private *priv,
+ unsigned long tx_mask, unsigned long rx_mask)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 __iomem *baddr;
+
+ if (priv->mode == MQ_MG_MODE) {
+ int i = 0;
+
+ baddr = &regs->txic0;
+ for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
+ gfar_write(baddr + i, 0);
+ if (likely(priv->tx_queue[i]->txcoalescing))
+ gfar_write(baddr + i, priv->tx_queue[i]->txic);
+ }
+
+ baddr = &regs->rxic0;
+ for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
+ gfar_write(baddr + i, 0);
+ if (likely(priv->rx_queue[i]->rxcoalescing))
+ gfar_write(baddr + i, priv->rx_queue[i]->rxic);
+ }
+ } else {
+ /* Backward compatible case -- even if we enable
+ * multiple queues, there's only a single reg to program
+ */
+ gfar_write(&regs->txic, 0);
+ if (likely(priv->tx_queue[0]->txcoalescing))
+ gfar_write(&regs->txic, priv->tx_queue[0]->txic);
+
+ gfar_write(&regs->rxic, 0);
+ if (unlikely(priv->rx_queue[0]->rxcoalescing))
+ gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
+ }
+}
+
+void gfar_configure_coalescing_all(struct gfar_private *priv)
+{
+ gfar_configure_coalescing(priv, 0xFF, 0xFF);
+}
+
+static struct net_device_stats *gfar_get_stats(struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
+ unsigned long tx_packets = 0, tx_bytes = 0;
+ int i;
+
+ for (i = 0; i < priv->num_rx_queues; i++) {
+ rx_packets += priv->rx_queue[i]->stats.rx_packets;
+ rx_bytes += priv->rx_queue[i]->stats.rx_bytes;
+ rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
+ }
+
+ dev->stats.rx_packets = rx_packets;
+ dev->stats.rx_bytes = rx_bytes;
+ dev->stats.rx_dropped = rx_dropped;
+
+ for (i = 0; i < priv->num_tx_queues; i++) {
+ tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
+ tx_packets += priv->tx_queue[i]->stats.tx_packets;
+ }
+
+ dev->stats.tx_bytes = tx_bytes;
+ dev->stats.tx_packets = tx_packets;
+
+ return &dev->stats;
+}
+
+static const struct net_device_ops gfar_netdev_ops = {
+ .ndo_open = gfar_enet_open,
+ .ndo_start_xmit = gfar_start_xmit,
+ .ndo_stop = gfar_close,
+ .ndo_change_mtu = gfar_change_mtu,
+ .ndo_set_features = gfar_set_features,
+ .ndo_set_rx_mode = gfar_set_multi,
+ .ndo_tx_timeout = gfar_timeout,
+ .ndo_do_ioctl = gfar_ioctl,
+ .ndo_get_stats = gfar_get_stats,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = gfar_netpoll,
+#endif
+};
+
+static void gfar_ints_disable(struct gfar_private *priv)
+{
+ int i;
+ for (i = 0; i < priv->num_grps; i++) {
+ struct gfar __iomem *regs = priv->gfargrp[i].regs;
+ /* Clear IEVENT */
+ gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
+
+ /* Initialize IMASK */
+ gfar_write(&regs->imask, IMASK_INIT_CLEAR);
+ }
+}
+
+static void gfar_ints_enable(struct gfar_private *priv)
+{
+ int i;
+ for (i = 0; i < priv->num_grps; i++) {
+ struct gfar __iomem *regs = priv->gfargrp[i].regs;
+ /* Unmask the interrupts we look for */
+ gfar_write(&regs->imask, IMASK_DEFAULT);
+ }
+}
+
+static void lock_tx_qs(struct gfar_private *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->num_tx_queues; i++)
+ spin_lock(&priv->tx_queue[i]->txlock);
+}
+
+static void unlock_tx_qs(struct gfar_private *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->num_tx_queues; i++)
+ spin_unlock(&priv->tx_queue[i]->txlock);
+}
+
+static int gfar_alloc_tx_queues(struct gfar_private *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->num_tx_queues; i++) {
+ priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
+ GFP_KERNEL);
+ if (!priv->tx_queue[i])
+ return -ENOMEM;
+
+ priv->tx_queue[i]->tx_skbuff = NULL;
+ priv->tx_queue[i]->qindex = i;
+ priv->tx_queue[i]->dev = priv->ndev;
+ spin_lock_init(&(priv->tx_queue[i]->txlock));
+ }
+ return 0;
+}
+
+static int gfar_alloc_rx_queues(struct gfar_private *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->num_rx_queues; i++) {
+ priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
+ GFP_KERNEL);
+ if (!priv->rx_queue[i])
+ return -ENOMEM;
+
+ priv->rx_queue[i]->rx_skbuff = NULL;
+ priv->rx_queue[i]->qindex = i;
+ priv->rx_queue[i]->dev = priv->ndev;
+ }
+ return 0;
+}
+
+static void gfar_free_tx_queues(struct gfar_private *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->num_tx_queues; i++)
+ kfree(priv->tx_queue[i]);
+}
+
+static void gfar_free_rx_queues(struct gfar_private *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->num_rx_queues; i++)
+ kfree(priv->rx_queue[i]);
+}
+
+static void unmap_group_regs(struct gfar_private *priv)
+{
+ int i;
+
+ for (i = 0; i < MAXGROUPS; i++)
+ if (priv->gfargrp[i].regs)
+ iounmap(priv->gfargrp[i].regs);
+}
+
+static void free_gfar_dev(struct gfar_private *priv)
+{
+ int i, j;
+
+ for (i = 0; i < priv->num_grps; i++)
+ for (j = 0; j < GFAR_NUM_IRQS; j++) {
+ kfree(priv->gfargrp[i].irqinfo[j]);
+ priv->gfargrp[i].irqinfo[j] = NULL;
+ }
+
+ free_netdev(priv->ndev);
+}
+
+static void disable_napi(struct gfar_private *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->num_grps; i++) {
+ napi_disable(&priv->gfargrp[i].napi_rx);
+ napi_disable(&priv->gfargrp[i].napi_tx);
+ }
+}
+
+static void enable_napi(struct gfar_private *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->num_grps; i++) {
+ napi_enable(&priv->gfargrp[i].napi_rx);
+ napi_enable(&priv->gfargrp[i].napi_tx);
+ }
+}
+
+static int gfar_parse_group(struct device_node *np,
+ struct gfar_private *priv, const char *model)
+{
+ struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps];
+ int i;
+
+ for (i = 0; i < GFAR_NUM_IRQS; i++) {
+ grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo),
+ GFP_KERNEL);
+ if (!grp->irqinfo[i])
+ return -ENOMEM;
+ }
+
+ grp->regs = of_iomap(np, 0);
+ if (!grp->regs)
+ return -ENOMEM;
+
+ gfar_irq(grp, TX)->irq = irq_of_parse_and_map(np, 0);
+
+ /* If we aren't the FEC we have multiple interrupts */
+ if (model && strcasecmp(model, "FEC")) {
+ gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1);
+ gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2);
+ if (gfar_irq(grp, TX)->irq == NO_IRQ ||
+ gfar_irq(grp, RX)->irq == NO_IRQ ||
+ gfar_irq(grp, ER)->irq == NO_IRQ)
+ return -EINVAL;
+ }
+
+ grp->priv = priv;
+ spin_lock_init(&grp->grplock);
+ if (priv->mode == MQ_MG_MODE) {
+ u32 rxq_mask, txq_mask;
+ int ret;
+
+ grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
+ grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
+
+ ret = of_property_read_u32(np, "fsl,rx-bit-map", &rxq_mask);
+ if (!ret) {
+ grp->rx_bit_map = rxq_mask ?
+ rxq_mask : (DEFAULT_MAPPING >> priv->num_grps);
+ }
+
+ ret = of_property_read_u32(np, "fsl,tx-bit-map", &txq_mask);
+ if (!ret) {
+ grp->tx_bit_map = txq_mask ?
+ txq_mask : (DEFAULT_MAPPING >> priv->num_grps);
+ }
+
+ if (priv->poll_mode == GFAR_SQ_POLLING) {
+ /* One Q per interrupt group: Q0 to G0, Q1 to G1 */
+ grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
+ grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
+ }
+ } else {
+ grp->rx_bit_map = 0xFF;
+ grp->tx_bit_map = 0xFF;
+ }
+
+ /* bit_map's MSB is q0 (from q0 to q7), but for_each_set_bit parses
+ * from right to left, so we need to reverse the 8 bits to get the q index
+ */
+ grp->rx_bit_map = bitrev8(grp->rx_bit_map);
+ grp->tx_bit_map = bitrev8(grp->tx_bit_map);
+
+ /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
+ * also assign queues to groups
+ */
+ for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
+ if (!grp->rx_queue)
+ grp->rx_queue = priv->rx_queue[i];
+ grp->num_rx_queues++;
+ grp->rstat |= (RSTAT_CLEAR_RHALT >> i);
+ priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
+ priv->rx_queue[i]->grp = grp;
+ }
+
+ for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
+ if (!grp->tx_queue)
+ grp->tx_queue = priv->tx_queue[i];
+ grp->num_tx_queues++;
+ grp->tstat |= (TSTAT_CLEAR_THALT >> i);
+ priv->tqueue |= (TQUEUE_EN0 >> i);
+ priv->tx_queue[i]->grp = grp;
+ }
+
+ priv->num_grps++;
+
+ return 0;
+}
+
+static int gfar_of_group_count(struct device_node *np)
+{
+ struct device_node *child;
+ int num = 0;
+
+ for_each_available_child_of_node(np, child)
+ if (!of_node_cmp(child->name, "queue-group"))
+ num++;
+
+ return num;
+}
+
+static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
+{
+ const char *model;
+ const char *ctype;
+ const void *mac_addr;
+ int err = 0, i;
+ struct net_device *dev = NULL;
+ struct gfar_private *priv = NULL;
+ struct device_node *np = ofdev->dev.of_node;
+ struct device_node *child = NULL;
+ struct property *stash;
+ u32 stash_len = 0;
+ u32 stash_idx = 0;
+ unsigned int num_tx_qs, num_rx_qs;
+ unsigned short mode, poll_mode;
+
+ if (!np)
+ return -ENODEV;
+
+ if (of_device_is_compatible(np, "fsl,etsec2")) {
+ mode = MQ_MG_MODE;
+ poll_mode = GFAR_SQ_POLLING;
+ } else {
+ mode = SQ_SG_MODE;
+ poll_mode = GFAR_SQ_POLLING;
+ }
+
+ if (mode == SQ_SG_MODE) {
+ num_tx_qs = 1;
+ num_rx_qs = 1;
+ } else { /* MQ_MG_MODE */
+ /* get the actual number of supported groups */
+ unsigned int num_grps = gfar_of_group_count(np);
+
+ if (num_grps == 0 || num_grps > MAXGROUPS) {
+ dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n",
+ num_grps);
+ pr_err("Cannot do alloc_etherdev, aborting\n");
+ return -EINVAL;
+ }
+
+ if (poll_mode == GFAR_SQ_POLLING) {
+ num_tx_qs = num_grps; /* one txq per int group */
+ num_rx_qs = num_grps; /* one rxq per int group */
+ } else { /* GFAR_MQ_POLLING */
+ u32 tx_queues, rx_queues;
+ int ret;
+
+ /* parse the num of HW tx and rx queues */
+ ret = of_property_read_u32(np, "fsl,num_tx_queues",
+ &tx_queues);
+ num_tx_qs = ret ? 1 : tx_queues;
+
+ ret = of_property_read_u32(np, "fsl,num_rx_queues",
+ &rx_queues);
+ num_rx_qs = ret ? 1 : rx_queues;
+ }
+ }
+
+ if (num_tx_qs > MAX_TX_QS) {
+ pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
+ num_tx_qs, MAX_TX_QS);
+ pr_err("Cannot do alloc_etherdev, aborting\n");
+ return -EINVAL;
+ }
+
+ if (num_rx_qs > MAX_RX_QS) {
+ pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
+ num_rx_qs, MAX_RX_QS);
+ pr_err("Cannot do alloc_etherdev, aborting\n");
+ return -EINVAL;
+ }
+
+ *pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
+ dev = *pdev;
+ if (NULL == dev)
+ return -ENOMEM;
+
+ priv = netdev_priv(dev);
+ priv->ndev = dev;
+
+ priv->mode = mode;
+ priv->poll_mode = poll_mode;
+
+ priv->num_tx_queues = num_tx_qs;
+ netif_set_real_num_rx_queues(dev, num_rx_qs);
+ priv->num_rx_queues = num_rx_qs;
+
+ err = gfar_alloc_tx_queues(priv);
+ if (err)
+ goto tx_alloc_failed;
+
+ err = gfar_alloc_rx_queues(priv);
+ if (err)
+ goto rx_alloc_failed;
+
+ err = of_property_read_string(np, "model", &model);
+ if (err) {
+ pr_err("Device model property missing, aborting\n");
+ goto rx_alloc_failed;
+ }
+
+ /* Init Rx queue filer rule set linked list */
+ INIT_LIST_HEAD(&priv->rx_list.list);
+ priv->rx_list.count = 0;
+ mutex_init(&priv->rx_queue_access);
+
+ for (i = 0; i < MAXGROUPS; i++)
+ priv->gfargrp[i].regs = NULL;
+
+ /* Parse and initialize group specific information */
+ if (priv->mode == MQ_MG_MODE) {
+ for_each_available_child_of_node(np, child) {
+ if (of_node_cmp(child->name, "queue-group"))
+ continue;
+
+ err = gfar_parse_group(child, priv, model);
+ if (err)
+ goto err_grp_init;
+ }
+ } else { /* SQ_SG_MODE */
+ err = gfar_parse_group(np, priv, model);
+ if (err)
+ goto err_grp_init;
+ }
+
+ stash = of_find_property(np, "bd-stash", NULL);
+
+ if (stash) {
+ priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
+ priv->bd_stash_en = 1;
+ }
+
+ err = of_property_read_u32(np, "rx-stash-len", &stash_len);
+
+ if (err == 0)
+ priv->rx_stash_size = stash_len;
+
+ err = of_property_read_u32(np, "rx-stash-idx", &stash_idx);
+
+ if (err == 0)
+ priv->rx_stash_index = stash_idx;
+
+ if (stash_len || stash_idx)
+ priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;
+
+ mac_addr = of_get_mac_address(np);
+
+ if (mac_addr)
+ memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
+
+ if (model && !strcasecmp(model, "TSEC"))
+ priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
+ FSL_GIANFAR_DEV_HAS_COALESCE |
+ FSL_GIANFAR_DEV_HAS_RMON |
+ FSL_GIANFAR_DEV_HAS_MULTI_INTR;
+
+ if (model && !strcasecmp(model, "eTSEC"))
+ priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
+ FSL_GIANFAR_DEV_HAS_COALESCE |
+ FSL_GIANFAR_DEV_HAS_RMON |
+ FSL_GIANFAR_DEV_HAS_MULTI_INTR |
+ FSL_GIANFAR_DEV_HAS_CSUM |
+ FSL_GIANFAR_DEV_HAS_VLAN |
+ FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
+ FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
+ FSL_GIANFAR_DEV_HAS_TIMER;
+
+ err = of_property_read_string(np, "phy-connection-type", &ctype);
+
+ /* We only care about rgmii-id. The rest are autodetected */
+ if (err == 0 && !strcmp(ctype, "rgmii-id"))
+ priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
+ else
+ priv->interface = PHY_INTERFACE_MODE_MII;
+
+ if (of_find_property(np, "fsl,magic-packet", NULL))
+ priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;
+
+ priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
+
+ /* In the case of a fixed PHY, the DT node associated
+ * to the PHY is the Ethernet MAC DT node.
+ */
+ if (!priv->phy_node && of_phy_is_fixed_link(np)) {
+ err = of_phy_register_fixed_link(np);
+ if (err)
+ goto err_grp_init;
+
+ priv->phy_node = of_node_get(np);
+ }
+
+ /* Find the TBI PHY. If it's not there, we don't support SGMII */
+ priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
+
+ return 0;
+
+err_grp_init:
+ unmap_group_regs(priv);
+rx_alloc_failed:
+ gfar_free_rx_queues(priv);
+tx_alloc_failed:
+ gfar_free_tx_queues(priv);
+ free_gfar_dev(priv);
+ return err;
+}
+
+static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
+{
+ struct hwtstamp_config config;
+ struct gfar_private *priv = netdev_priv(netdev);
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ /* reserved for future extensions */
+ if (config.flags)
+ return -EINVAL;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ priv->hwts_tx_en = 0;
+ break;
+ case HWTSTAMP_TX_ON:
+ if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
+ return -ERANGE;
+ priv->hwts_tx_en = 1;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ if (priv->hwts_rx_en) {
+ priv->hwts_rx_en = 0;
+ reset_gfar(netdev);
+ }
+ break;
+ default:
+ if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
+ return -ERANGE;
+ if (!priv->hwts_rx_en) {
+ priv->hwts_rx_en = 1;
+ reset_gfar(netdev);
+ }
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ }
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
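+
+/*
+ * From user space this configuration is driven through the SIOCSHWTSTAMP
+ * ioctl. A minimal sketch (error handling omitted; needs <net/if.h>,
+ * <linux/sockios.h> and <linux/net_tstamp.h>, and "eth0"/sock_fd are just
+ * placeholders):
+ *
+ *	struct hwtstamp_config cfg = { 0 };
+ *	struct ifreq ifr = { 0 };
+ *
+ *	cfg.tx_type = HWTSTAMP_TX_ON;
+ *	cfg.rx_filter = HWTSTAMP_FILTER_ALL;
+ *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
+ *	ifr.ifr_data = (void *)&cfg;
+ *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
+ */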
+
+static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
+{
+ struct hwtstamp_config config;
+ struct gfar_private *priv = netdev_priv(netdev);
+
+ config.flags = 0;
+ config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ config.rx_filter = (priv->hwts_rx_en ?
+ HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
+static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ if (cmd == SIOCSHWTSTAMP)
+ return gfar_hwtstamp_set(dev, rq);
+ if (cmd == SIOCGHWTSTAMP)
+ return gfar_hwtstamp_get(dev, rq);
+
+ if (!priv->phydev)
+ return -ENODEV;
+
+ return phy_mii_ioctl(priv->phydev, rq, cmd);
+}
+
+static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
+ u32 class)
+{
+ u32 rqfpr = FPR_FILER_MASK;
+ u32 rqfcr = 0x0;
+
+ rqfar--;
+ rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
+ priv->ftp_rqfpr[rqfar] = rqfpr;
+ priv->ftp_rqfcr[rqfar] = rqfcr;
+ gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
+
+ rqfar--;
+ rqfcr = RQFCR_CMP_NOMATCH;
+ priv->ftp_rqfpr[rqfar] = rqfpr;
+ priv->ftp_rqfcr[rqfar] = rqfcr;
+ gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
+
+ rqfar--;
+ rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
+ rqfpr = class;
+ priv->ftp_rqfcr[rqfar] = rqfcr;
+ priv->ftp_rqfpr[rqfar] = rqfpr;
+ gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
+
+ rqfar--;
+ rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
+ rqfpr = class;
+ priv->ftp_rqfcr[rqfar] = rqfcr;
+ priv->ftp_rqfpr[rqfar] = rqfpr;
+ gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
+
+ return rqfar;
+}
+
+static void gfar_init_filer_table(struct gfar_private *priv)
+{
+ int i = 0x0;
+ u32 rqfar = MAX_FILER_IDX;
+ u32 rqfcr = 0x0;
+ u32 rqfpr = FPR_FILER_MASK;
+
+ /* Default rule */
+ rqfcr = RQFCR_CMP_MATCH;
+ priv->ftp_rqfcr[rqfar] = rqfcr;
+ priv->ftp_rqfpr[rqfar] = rqfpr;
+ gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
+
+ rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
+ rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
+ rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
+ rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
+ rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
+ rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);
+
+ /* cur_filer_idx indicates the first non-masked rule */
+ priv->cur_filer_idx = rqfar;
+
+ /* Rest are masked rules */
+ rqfcr = RQFCR_CMP_NOMATCH;
+ for (i = 0; i < rqfar; i++) {
+ priv->ftp_rqfcr[i] = rqfcr;
+ priv->ftp_rqfpr[i] = rqfpr;
+ gfar_write_filer(priv, i, rqfcr, rqfpr);
+ }
+}
+
+#ifdef CONFIG_PPC
+static void __gfar_detect_errata_83xx(struct gfar_private *priv)
+{
+ unsigned int pvr = mfspr(SPRN_PVR);
+ unsigned int svr = mfspr(SPRN_SVR);
+ unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
+ unsigned int rev = svr & 0xffff;
+
+ /* MPC8313 Rev 2.0 and higher; All MPC837x */
+ if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
+ (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
+ priv->errata |= GFAR_ERRATA_74;
+
+ /* MPC8313 and MPC837x all rev */
+ if ((pvr == 0x80850010 && mod == 0x80b0) ||
+ (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
+ priv->errata |= GFAR_ERRATA_76;
+
+ /* MPC8313 Rev < 2.0 */
+ if (pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020)
+ priv->errata |= GFAR_ERRATA_12;
+}
+
+static void __gfar_detect_errata_85xx(struct gfar_private *priv)
+{
+ unsigned int svr = mfspr(SPRN_SVR);
+
+ if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20))
+ priv->errata |= GFAR_ERRATA_12;
+ if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) ||
+ ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)))
+ priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */
+}
+#endif
+
+static void gfar_detect_errata(struct gfar_private *priv)
+{
+ struct device *dev = &priv->ofdev->dev;
+
+ /* no plans to fix */
+ priv->errata |= GFAR_ERRATA_A002;
+
+#ifdef CONFIG_PPC
+ if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2))
+ __gfar_detect_errata_85xx(priv);
+ else /* non-mpc85xx parts, i.e. e300 core based */
+ __gfar_detect_errata_83xx(priv);
+#endif
+
+ if (priv->errata)
+ dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
+ priv->errata);
+}
+
+void gfar_mac_reset(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 tempval;
+
+ /* Reset MAC layer */
+ gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);
+
+ /* We need to delay at least 3 TX clocks */
+ udelay(3);
+
+ /* the soft reset bit is not self-resetting, so we need to
+ * clear it before resuming normal operation
+ */
+ gfar_write(&regs->maccfg1, 0);
+
+ udelay(3);
+
+ /* Compute rx_buff_size based on config flags */
+ gfar_rx_buff_size_config(priv);
+
+ /* Initialize the max receive frame/buffer lengths */
+ gfar_write(&regs->maxfrm, priv->rx_buffer_size);
+ gfar_write(&regs->mrblr, priv->rx_buffer_size);
+
+ /* Initialize the Minimum Frame Length Register */
+ gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
+
+ /* Initialize MACCFG2. */
+ tempval = MACCFG2_INIT_SETTINGS;
+
+ /* If the mtu is larger than the max size for standard
+ * ethernet frames (ie, a jumbo frame), then set maccfg2
+ * to allow huge frames, and to check the length
+ */
+ if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
+ gfar_has_errata(priv, GFAR_ERRATA_74))
+ tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
+
+ gfar_write(&regs->maccfg2, tempval);
+
+ /* Clear mac addr hash registers */
+ gfar_write(&regs->igaddr0, 0);
+ gfar_write(&regs->igaddr1, 0);
+ gfar_write(&regs->igaddr2, 0);
+ gfar_write(&regs->igaddr3, 0);
+ gfar_write(&regs->igaddr4, 0);
+ gfar_write(&regs->igaddr5, 0);
+ gfar_write(&regs->igaddr6, 0);
+ gfar_write(&regs->igaddr7, 0);
+
+ gfar_write(&regs->gaddr0, 0);
+ gfar_write(&regs->gaddr1, 0);
+ gfar_write(&regs->gaddr2, 0);
+ gfar_write(&regs->gaddr3, 0);
+ gfar_write(&regs->gaddr4, 0);
+ gfar_write(&regs->gaddr5, 0);
+ gfar_write(&regs->gaddr6, 0);
+ gfar_write(&regs->gaddr7, 0);
+
+ if (priv->extended_hash)
+ gfar_clear_exact_match(priv->ndev);
+
+ gfar_mac_rx_config(priv);
+
+ gfar_mac_tx_config(priv);
+
+ gfar_set_mac_address(priv->ndev);
+
+ gfar_set_multi(priv->ndev);
+
+ /* clear ievent and imask before configuring coalescing */
+ gfar_ints_disable(priv);
+
+ /* Configure the coalescing support */
+ gfar_configure_coalescing_all(priv);
+}
+
+static void gfar_hw_init(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 attrs;
+
+ /* Stop the DMA engine now, in case it was running before
+ * (The firmware could have used it, and left it running).
+ */
+ gfar_halt(priv);
+
+ gfar_mac_reset(priv);
+
+ /* Zero out the rmon mib registers if it has them */
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
+ memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));
+
+ /* Mask off the CAM interrupts */
+ gfar_write(&regs->rmon.cam1, 0xffffffff);
+ gfar_write(&regs->rmon.cam2, 0xffffffff);
+ }
+
+ /* Initialize ECNTRL */
+ gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
+
+ /* Set the extraction length and index */
+ attrs = ATTRELI_EL(priv->rx_stash_size) |
+ ATTRELI_EI(priv->rx_stash_index);
+
+ gfar_write(&regs->attreli, attrs);
+
+ /* Start with defaults, and add stashing
+ * depending on driver parameters
+ */
+ attrs = ATTR_INIT_SETTINGS;
+
+ if (priv->bd_stash_en)
+ attrs |= ATTR_BDSTASH;
+
+ if (priv->rx_stash_size != 0)
+ attrs |= ATTR_BUFSTASH;
+
+ gfar_write(&regs->attr, attrs);
+
+ /* FIFO configs */
+ gfar_write(&regs->fifo_tx_thr, DEFAULT_FIFO_TX_THR);
+ gfar_write(&regs->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE);
+ gfar_write(&regs->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF);
+
+ /* Program the interrupt steering regs, only for MG devices */
+ if (priv->num_grps > 1)
+ gfar_write_isrg(priv);
+}
+
+static void gfar_init_addr_hash_table(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
+ priv->extended_hash = 1;
+ priv->hash_width = 9;
+
+ priv->hash_regs[0] = &regs->igaddr0;
+ priv->hash_regs[1] = &regs->igaddr1;
+ priv->hash_regs[2] = &regs->igaddr2;
+ priv->hash_regs[3] = &regs->igaddr3;
+ priv->hash_regs[4] = &regs->igaddr4;
+ priv->hash_regs[5] = &regs->igaddr5;
+ priv->hash_regs[6] = &regs->igaddr6;
+ priv->hash_regs[7] = &regs->igaddr7;
+ priv->hash_regs[8] = &regs->gaddr0;
+ priv->hash_regs[9] = &regs->gaddr1;
+ priv->hash_regs[10] = &regs->gaddr2;
+ priv->hash_regs[11] = &regs->gaddr3;
+ priv->hash_regs[12] = &regs->gaddr4;
+ priv->hash_regs[13] = &regs->gaddr5;
+ priv->hash_regs[14] = &regs->gaddr6;
+ priv->hash_regs[15] = &regs->gaddr7;
+
+ } else {
+ priv->extended_hash = 0;
+ priv->hash_width = 8;
+
+ priv->hash_regs[0] = &regs->gaddr0;
+ priv->hash_regs[1] = &regs->gaddr1;
+ priv->hash_regs[2] = &regs->gaddr2;
+ priv->hash_regs[3] = &regs->gaddr3;
+ priv->hash_regs[4] = &regs->gaddr4;
+ priv->hash_regs[5] = &regs->gaddr5;
+ priv->hash_regs[6] = &regs->gaddr6;
+ priv->hash_regs[7] = &regs->gaddr7;
+ }
+}
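+
+/*
+ * With the extended hash, 16 registers of 32 bits each provide 512 hash
+ * buckets, hence the 9-bit hash width (2^9 = 512); the non-extended case has
+ * 8 registers, 256 buckets and an 8-bit width.
+ */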
+
+/* Set up the ethernet device structure, private data,
+ * and anything else we need before we start
+ */
+static int gfar_probe(struct platform_device *ofdev)
+{
+ struct net_device *dev = NULL;
+ struct gfar_private *priv = NULL;
+ int err = 0, i;
+
+ err = gfar_of_init(ofdev, &dev);
+
+ if (err)
+ return err;
+
+ priv = netdev_priv(dev);
+ priv->ndev = dev;
+ priv->ofdev = ofdev;
+ priv->dev = &ofdev->dev;
+ SET_NETDEV_DEV(dev, &ofdev->dev);
+
+ spin_lock_init(&priv->bflock);
+ INIT_WORK(&priv->reset_task, gfar_reset_task);
+
+ platform_set_drvdata(ofdev, priv);
+
+ gfar_detect_errata(priv);
+
+ /* Set the dev->base_addr to the gfar reg region */
+ dev->base_addr = (unsigned long) priv->gfargrp[0].regs;
+
+ /* Fill in the dev structure */
+ dev->watchdog_timeo = TX_TIMEOUT;
+ dev->mtu = 1500;
+ dev->netdev_ops = &gfar_netdev_ops;
+ dev->ethtool_ops = &gfar_ethtool_ops;
+
+ /* Register for napi ...We are registering NAPI for each grp */
+ for (i = 0; i < priv->num_grps; i++) {
+ if (priv->poll_mode == GFAR_SQ_POLLING) {
+ netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
+ gfar_poll_rx_sq, GFAR_DEV_WEIGHT);
+ netif_napi_add(dev, &priv->gfargrp[i].napi_tx,
+ gfar_poll_tx_sq, 2);
+ } else {
+ netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
+ gfar_poll_rx, GFAR_DEV_WEIGHT);
+ netif_napi_add(dev, &priv->gfargrp[i].napi_tx,
+ gfar_poll_tx, 2);
+ }
+ }
+
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
+ dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
+ NETIF_F_RXCSUM;
+ dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
+ NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
+ }
+
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
+ dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX;
+ dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
+ }
+
+ gfar_init_addr_hash_table(priv);
+
+ /* Insert receive time stamps into padding alignment bytes */
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
+ priv->padding = 8;
+
+ if (dev->features & NETIF_F_IP_CSUM ||
+ priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
+ dev->needed_headroom = GMAC_FCB_LEN;
+
+ priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
+
+ /* Initializing some of the rx/tx queue level parameters */
+ for (i = 0; i < priv->num_tx_queues; i++) {
+ priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
+ priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
+ priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
+ priv->tx_queue[i]->txic = DEFAULT_TXIC;
+ }
+
+ for (i = 0; i < priv->num_rx_queues; i++) {
+ priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
+ priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
+ priv->rx_queue[i]->rxic = DEFAULT_RXIC;
+ }
+
+ /* always enable rx filer */
+ priv->rx_filer_enable = 1;
+ /* Enable most messages by default */
+ priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
+ /* use priority h/w tx queue scheduling for single queue devices */
+ if (priv->num_tx_queues == 1)
+ priv->prio_sched_en = 1;
+
+ set_bit(GFAR_DOWN, &priv->state);
+
+ gfar_hw_init(priv);
+
+ /* Carrier starts down, phylib will bring it up */
+ netif_carrier_off(dev);
+
+ err = register_netdev(dev);
+
+ if (err) {
+ pr_err("%s: Cannot register net device, aborting\n", dev->name);
+ goto register_fail;
+ }
+
+ device_init_wakeup(&dev->dev,
+ priv->device_flags &
+ FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
+
+ /* fill out IRQ number and name fields */
+ for (i = 0; i < priv->num_grps; i++) {
+ struct gfar_priv_grp *grp = &priv->gfargrp[i];
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
+ sprintf(gfar_irq(grp, TX)->name, "%s%s%c%s",
+ dev->name, "_g", '0' + i, "_tx");
+ sprintf(gfar_irq(grp, RX)->name, "%s%s%c%s",
+ dev->name, "_g", '0' + i, "_rx");
+ sprintf(gfar_irq(grp, ER)->name, "%s%s%c%s",
+ dev->name, "_g", '0' + i, "_er");
+ } else
+ strcpy(gfar_irq(grp, TX)->name, dev->name);
+ }
+
+ /* Initialize the filer table */
+ gfar_init_filer_table(priv);
+
+ /* Print out the device info */
+ netdev_info(dev, "mac: %pM\n", dev->dev_addr);
+
+ /* Even more device info helps when determining which kernel
+ * provided which set of benchmarks.
+ */
+ netdev_info(dev, "Running with NAPI enabled\n");
+ for (i = 0; i < priv->num_rx_queues; i++)
+ netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
+ i, priv->rx_queue[i]->rx_ring_size);
+ for (i = 0; i < priv->num_tx_queues; i++)
+ netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
+ i, priv->tx_queue[i]->tx_ring_size);
+
+ return 0;
+
+register_fail:
+ unmap_group_regs(priv);
+ gfar_free_rx_queues(priv);
+ gfar_free_tx_queues(priv);
+ of_node_put(priv->phy_node);
+ of_node_put(priv->tbi_node);
+ free_gfar_dev(priv);
+ return err;
+}
+
+static int gfar_remove(struct platform_device *ofdev)
+{
+ struct gfar_private *priv = platform_get_drvdata(ofdev);
+
+ of_node_put(priv->phy_node);
+ of_node_put(priv->tbi_node);
+
+ unregister_netdev(priv->ndev);
+ unmap_group_regs(priv);
+ gfar_free_rx_queues(priv);
+ gfar_free_tx_queues(priv);
+ free_gfar_dev(priv);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+
+static int gfar_suspend(struct device *dev)
+{
+ struct gfar_private *priv = dev_get_drvdata(dev);
+ struct net_device *ndev = priv->ndev;
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ unsigned long flags;
+ u32 tempval;
+
+ int magic_packet = priv->wol_en &&
+ (priv->device_flags &
+ FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
+
+ netif_device_detach(ndev);
+
+ if (netif_running(ndev)) {
+
+ local_irq_save_nort(flags);
+ lock_tx_qs(priv);
+
+ gfar_halt_nodisable(priv);
+
+ /* Disable Tx, and Rx if wake-on-LAN is disabled. */
+ tempval = gfar_read(&regs->maccfg1);
+
+ tempval &= ~MACCFG1_TX_EN;
+
+ if (!magic_packet)
+ tempval &= ~MACCFG1_RX_EN;
+
+ gfar_write(&regs->maccfg1, tempval);
+
+ unlock_tx_qs(priv);
+ local_irq_restore_nort(flags);
+
+ disable_napi(priv);
+
+ if (magic_packet) {
+ /* Enable interrupt on Magic Packet */
+ gfar_write(&regs->imask, IMASK_MAG);
+
+ /* Enable Magic Packet mode */
+ tempval = gfar_read(&regs->maccfg2);
+ tempval |= MACCFG2_MPEN;
+ gfar_write(&regs->maccfg2, tempval);
+ } else {
+ phy_stop(priv->phydev);
+ }
+ }
+
+ return 0;
+}
+
+static int gfar_resume(struct device *dev)
+{
+ struct gfar_private *priv = dev_get_drvdata(dev);
+ struct net_device *ndev = priv->ndev;
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ unsigned long flags;
+ u32 tempval;
+ int magic_packet = priv->wol_en &&
+ (priv->device_flags &
+ FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
+
+ if (!netif_running(ndev)) {
+ netif_device_attach(ndev);
+ return 0;
+ }
+
+ if (!magic_packet && priv->phydev)
+ phy_start(priv->phydev);
+
+ /* Disable Magic Packet mode, in case something
+ * else woke us up.
+ */
+ local_irq_save_nort(flags);
+ lock_tx_qs(priv);
+
+ tempval = gfar_read(&regs->maccfg2);
+ tempval &= ~MACCFG2_MPEN;
+ gfar_write(&regs->maccfg2, tempval);
+
+ gfar_start(priv);
+
+ unlock_tx_qs(priv);
+ local_irq_restore_nort(flags);
+
+ netif_device_attach(ndev);
+
+ enable_napi(priv);
+
+ return 0;
+}
+
+static int gfar_restore(struct device *dev)
+{
+ struct gfar_private *priv = dev_get_drvdata(dev);
+ struct net_device *ndev = priv->ndev;
+
+ if (!netif_running(ndev)) {
+ netif_device_attach(ndev);
+
+ return 0;
+ }
+
+ if (gfar_init_bds(ndev)) {
+ free_skb_resources(priv);
+ return -ENOMEM;
+ }
+
+ gfar_mac_reset(priv);
+
+ gfar_init_tx_rx_base(priv);
+
+ gfar_start(priv);
+
+ priv->oldlink = 0;
+ priv->oldspeed = 0;
+ priv->oldduplex = -1;
+
+ if (priv->phydev)
+ phy_start(priv->phydev);
+
+ netif_device_attach(ndev);
+ enable_napi(priv);
+
+ return 0;
+}
+
+static struct dev_pm_ops gfar_pm_ops = {
+ .suspend = gfar_suspend,
+ .resume = gfar_resume,
+ .freeze = gfar_suspend,
+ .thaw = gfar_resume,
+ .restore = gfar_restore,
+};
+
+#define GFAR_PM_OPS (&gfar_pm_ops)
+
+#else
+
+#define GFAR_PM_OPS NULL
+
+#endif
+
+/* Reads the controller's registers to determine what interface
+ * connects it to the PHY.
+ */
+static phy_interface_t gfar_get_interface(struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 ecntrl;
+
+ ecntrl = gfar_read(&regs->ecntrl);
+
+ if (ecntrl & ECNTRL_SGMII_MODE)
+ return PHY_INTERFACE_MODE_SGMII;
+
+ if (ecntrl & ECNTRL_TBI_MODE) {
+ if (ecntrl & ECNTRL_REDUCED_MODE)
+ return PHY_INTERFACE_MODE_RTBI;
+ else
+ return PHY_INTERFACE_MODE_TBI;
+ }
+
+ if (ecntrl & ECNTRL_REDUCED_MODE) {
+ if (ecntrl & ECNTRL_REDUCED_MII_MODE) {
+ return PHY_INTERFACE_MODE_RMII;
+ } else {
+ phy_interface_t interface = priv->interface;
+
+ /* This isn't autodetected right now, so it must
+ * be set by the device tree or platform code.
+ */
+ if (interface == PHY_INTERFACE_MODE_RGMII_ID)
+ return PHY_INTERFACE_MODE_RGMII_ID;
+
+ return PHY_INTERFACE_MODE_RGMII;
+ }
+ }
+
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
+ return PHY_INTERFACE_MODE_GMII;
+
+ return PHY_INTERFACE_MODE_MII;
+}
+
+
+/* Initializes driver's PHY state, and attaches to the PHY.
+ * Returns 0 on success.
+ */
+static int init_phy(struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ uint gigabit_support =
+ priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
+ GFAR_SUPPORTED_GBIT : 0;
+ phy_interface_t interface;
+
+ priv->oldlink = 0;
+ priv->oldspeed = 0;
+ priv->oldduplex = -1;
+
+ interface = gfar_get_interface(dev);
+
+ priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
+ interface);
+ if (!priv->phydev) {
+ dev_err(&dev->dev, "could not attach to PHY\n");
+ return -ENODEV;
+ }
+
+ if (interface == PHY_INTERFACE_MODE_SGMII)
+ gfar_configure_serdes(dev);
+
+ /* Remove any features not supported by the controller */
+ priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
+ priv->phydev->advertising = priv->phydev->supported;
+
+ /* Add support for flow control, but don't advertise it by default */
+ priv->phydev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
+
+ return 0;
+}
+
+/* Initialize TBI PHY interface for communicating with the
+ * SERDES lynx PHY on the chip. We communicate with this PHY
+ * through the MDIO bus on each controller, treating it as a
+ * "normal" PHY at the address found in the TBIPA register. We assume
+ * that the TBIPA register is valid. Either the MDIO bus code will set
+ * it to a value that doesn't conflict with other PHYs on the bus, or the
+ * value doesn't matter, as there are no other PHYs on the bus.
+ */
+static void gfar_configure_serdes(struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ struct phy_device *tbiphy;
+
+ if (!priv->tbi_node) {
+ dev_warn(&dev->dev, "error: SGMII mode requires that the "
+ "device tree specify a tbi-handle\n");
+ return;
+ }
+
+ tbiphy = of_phy_find_device(priv->tbi_node);
+ if (!tbiphy) {
+ dev_err(&dev->dev, "error: Could not get TBI device\n");
+ return;
+ }
+
+ /* If the link is already up, we must already be ok, and don't need to
+ * configure and reset the TBI<->SerDes link. Maybe U-Boot configured
+ * everything for us? Resetting it takes the link down and requires
+ * several seconds for it to come back.
+ */
+ if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS)
+ return;
+
+ /* Single clk mode, MII mode off (for SerDes communication) */
+ phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
+
+ phy_write(tbiphy, MII_ADVERTISE,
+ ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
+ ADVERTISE_1000XPSE_ASYM);
+
+ phy_write(tbiphy, MII_BMCR,
+ BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
+ BMCR_SPEED1000);
+}
+
+static int __gfar_is_rx_idle(struct gfar_private *priv)
+{
+ u32 res;
+
+ /* Normally TSEC should not hang on GRS commands, so we should
+ * actually wait for IEVENT_GRSC flag.
+ */
+ if (!gfar_has_errata(priv, GFAR_ERRATA_A002))
+ return 0;
+
+ /* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
+ * the same as bits 23-30, the eTSEC Rx is assumed to be idle
+ * and the Rx can be safely reset.
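+ * (The mask 0x7f807f80 below keeps exactly those two bit ranges,
+ * so the register's two 16-bit halves can be compared directly.)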
+ */
+ res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
+ res &= 0x7f807f80;
+ if ((res & 0xffff) == (res >> 16))
+ return 1;
+
+ return 0;
+}
+
+/* Halt the receive and transmit queues */
+static void gfar_halt_nodisable(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 tempval;
+ unsigned int timeout;
+ int stopped;
+
+ gfar_ints_disable(priv);
+
+ if (gfar_is_dma_stopped(priv))
+ return;
+
+ /* Stop the DMA, and wait for it to stop */
+ tempval = gfar_read(&regs->dmactrl);
+ tempval |= (DMACTRL_GRS | DMACTRL_GTS);
+ gfar_write(&regs->dmactrl, tempval);
+
+retry:
+ timeout = 1000;
+ while (!(stopped = gfar_is_dma_stopped(priv)) && timeout) {
+ cpu_relax();
+ timeout--;
+ }
+
+ if (!timeout)
+ stopped = gfar_is_dma_stopped(priv);
+
+ if (!stopped && !gfar_is_rx_dma_stopped(priv) &&
+ !__gfar_is_rx_idle(priv))
+ goto retry;
+}
+
+/* Halt the receive and transmit queues */
+void gfar_halt(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 tempval;
+
+ /* Disable the Rx/Tx hw queues */
+ gfar_write(&regs->rqueue, 0);
+ gfar_write(&regs->tqueue, 0);
+
+ mdelay(10);
+
+ gfar_halt_nodisable(priv);
+
+ /* Disable Rx/Tx DMA */
+ tempval = gfar_read(&regs->maccfg1);
+ tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
+ gfar_write(&regs->maccfg1, tempval);
+}
+
+void stop_gfar(struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+
+ netif_tx_stop_all_queues(dev);
+
+ smp_mb__before_atomic();
+ set_bit(GFAR_DOWN, &priv->state);
+ smp_mb__after_atomic();
+
+ disable_napi(priv);
+
+ /* disable ints and gracefully shut down Rx/Tx DMA */
+ gfar_halt(priv);
+
+ phy_stop(priv->phydev);
+
+ free_skb_resources(priv);
+}
+
+static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
+{
+ struct txbd8 *txbdp;
+ struct gfar_private *priv = netdev_priv(tx_queue->dev);
+ int i, j;
+
+ txbdp = tx_queue->tx_bd_base;
+
+ for (i = 0; i < tx_queue->tx_ring_size; i++) {
+ if (!tx_queue->tx_skbuff[i])
+ continue;
+
+ dma_unmap_single(priv->dev, be32_to_cpu(txbdp->bufPtr),
+ be16_to_cpu(txbdp->length), DMA_TO_DEVICE);
+ txbdp->lstatus = 0;
+ for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
+ j++) {
+ txbdp++;
+ dma_unmap_page(priv->dev, be32_to_cpu(txbdp->bufPtr),
+ be16_to_cpu(txbdp->length),
+ DMA_TO_DEVICE);
+ }
+ txbdp++;
+ dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
+ tx_queue->tx_skbuff[i] = NULL;
+ }
+ kfree(tx_queue->tx_skbuff);
+ tx_queue->tx_skbuff = NULL;
+}
+
+static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
+{
+ struct rxbd8 *rxbdp;
+ struct gfar_private *priv = netdev_priv(rx_queue->dev);
+ int i;
+
+ rxbdp = rx_queue->rx_bd_base;
+
+ for (i = 0; i < rx_queue->rx_ring_size; i++) {
+ if (rx_queue->rx_skbuff[i]) {
+ dma_unmap_single(priv->dev, be32_to_cpu(rxbdp->bufPtr),
+ priv->rx_buffer_size,
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
+ rx_queue->rx_skbuff[i] = NULL;
+ }
+ rxbdp->lstatus = 0;
+ rxbdp->bufPtr = 0;
+ rxbdp++;
+ }
+ kfree(rx_queue->rx_skbuff);
+ rx_queue->rx_skbuff = NULL;
+}
+
+/* If there are any tx skbs or rx skbs still around, free them.
+ * Then free tx_skbuff and rx_skbuff
+ */
+static void free_skb_resources(struct gfar_private *priv)
+{
+ struct gfar_priv_tx_q *tx_queue = NULL;
+ struct gfar_priv_rx_q *rx_queue = NULL;
+ int i;
+
+ /* Go through all the buffer descriptors and free their data buffers */
+ for (i = 0; i < priv->num_tx_queues; i++) {
+ struct netdev_queue *txq;
+
+ tx_queue = priv->tx_queue[i];
+ txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
+ if (tx_queue->tx_skbuff)
+ free_skb_tx_queue(tx_queue);
+ netdev_tx_reset_queue(txq);
+ }
+
+ for (i = 0; i < priv->num_rx_queues; i++) {
+ rx_queue = priv->rx_queue[i];
+ if (rx_queue->rx_skbuff)
+ free_skb_rx_queue(rx_queue);
+ }
+
+ dma_free_coherent(priv->dev,
+ sizeof(struct txbd8) * priv->total_tx_ring_size +
+ sizeof(struct rxbd8) * priv->total_rx_ring_size,
+ priv->tx_queue[0]->tx_bd_base,
+ priv->tx_queue[0]->tx_bd_dma_base);
+}
+
+void gfar_start(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 tempval;
+ int i = 0;
+
+ /* Enable Rx/Tx hw queues */
+ gfar_write(&regs->rqueue, priv->rqueue);
+ gfar_write(&regs->tqueue, priv->tqueue);
+
+ /* Initialize DMACTRL to have WWR and WOP */
+ tempval = gfar_read(&regs->dmactrl);
+ tempval |= DMACTRL_INIT_SETTINGS;
+ gfar_write(&regs->dmactrl, tempval);
+
+ /* Make sure we aren't stopped */
+ tempval = gfar_read(&regs->dmactrl);
+ tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
+ gfar_write(&regs->dmactrl, tempval);
+
+ for (i = 0; i < priv->num_grps; i++) {
+ regs = priv->gfargrp[i].regs;
+ /* Clear THLT/RHLT, so that the DMA starts polling now */
+ gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
+ gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
+ }
+
+ /* Enable Rx/Tx DMA */
+ tempval = gfar_read(&regs->maccfg1);
+ tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
+ gfar_write(&regs->maccfg1, tempval);
+
+ gfar_ints_enable(priv);
+
+ priv->ndev->trans_start = jiffies; /* prevent tx timeout */
+}
+
+static void free_grp_irqs(struct gfar_priv_grp *grp)
+{
+ free_irq(gfar_irq(grp, TX)->irq, grp);
+ free_irq(gfar_irq(grp, RX)->irq, grp);
+ free_irq(gfar_irq(grp, ER)->irq, grp);
+}
+
+static int register_grp_irqs(struct gfar_priv_grp *grp)
+{
+ struct gfar_private *priv = grp->priv;
+ struct net_device *dev = priv->ndev;
+ int err;
+
+ /* If the device has multiple interrupts, register for
+ * them. Otherwise, only register for the one
+ */
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
+ /* Install our interrupt handlers for Error,
+ * Transmit, and Receive
+ */
+ err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0,
+ gfar_irq(grp, ER)->name, grp);
+ if (err < 0) {
+ netif_err(priv, intr, dev, "Can't get IRQ %d\n",
+ gfar_irq(grp, ER)->irq);
+
+ goto err_irq_fail;
+ }
+ err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0,
+ gfar_irq(grp, TX)->name, grp);
+ if (err < 0) {
+ netif_err(priv, intr, dev, "Can't get IRQ %d\n",
+ gfar_irq(grp, TX)->irq);
+ goto tx_irq_fail;
+ }
+ err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0,
+ gfar_irq(grp, RX)->name, grp);
+ if (err < 0) {
+ netif_err(priv, intr, dev, "Can't get IRQ %d\n",
+ gfar_irq(grp, RX)->irq);
+ goto rx_irq_fail;
+ }
+ } else {
+ err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0,
+ gfar_irq(grp, TX)->name, grp);
+ if (err < 0) {
+ netif_err(priv, intr, dev, "Can't get IRQ %d\n",
+ gfar_irq(grp, TX)->irq);
+ goto err_irq_fail;
+ }
+ }
+
+ return 0;
+
+rx_irq_fail:
+ free_irq(gfar_irq(grp, TX)->irq, grp);
+tx_irq_fail:
+ free_irq(gfar_irq(grp, ER)->irq, grp);
+err_irq_fail:
+ return err;
+
+}
+
+static void gfar_free_irq(struct gfar_private *priv)
+{
+ int i;
+
+ /* Free the IRQs */
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
+ for (i = 0; i < priv->num_grps; i++)
+ free_grp_irqs(&priv->gfargrp[i]);
+ } else {
+ for (i = 0; i < priv->num_grps; i++)
+ free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
+ &priv->gfargrp[i]);
+ }
+}
+
+static int gfar_request_irq(struct gfar_private *priv)
+{
+ int err, i, j;
+
+ for (i = 0; i < priv->num_grps; i++) {
+ err = register_grp_irqs(&priv->gfargrp[i]);
+ if (err) {
+ for (j = 0; j < i; j++)
+ free_grp_irqs(&priv->gfargrp[j]);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+/* Bring the controller up and running */
+int startup_gfar(struct net_device *ndev)
+{
+ struct gfar_private *priv = netdev_priv(ndev);
+ int err;
+
+ gfar_mac_reset(priv);
+
+ err = gfar_alloc_skb_resources(ndev);
+ if (err)
+ return err;
+
+ gfar_init_tx_rx_base(priv);
+
+ smp_mb__before_atomic();
+ clear_bit(GFAR_DOWN, &priv->state);
+ smp_mb__after_atomic();
+
+ /* Start Rx/Tx DMA and enable the interrupts */
+ gfar_start(priv);
+
+ phy_start(priv->phydev);
+
+ enable_napi(priv);
+
+ netif_tx_wake_all_queues(ndev);
+
+ return 0;
+}
+
+/* Called when something needs to use the ethernet device
+ * Returns 0 for success.
+ */
+static int gfar_enet_open(struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ int err;
+
+ err = init_phy(dev);
+ if (err)
+ return err;
+
+ err = gfar_request_irq(priv);
+ if (err)
+ return err;
+
+ err = startup_gfar(dev);
+ if (err)
+ return err;
+
+ device_set_wakeup_enable(&dev->dev, priv->wol_en);
+
+ return err;
+}
+
+static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
+{
+ struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);
+
+ memset(fcb, 0, GMAC_FCB_LEN);
+
+ return fcb;
+}
+
+static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
+ int fcb_length)
+{
+ /* If we're here, it's an IP packet with a TCP or UDP
+ * payload. We set it to checksum, using a pseudo-header
+ * we provide
+ */
+ u8 flags = TXFCB_DEFAULT;
+
+ /* Tell the controller what the protocol is
+ * And provide the already calculated phcs
+ */
+ if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
+ flags |= TXFCB_UDP;
+ fcb->phcs = (__force __be16)(udp_hdr(skb)->check);
+ } else
+ fcb->phcs = (__force __be16)(tcp_hdr(skb)->check);
+
+ /* l3os is the distance between the start of the
+ * frame (skb->data) and the start of the IP hdr.
+ * l4os is the distance between the start of the
+ * l3 hdr and the l4 hdr
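+ * (e.g. for an untagged IPv4 frame with no IP options this is
+ * typically l3os = 14 and l4os = 20)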
+ */
+ fcb->l3os = (u8)(skb_network_offset(skb) - fcb_length);
+ fcb->l4os = skb_network_header_len(skb);
+
+ fcb->flags = flags;
+}
+
+static inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
+{
+ fcb->flags |= TXFCB_VLN;
+ fcb->vlctl = cpu_to_be16(skb_vlan_tag_get(skb));
+}
+
+static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
+ struct txbd8 *base, int ring_size)
+{
+ struct txbd8 *new_bd = bdp + stride;
+
+ return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
+}
+
+static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
+ int ring_size)
+{
+ return skip_txbd(bdp, 1, base, ring_size);
+}
+
+/* eTSEC12: csum generation not supported for some fcb offsets */
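+/* (i.e. the workaround below triggers when the FCB would start in the
+ * last 7 bytes of a 32-byte window: fcb_addr % 0x20 > 0x18)
+ */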
+static inline bool gfar_csum_errata_12(struct gfar_private *priv,
+ unsigned long fcb_addr)
+{
+ return (gfar_has_errata(priv, GFAR_ERRATA_12) &&
+ (fcb_addr % 0x20) > 0x18);
+}
+
+/* eTSEC76: csum generation for frames larger than 2500 may
+ * cause excess delays before start of transmission
+ */
+static inline bool gfar_csum_errata_76(struct gfar_private *priv,
+ unsigned int len)
+{
+ return (gfar_has_errata(priv, GFAR_ERRATA_76) &&
+ (len > 2500));
+}
+
+/* This is called by the kernel when a frame is ready for transmission.
+ * It is pointed to by the dev->hard_start_xmit function pointer
+ */
+static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ struct gfar_priv_tx_q *tx_queue = NULL;
+ struct netdev_queue *txq;
+ struct gfar __iomem *regs = NULL;
+ struct txfcb *fcb = NULL;
+ struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
+ u32 lstatus;
+ int i, rq = 0;
+ int do_tstamp, do_csum, do_vlan;
+ u32 bufaddr;
+ unsigned long flags;
+ unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0;
+
+ rq = skb->queue_mapping;
+ tx_queue = priv->tx_queue[rq];
+ txq = netdev_get_tx_queue(dev, rq);
+ base = tx_queue->tx_bd_base;
+ regs = tx_queue->grp->regs;
+
+ do_csum = (CHECKSUM_PARTIAL == skb->ip_summed);
+ do_vlan = skb_vlan_tag_present(skb);
+ do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ priv->hwts_tx_en;
+
+ if (do_csum || do_vlan)
+ fcb_len = GMAC_FCB_LEN;
+
+ /* check if time stamp should be generated */
+ if (unlikely(do_tstamp))
+ fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
+
+ /* make space for additional header when fcb is needed */
+ if (fcb_len && unlikely(skb_headroom(skb) < fcb_len)) {
+ struct sk_buff *skb_new;
+
+ skb_new = skb_realloc_headroom(skb, fcb_len);
+ if (!skb_new) {
+ dev->stats.tx_errors++;
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ if (skb->sk)
+ skb_set_owner_w(skb_new, skb->sk);
+ dev_consume_skb_any(skb);
+ skb = skb_new;
+ }
+
+ /* total number of fragments in the SKB */
+ nr_frags = skb_shinfo(skb)->nr_frags;
+
+ /* calculate the required number of TxBDs for this skb */
+ if (unlikely(do_tstamp))
+ nr_txbds = nr_frags + 2;
+ else
+ nr_txbds = nr_frags + 1;
+
+ /* check if there is space to queue this packet */
+ if (nr_txbds > tx_queue->num_txbdfree) {
+ /* no space, stop the queue */
+ netif_tx_stop_queue(txq);
+ dev->stats.tx_fifo_errors++;
+ return NETDEV_TX_BUSY;
+ }
+
+ /* Update transmit stats */
+ bytes_sent = skb->len;
+ tx_queue->stats.tx_bytes += bytes_sent;
+ /* keep Tx bytes on wire for BQL accounting */
+ GFAR_CB(skb)->bytes_sent = bytes_sent;
+ tx_queue->stats.tx_packets++;
+
+ txbdp = txbdp_start = tx_queue->cur_tx;
+ lstatus = be32_to_cpu(txbdp->lstatus);
+
+ /* Time stamp insertion requires one additional TxBD */
+ if (unlikely(do_tstamp))
+ txbdp_tstamp = txbdp = next_txbd(txbdp, base,
+ tx_queue->tx_ring_size);
+
+ if (nr_frags == 0) {
+ if (unlikely(do_tstamp)) {
+ u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus);
+
+ lstatus_ts |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
+ txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts);
+ } else {
+ lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
+ }
+ } else {
+ /* Place the fragment addresses and lengths into the TxBDs */
+ for (i = 0; i < nr_frags; i++) {
+ unsigned int frag_len;
+ /* Point at the next BD, wrapping as needed */
+ txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
+
+ frag_len = skb_shinfo(skb)->frags[i].size;
+
+ lstatus = be32_to_cpu(txbdp->lstatus) | frag_len |
+ BD_LFLAG(TXBD_READY);
+
+ /* Handle the last BD specially */
+ if (i == nr_frags - 1)
+ lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
+
+ bufaddr = skb_frag_dma_map(priv->dev,
+ &skb_shinfo(skb)->frags[i],
+ 0,
+ frag_len,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
+ goto dma_map_err;
+
+ /* set the TxBD length and buffer pointer */
+ txbdp->bufPtr = cpu_to_be32(bufaddr);
+ txbdp->lstatus = cpu_to_be32(lstatus);
+ }
+
+ lstatus = be32_to_cpu(txbdp_start->lstatus);
+ }
+
+ /* Add TxPAL between FCB and frame if required */
+ if (unlikely(do_tstamp)) {
+ skb_push(skb, GMAC_TXPAL_LEN);
+ memset(skb->data, 0, GMAC_TXPAL_LEN);
+ }
+
+ /* Add TxFCB if required */
+ if (fcb_len) {
+ fcb = gfar_add_fcb(skb);
+ lstatus |= BD_LFLAG(TXBD_TOE);
+ }
+
+ /* Set up checksumming */
+ if (do_csum) {
+ gfar_tx_checksum(skb, fcb, fcb_len);
+
+ if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) ||
+ unlikely(gfar_csum_errata_76(priv, skb->len))) {
+ __skb_pull(skb, GMAC_FCB_LEN);
+ skb_checksum_help(skb);
+ if (do_vlan || do_tstamp) {
+ /* put back a new fcb for vlan/tstamp TOE */
+ fcb = gfar_add_fcb(skb);
+ } else {
+ /* Tx TOE not used */
+ lstatus &= ~(BD_LFLAG(TXBD_TOE));
+ fcb = NULL;
+ }
+ }
+ }
+
+ if (do_vlan)
+ gfar_tx_vlan(skb, fcb);
+
+ /* Setup tx hardware time stamping if requested */
+ if (unlikely(do_tstamp)) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ fcb->ptp = 1;
+ }
+
+ bufaddr = dma_map_single(priv->dev, skb->data, skb_headlen(skb),
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
+ goto dma_map_err;
+
+ txbdp_start->bufPtr = cpu_to_be32(bufaddr);
+
+ /* If time stamping is requested one additional TxBD must be set up. The
+ * first TxBD points to the FCB and must have a data length of
+ * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
+ * the full frame length.
+ */
+ if (unlikely(do_tstamp)) {
+ u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus);
+
+ bufaddr = be32_to_cpu(txbdp_start->bufPtr);
+ bufaddr += fcb_len;
+ lstatus_ts |= BD_LFLAG(TXBD_READY) |
+ (skb_headlen(skb) - fcb_len);
+
+ txbdp_tstamp->bufPtr = cpu_to_be32(bufaddr);
+ txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts);
+ lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
+ } else {
+ lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
+ }
+
+ netdev_tx_sent_queue(txq, bytes_sent);
+
+ /* We can work in parallel with gfar_clean_tx_ring(), except
+ * when modifying num_txbdfree. Note that we didn't grab the lock
+ * when we were reading the num_txbdfree and checking for available
+ * space, that's because outside of this function it can only grow,
+ * and once we've got needed space, it cannot suddenly disappear.
+ *
+ * The lock also protects us from gfar_error(), which can modify
+ * regs->tstat and thus retrigger the transfers, which is why we
+ * also must grab the lock before setting ready bit for the first
+ * to be transmitted BD.
+ */
+ spin_lock_irqsave(&tx_queue->txlock, flags);
+
+ gfar_wmb();
+
+ txbdp_start->lstatus = cpu_to_be32(lstatus);
+
+ gfar_wmb(); /* force lstatus write before tx_skbuff */
+
+ tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
+
+ /* Update the current skb pointer to the next entry we will use
+ * (wrapping if necessary)
+ */
+ tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
+ TX_RING_MOD_MASK(tx_queue->tx_ring_size);
+
+ tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
+
+ /* reduce TxBD free count */
+ tx_queue->num_txbdfree -= (nr_txbds);
+
+ /* If the next BD still needs to be cleaned up, then the bds
+ * are full. We need to tell the kernel to stop sending us stuff.
+ */
+ if (!tx_queue->num_txbdfree) {
+ netif_tx_stop_queue(txq);
+
+ dev->stats.tx_fifo_errors++;
+ }
+
+ /* Tell the DMA to go go go */
+ gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
+
+ /* Unlock priv */
+ spin_unlock_irqrestore(&tx_queue->txlock, flags);
+
+ return NETDEV_TX_OK;
+
+dma_map_err:
+ txbdp = next_txbd(txbdp_start, base, tx_queue->tx_ring_size);
+ if (do_tstamp)
+ txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
+ for (i = 0; i < nr_frags; i++) {
+ lstatus = be32_to_cpu(txbdp->lstatus);
+ if (!(lstatus & BD_LFLAG(TXBD_READY)))
+ break;
+
+ lstatus &= ~BD_LFLAG(TXBD_READY);
+ txbdp->lstatus = cpu_to_be32(lstatus);
+ bufaddr = be32_to_cpu(txbdp->bufPtr);
+ dma_unmap_page(priv->dev, bufaddr, be16_to_cpu(txbdp->length),
+ DMA_TO_DEVICE);
+ txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
+ }
+ gfar_wmb();
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+/* Stops the kernel queue, and halts the controller */
+static int gfar_close(struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+
+ cancel_work_sync(&priv->reset_task);
+ stop_gfar(dev);
+
+ /* Disconnect from the PHY */
+ phy_disconnect(priv->phydev);
+ priv->phydev = NULL;
+
+ gfar_free_irq(priv);
+
+ return 0;
+}
+
+/* Changes the mac address if the controller is not running. */
+static int gfar_set_mac_address(struct net_device *dev)
+{
+ gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
+
+ return 0;
+}
+
+static int gfar_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ int frame_size = new_mtu + ETH_HLEN;
+
+ if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
+ netif_err(priv, drv, dev, "Invalid MTU setting\n");
+ return -EINVAL;
+ }
+
+ while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
+ cpu_relax();
+
+ if (dev->flags & IFF_UP)
+ stop_gfar(dev);
+
+ dev->mtu = new_mtu;
+
+ if (dev->flags & IFF_UP)
+ startup_gfar(dev);
+
+ clear_bit_unlock(GFAR_RESETTING, &priv->state);
+
+ return 0;
+}
+
+void reset_gfar(struct net_device *ndev)
+{
+ struct gfar_private *priv = netdev_priv(ndev);
+
+ while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
+ cpu_relax();
+
+ stop_gfar(ndev);
+ startup_gfar(ndev);
+
+ clear_bit_unlock(GFAR_RESETTING, &priv->state);
+}
+
+/* gfar_reset_task gets scheduled when a packet has not been
+ * transmitted after a set amount of time.
+ * For now, assume that clearing out all the structures, and
+ * starting over will fix the problem.
+ */
+static void gfar_reset_task(struct work_struct *work)
+{
+ struct gfar_private *priv = container_of(work, struct gfar_private,
+ reset_task);
+ reset_gfar(priv->ndev);
+}
+
+static void gfar_timeout(struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+
+ dev->stats.tx_errors++;
+ schedule_work(&priv->reset_task);
+}
+
+static void gfar_align_skb(struct sk_buff *skb)
+{
+ /* We need the data buffer to be aligned properly. We will reserve
+ * as many bytes as needed to align the data properly
+ */
+ skb_reserve(skb, RXBUF_ALIGNMENT -
+ (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)));
+}
+
+/* Interrupt Handler for Transmit complete */
+static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
+{
+ struct net_device *dev = tx_queue->dev;
+ struct netdev_queue *txq;
+ struct gfar_private *priv = netdev_priv(dev);
+ struct txbd8 *bdp, *next = NULL;
+ struct txbd8 *lbdp = NULL;
+ struct txbd8 *base = tx_queue->tx_bd_base;
+ struct sk_buff *skb;
+ int skb_dirtytx;
+ int tx_ring_size = tx_queue->tx_ring_size;
+ int frags = 0, nr_txbds = 0;
+ int i;
+ int howmany = 0;
+ int tqi = tx_queue->qindex;
+ unsigned int bytes_sent = 0;
+ u32 lstatus;
+ size_t buflen;
+
+ txq = netdev_get_tx_queue(dev, tqi);
+ bdp = tx_queue->dirty_tx;
+ skb_dirtytx = tx_queue->skb_dirtytx;
+
+ while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
+ unsigned long flags;
+
+ frags = skb_shinfo(skb)->nr_frags;
+
+ /* When time stamping, one additional TxBD must be freed.
+ * Also, we need to dma_unmap_single() the TxPAL.
+ */
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
+ nr_txbds = frags + 2;
+ else
+ nr_txbds = frags + 1;
+
+ lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);
+
+ lstatus = be32_to_cpu(lbdp->lstatus);
+
+ /* Only clean completed frames */
+ if ((lstatus & BD_LFLAG(TXBD_READY)) &&
+ (lstatus & BD_LENGTH_MASK))
+ break;
+
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
+ next = next_txbd(bdp, base, tx_ring_size);
+ buflen = be16_to_cpu(next->length) +
+ GMAC_FCB_LEN + GMAC_TXPAL_LEN;
+ } else
+ buflen = be16_to_cpu(bdp->length);
+
+ dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr),
+ buflen, DMA_TO_DEVICE);
+
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
+ struct skb_shared_hwtstamps shhwtstamps;
+ u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7);
+
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ shhwtstamps.hwtstamp = ns_to_ktime(*ns);
+ skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
+ skb_tstamp_tx(skb, &shhwtstamps);
+ gfar_clear_txbd_status(bdp);
+ bdp = next;
+ }
+
+ gfar_clear_txbd_status(bdp);
+ bdp = next_txbd(bdp, base, tx_ring_size);
+
+ for (i = 0; i < frags; i++) {
+ dma_unmap_page(priv->dev, be32_to_cpu(bdp->bufPtr),
+ be16_to_cpu(bdp->length),
+ DMA_TO_DEVICE);
+ gfar_clear_txbd_status(bdp);
+ bdp = next_txbd(bdp, base, tx_ring_size);
+ }
+
+ bytes_sent += GFAR_CB(skb)->bytes_sent;
+
+ dev_kfree_skb_any(skb);
+
+ tx_queue->tx_skbuff[skb_dirtytx] = NULL;
+
+ skb_dirtytx = (skb_dirtytx + 1) &
+ TX_RING_MOD_MASK(tx_ring_size);
+
+ howmany++;
+ spin_lock_irqsave(&tx_queue->txlock, flags);
+ tx_queue->num_txbdfree += nr_txbds;
+ spin_unlock_irqrestore(&tx_queue->txlock, flags);
+ }
+
+ /* If we freed a buffer, we can restart transmission, if necessary */
+ if (tx_queue->num_txbdfree &&
+ netif_tx_queue_stopped(txq) &&
+ !(test_bit(GFAR_DOWN, &priv->state)))
+ netif_wake_subqueue(priv->ndev, tqi);
+
+ /* Update dirty indicators */
+ tx_queue->skb_dirtytx = skb_dirtytx;
+ tx_queue->dirty_tx = bdp;
+
+ netdev_tx_completed_queue(txq, howmany, bytes_sent);
+}
+
+static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ struct sk_buff *skb;
+
+ skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
+ if (!skb)
+ return NULL;
+
+ gfar_align_skb(skb);
+
+ return skb;
+}
+
+static struct sk_buff *gfar_new_skb(struct net_device *dev, dma_addr_t *bufaddr)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ struct sk_buff *skb;
+ dma_addr_t addr;
+
+ skb = gfar_alloc_skb(dev);
+ if (!skb)
+ return NULL;
+
+ addr = dma_map_single(priv->dev, skb->data,
+ priv->rx_buffer_size, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(priv->dev, addr))) {
+ dev_kfree_skb_any(skb);
+ return NULL;
+ }
+
+ *bufaddr = addr;
+ return skb;
+}
+
+static inline void count_errors(unsigned short status, struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ struct gfar_extra_stats *estats = &priv->extra_stats;
+
+ /* If the packet was truncated, none of the other errors matter */
+ if (status & RXBD_TRUNCATED) {
+ stats->rx_length_errors++;
+
+ atomic64_inc(&estats->rx_trunc);
+
+ return;
+ }
+ /* Count the errors, if there were any */
+ if (status & (RXBD_LARGE | RXBD_SHORT)) {
+ stats->rx_length_errors++;
+
+ if (status & RXBD_LARGE)
+ atomic64_inc(&estats->rx_large);
+ else
+ atomic64_inc(&estats->rx_short);
+ }
+ if (status & RXBD_NONOCTET) {
+ stats->rx_frame_errors++;
+ atomic64_inc(&estats->rx_nonoctet);
+ }
+ if (status & RXBD_CRCERR) {
+ atomic64_inc(&estats->rx_crcerr);
+ stats->rx_crc_errors++;
+ }
+ if (status & RXBD_OVERRUN) {
+ atomic64_inc(&estats->rx_overrun);
+ stats->rx_crc_errors++;
+ }
+}
+
+irqreturn_t gfar_receive(int irq, void *grp_id)
+{
+ struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
+ unsigned long flags;
+ u32 imask;
+
+ if (likely(napi_schedule_prep(&grp->napi_rx))) {
+ spin_lock_irqsave(&grp->grplock, flags);
+ imask = gfar_read(&grp->regs->imask);
+ imask &= IMASK_RX_DISABLED;
+ gfar_write(&grp->regs->imask, imask);
+ spin_unlock_irqrestore(&grp->grplock, flags);
+ __napi_schedule(&grp->napi_rx);
+ } else {
+ /* Clear IEVENT, so interrupts aren't called again
+ * because of the packets that have already arrived.
+ */
+ gfar_write(&grp->regs->ievent, IEVENT_RX_MASK);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* Interrupt Handler for Transmit complete */
+static irqreturn_t gfar_transmit(int irq, void *grp_id)
+{
+ struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
+ unsigned long flags;
+ u32 imask;
+
+ if (likely(napi_schedule_prep(&grp->napi_tx))) {
+ spin_lock_irqsave(&grp->grplock, flags);
+ imask = gfar_read(&grp->regs->imask);
+ imask &= IMASK_TX_DISABLED;
+ gfar_write(&grp->regs->imask, imask);
+ spin_unlock_irqrestore(&grp->grplock, flags);
+ __napi_schedule(&grp->napi_tx);
+ } else {
+ /* Clear IEVENT, so interrupts aren't called again
+ * because of the packets that have already arrived.
+ */
+ gfar_write(&grp->regs->ievent, IEVENT_TX_MASK);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
+{
+ /* If valid headers were found, and valid sums
+ * were verified, then we tell the kernel that no
+ * checksumming is necessary. Otherwise, we leave it to the stack
+ */
+ if ((be16_to_cpu(fcb->flags) & RXFCB_CSUM_MASK) ==
+ (RXFCB_CIP | RXFCB_CTU))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb_checksum_none_assert(skb);
+}
+
+/* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
+static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
+ int amount_pull, struct napi_struct *napi)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ struct rxfcb *fcb = NULL;
+
+ /* fcb is at the beginning if exists */
+ fcb = (struct rxfcb *)skb->data;
+
+ /* Remove the FCB from the skb
+ * Remove the padded bytes, if there are any
+ */
+ if (amount_pull) {
+ skb_record_rx_queue(skb, fcb->rq);
+ skb_pull(skb, amount_pull);
+ }
+
+ /* Get receive timestamp from the skb */
+ if (priv->hwts_rx_en) {
+ struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
+ u64 *ns = (u64 *) skb->data;
+
+ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+ shhwtstamps->hwtstamp = ns_to_ktime(*ns);
+ }
+
+ if (priv->padding)
+ skb_pull(skb, priv->padding);
+
+ if (dev->features & NETIF_F_RXCSUM)
+ gfar_rx_checksum(skb, fcb);
+
+ /* Tell the skb what kind of packet this is */
+ skb->protocol = eth_type_trans(skb, dev);
+
+ /* We need to check for NETIF_F_HW_VLAN_CTAG_RX here.
+ * Even if vlan rx accel is disabled, on some chips
+ * RXFCB_VLN is pseudo randomly set.
+ */
+ if (dev->features & NETIF_F_HW_VLAN_CTAG_RX &&
+ be16_to_cpu(fcb->flags) & RXFCB_VLN)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ be16_to_cpu(fcb->vlctl));
+
+ /* Send the packet up the stack */
+ napi_gro_receive(napi, skb);
+
+}
+
+/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
+ * until the budget/quota has been reached. Returns the number
+ * of frames handled
+ */
+int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
+{
+ struct net_device *dev = rx_queue->dev;
+ struct rxbd8 *bdp, *base;
+ struct sk_buff *skb;
+ int pkt_len;
+ int amount_pull;
+ int howmany = 0;
+ struct gfar_private *priv = netdev_priv(dev);
+
+ /* Get the first full descriptor */
+ bdp = rx_queue->cur_rx;
+ base = rx_queue->rx_bd_base;
+
+ amount_pull = priv->uses_rxfcb ? GMAC_FCB_LEN : 0;
+
+ while (!(be16_to_cpu(bdp->status) & RXBD_EMPTY) && rx_work_limit--) {
+ struct sk_buff *newskb;
+ dma_addr_t bufaddr;
+
+ rmb();
+
+ /* Add another skb for the future */
+ newskb = gfar_new_skb(dev, &bufaddr);
+
+ skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
+
+ dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr),
+ priv->rx_buffer_size, DMA_FROM_DEVICE);
+
+ if (unlikely(!(be16_to_cpu(bdp->status) & RXBD_ERR) &&
+ be16_to_cpu(bdp->length) > priv->rx_buffer_size))
+ bdp->status = cpu_to_be16(RXBD_LARGE);
+
+ /* We drop the frame if we failed to allocate a new buffer */
+ if (unlikely(!newskb ||
+ !(be16_to_cpu(bdp->status) & RXBD_LAST) ||
+ be16_to_cpu(bdp->status) & RXBD_ERR)) {
+ count_errors(be16_to_cpu(bdp->status), dev);
+
+ if (unlikely(!newskb)) {
+ newskb = skb;
+ bufaddr = be32_to_cpu(bdp->bufPtr);
+ } else if (skb)
+ dev_kfree_skb(skb);
+ } else {
+ /* Increment the number of packets */
+ rx_queue->stats.rx_packets++;
+ howmany++;
+
+ if (likely(skb)) {
+ pkt_len = be16_to_cpu(bdp->length) -
+ ETH_FCS_LEN;
+ /* Remove the FCS from the packet length */
+ skb_put(skb, pkt_len);
+ rx_queue->stats.rx_bytes += pkt_len;
+ skb_record_rx_queue(skb, rx_queue->qindex);
+ gfar_process_frame(dev, skb, amount_pull,
+ &rx_queue->grp->napi_rx);
+
+ } else {
+ netif_warn(priv, rx_err, dev, "Missing skb!\n");
+ rx_queue->stats.rx_dropped++;
+ atomic64_inc(&priv->extra_stats.rx_skbmissing);
+ }
+
+ }
+
+ rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;
+
+ /* Setup the new bdp */
+ gfar_init_rxbdp(rx_queue, bdp, bufaddr);
+
+ /* Update Last Free RxBD pointer for LFC */
+ if (unlikely(rx_queue->rfbptr && priv->tx_actual_en))
+ gfar_write(rx_queue->rfbptr, (u32)bdp);
+
+ /* Update to the next pointer */
+ bdp = next_bd(bdp, base, rx_queue->rx_ring_size);
+
+ /* update to point at the next skb */
+ rx_queue->skb_currx = (rx_queue->skb_currx + 1) &
+ RX_RING_MOD_MASK(rx_queue->rx_ring_size);
+ }
+
+ /* Update the current rxbd pointer to be the next one */
+ rx_queue->cur_rx = bdp;
+
+ return howmany;
+}
+
+static int gfar_poll_rx_sq(struct napi_struct *napi, int budget)
+{
+ struct gfar_priv_grp *gfargrp =
+ container_of(napi, struct gfar_priv_grp, napi_rx);
+ struct gfar __iomem *regs = gfargrp->regs;
+ struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue;
+ int work_done = 0;
+
+ /* Clear IEVENT, so interrupts aren't called again
+ * because of the packets that have already arrived
+ */
+ gfar_write(&regs->ievent, IEVENT_RX_MASK);
+
+ work_done = gfar_clean_rx_ring(rx_queue, budget);
+
+ if (work_done < budget) {
+ u32 imask;
+ napi_complete(napi);
+ /* Clear the halt bit in RSTAT */
+ gfar_write(&regs->rstat, gfargrp->rstat);
+
+ spin_lock_irq(&gfargrp->grplock);
+ imask = gfar_read(&regs->imask);
+ imask |= IMASK_RX_DEFAULT;
+ gfar_write(&regs->imask, imask);
+ spin_unlock_irq(&gfargrp->grplock);
+ }
+
+ return work_done;
+}
+
+static int gfar_poll_tx_sq(struct napi_struct *napi, int budget)
+{
+ struct gfar_priv_grp *gfargrp =
+ container_of(napi, struct gfar_priv_grp, napi_tx);
+ struct gfar __iomem *regs = gfargrp->regs;
+ struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue;
+ u32 imask;
+
+ /* Clear IEVENT, so interrupts aren't called again
+ * because of the packets that have already arrived
+ */
+ gfar_write(&regs->ievent, IEVENT_TX_MASK);
+
+ /* run Tx cleanup to completion */
+ if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
+ gfar_clean_tx_ring(tx_queue);
+
+ napi_complete(napi);
+
+ spin_lock_irq(&gfargrp->grplock);
+ imask = gfar_read(&regs->imask);
+ imask |= IMASK_TX_DEFAULT;
+ gfar_write(&regs->imask, imask);
+ spin_unlock_irq(&gfargrp->grplock);
+
+ return 0;
+}
+
+static int gfar_poll_rx(struct napi_struct *napi, int budget)
+{
+ struct gfar_priv_grp *gfargrp =
+ container_of(napi, struct gfar_priv_grp, napi_rx);
+ struct gfar_private *priv = gfargrp->priv;
+ struct gfar __iomem *regs = gfargrp->regs;
+ struct gfar_priv_rx_q *rx_queue = NULL;
+ int work_done = 0, work_done_per_q = 0;
+ int i, budget_per_q = 0;
+ unsigned long rstat_rxf;
+ int num_act_queues;
+
+ /* Clear IEVENT, so interrupts aren't called again
+ * because of the packets that have already arrived
+ */
+ gfar_write(&regs->ievent, IEVENT_RX_MASK);
+
+ rstat_rxf = gfar_read(&regs->rstat) & RSTAT_RXF_MASK;
+
+ num_act_queues = bitmap_weight(&rstat_rxf, MAX_RX_QS);
+ if (num_act_queues)
+ budget_per_q = budget/num_act_queues;
+
+ for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
+ /* skip queue if not active */
+ if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
+ continue;
+
+ rx_queue = priv->rx_queue[i];
+ work_done_per_q =
+ gfar_clean_rx_ring(rx_queue, budget_per_q);
+ work_done += work_done_per_q;
+
+ /* finished processing this queue */
+ if (work_done_per_q < budget_per_q) {
+ /* clear active queue hw indication */
+ gfar_write(&regs->rstat,
+ RSTAT_CLEAR_RXF0 >> i);
+ num_act_queues--;
+
+ if (!num_act_queues)
+ break;
+ }
+ }
+
+ if (!num_act_queues) {
+ u32 imask;
+ napi_complete(napi);
+
+ /* Clear the halt bit in RSTAT */
+ gfar_write(&regs->rstat, gfargrp->rstat);
+
+ spin_lock_irq(&gfargrp->grplock);
+ imask = gfar_read(&regs->imask);
+ imask |= IMASK_RX_DEFAULT;
+ gfar_write(&regs->imask, imask);
+ spin_unlock_irq(&gfargrp->grplock);
+ }
+
+ return work_done;
+}
+
+static int gfar_poll_tx(struct napi_struct *napi, int budget)
+{
+ struct gfar_priv_grp *gfargrp =
+ container_of(napi, struct gfar_priv_grp, napi_tx);
+ struct gfar_private *priv = gfargrp->priv;
+ struct gfar __iomem *regs = gfargrp->regs;
+ struct gfar_priv_tx_q *tx_queue = NULL;
+ int has_tx_work = 0;
+ int i;
+
+ /* Clear IEVENT, so interrupts aren't called again
+ * because of the packets that have already arrived
+ */
+ gfar_write(&regs->ievent, IEVENT_TX_MASK);
+
+ for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
+ tx_queue = priv->tx_queue[i];
+ /* run Tx cleanup to completion */
+ if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
+ gfar_clean_tx_ring(tx_queue);
+ has_tx_work = 1;
+ }
+ }
+
+ if (!has_tx_work) {
+ u32 imask;
+ napi_complete(napi);
+
+ spin_lock_irq(&gfargrp->grplock);
+ imask = gfar_read(&regs->imask);
+ imask |= IMASK_TX_DEFAULT;
+ gfar_write(&regs->imask, imask);
+ spin_unlock_irq(&gfargrp->grplock);
+ }
+
+ return 0;
+}
+
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/* Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts. It's not called while
+ * the interrupt routine is executing.
+ */
+static void gfar_netpoll(struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ int i;
+
+ /* If the device has multiple interrupts, run tx/rx */
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
+ for (i = 0; i < priv->num_grps; i++) {
+ struct gfar_priv_grp *grp = &priv->gfargrp[i];
+
+ disable_irq(gfar_irq(grp, TX)->irq);
+ disable_irq(gfar_irq(grp, RX)->irq);
+ disable_irq(gfar_irq(grp, ER)->irq);
+ gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
+ enable_irq(gfar_irq(grp, ER)->irq);
+ enable_irq(gfar_irq(grp, RX)->irq);
+ enable_irq(gfar_irq(grp, TX)->irq);
+ }
+ } else {
+ for (i = 0; i < priv->num_grps; i++) {
+ struct gfar_priv_grp *grp = &priv->gfargrp[i];
+
+ disable_irq(gfar_irq(grp, TX)->irq);
+ gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
+ enable_irq(gfar_irq(grp, TX)->irq);
+ }
+ }
+}
+#endif
+
+/* The interrupt handler for devices with one interrupt */
+static irqreturn_t gfar_interrupt(int irq, void *grp_id)
+{
+ struct gfar_priv_grp *gfargrp = grp_id;
+
+ /* Save ievent for future reference */
+ u32 events = gfar_read(&gfargrp->regs->ievent);
+
+ /* Check for reception */
+ if (events & IEVENT_RX_MASK)
+ gfar_receive(irq, grp_id);
+
+ /* Check for transmit completion */
+ if (events & IEVENT_TX_MASK)
+ gfar_transmit(irq, grp_id);
+
+ /* Check for errors */
+ if (events & IEVENT_ERR_MASK)
+ gfar_error(irq, grp_id);
+
+ return IRQ_HANDLED;
+}
+
+/* Called every time the controller might need to be made
+ * aware of new link state. The PHY code conveys this
+ * information through variables in the phydev structure, and this
+ * function converts those variables into the appropriate
+ * register values, and can bring down the device if needed.
+ */
+static void adjust_link(struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ struct phy_device *phydev = priv->phydev;
+
+ if (unlikely(phydev->link != priv->oldlink ||
+ (phydev->link && (phydev->duplex != priv->oldduplex ||
+ phydev->speed != priv->oldspeed))))
+ gfar_update_link_state(priv);
+}
+
+/* Update the hash table based on the current list of multicast
+ * addresses we subscribe to. Also, change the promiscuity of
+ * the device based on the flags (this function is called
+ * whenever dev->flags is changed)
+ */
+static void gfar_set_multi(struct net_device *dev)
+{
+ struct netdev_hw_addr *ha;
+ struct gfar_private *priv = netdev_priv(dev);
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 tempval;
+
+ if (dev->flags & IFF_PROMISC) {
+ /* Set RCTRL to PROM */
+ tempval = gfar_read(&regs->rctrl);
+ tempval |= RCTRL_PROM;
+ gfar_write(&regs->rctrl, tempval);
+ } else {
+ /* Set RCTRL to not PROM */
+ tempval = gfar_read(&regs->rctrl);
+ tempval &= ~(RCTRL_PROM);
+ gfar_write(&regs->rctrl, tempval);
+ }
+
+ if (dev->flags & IFF_ALLMULTI) {
+ /* Set the hash to rx all multicast frames */
+ gfar_write(&regs->igaddr0, 0xffffffff);
+ gfar_write(&regs->igaddr1, 0xffffffff);
+ gfar_write(&regs->igaddr2, 0xffffffff);
+ gfar_write(&regs->igaddr3, 0xffffffff);
+ gfar_write(&regs->igaddr4, 0xffffffff);
+ gfar_write(&regs->igaddr5, 0xffffffff);
+ gfar_write(&regs->igaddr6, 0xffffffff);
+ gfar_write(&regs->igaddr7, 0xffffffff);
+ gfar_write(&regs->gaddr0, 0xffffffff);
+ gfar_write(&regs->gaddr1, 0xffffffff);
+ gfar_write(&regs->gaddr2, 0xffffffff);
+ gfar_write(&regs->gaddr3, 0xffffffff);
+ gfar_write(&regs->gaddr4, 0xffffffff);
+ gfar_write(&regs->gaddr5, 0xffffffff);
+ gfar_write(&regs->gaddr6, 0xffffffff);
+ gfar_write(&regs->gaddr7, 0xffffffff);
+ } else {
+ int em_num;
+ int idx;
+
+ /* zero out the hash */
+ gfar_write(&regs->igaddr0, 0x0);
+ gfar_write(&regs->igaddr1, 0x0);
+ gfar_write(&regs->igaddr2, 0x0);
+ gfar_write(&regs->igaddr3, 0x0);
+ gfar_write(&regs->igaddr4, 0x0);
+ gfar_write(&regs->igaddr5, 0x0);
+ gfar_write(&regs->igaddr6, 0x0);
+ gfar_write(&regs->igaddr7, 0x0);
+ gfar_write(&regs->gaddr0, 0x0);
+ gfar_write(&regs->gaddr1, 0x0);
+ gfar_write(&regs->gaddr2, 0x0);
+ gfar_write(&regs->gaddr3, 0x0);
+ gfar_write(&regs->gaddr4, 0x0);
+ gfar_write(&regs->gaddr5, 0x0);
+ gfar_write(&regs->gaddr6, 0x0);
+ gfar_write(&regs->gaddr7, 0x0);
+
+ /* If we have extended hash tables, we need to
+ * clear the exact match registers to prepare for
+ * setting them
+ */
+ if (priv->extended_hash) {
+ em_num = GFAR_EM_NUM + 1;
+ gfar_clear_exact_match(dev);
+ idx = 1;
+ } else {
+ idx = 0;
+ em_num = 0;
+ }
+
+ if (netdev_mc_empty(dev))
+ return;
+
+ /* Parse the list, and set the appropriate bits */
+ netdev_for_each_mc_addr(ha, dev) {
+ if (idx < em_num) {
+ gfar_set_mac_for_addr(dev, idx, ha->addr);
+ idx++;
+ } else
+ gfar_set_hash_for_addr(dev, ha->addr);
+ }
+ }
+}
+
+
+/* Clears each of the exact match registers to zero, so they
+ * don't interfere with normal reception
+ */
+static void gfar_clear_exact_match(struct net_device *dev)
+{
+ int idx;
+ static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
+
+ for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
+ gfar_set_mac_for_addr(dev, idx, zero_arr);
+}
+
+/* Set the appropriate hash bit for the given addr */
+/* The algorithm works like so:
+ * 1) Take the Destination Address (ie the multicast address), and
+ * do a CRC on it (little endian), and reverse the bits of the
+ * result.
+ * 2) Use the 8 most significant bits as a hash into a 256-entry
+ * table. The table is controlled through 8 32-bit registers:
+ * gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is
+ * entry 255. This means that the 3 most significant bits of the
+ * hash select which gaddr register to use, and the other 5 bits
+ * indicate which bit (assuming an IBM numbering scheme, which
+ * for PowerPC (tm) is usually the case) in the register holds
+ * the entry.
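+ * For example, with an 8-bit hash a CRC whose top byte is 0x25
+ * (binary 001 00101) selects gaddr register 1 from the top three
+ * bits and bit 5 from the low five, so 1 << (31 - 5) is OR'ed in.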
+ */
+static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
+{
+ u32 tempval;
+ struct gfar_private *priv = netdev_priv(dev);
+ u32 result = ether_crc(ETH_ALEN, addr);
+ int width = priv->hash_width;
+ u8 whichbit = (result >> (32 - width)) & 0x1f;
+ u8 whichreg = result >> (32 - width + 5);
+ u32 value = (1 << (31-whichbit));
+
+ tempval = gfar_read(priv->hash_regs[whichreg]);
+ tempval |= value;
+ gfar_write(priv->hash_regs[whichreg], tempval);
+}
+
+
+/* There are multiple MAC Address register pairs on some controllers.
+ * This function sets the num'th pair to a given address
+ */
+static void gfar_set_mac_for_addr(struct net_device *dev, int num,
+ const u8 *addr)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 tempval;
+ u32 __iomem *macptr = &regs->macstnaddr1;
+
+ macptr += num*2;
+
+ /* For a station address of 0x12345678ABCD in transmission
+ * order (BE), MACnADDR1 is set to 0xCDAB7856 and
+ * MACnADDR2 is set to 0x34120000.
+ */
+ tempval = (addr[5] << 24) | (addr[4] << 16) |
+ (addr[3] << 8) | addr[2];
+
+ gfar_write(macptr, tempval);
+
+ tempval = (addr[1] << 24) | (addr[0] << 16);
+
+ gfar_write(macptr+1, tempval);
+}
+
+/* GFAR error interrupt handler */
+static irqreturn_t gfar_error(int irq, void *grp_id)
+{
+ struct gfar_priv_grp *gfargrp = grp_id;
+ struct gfar __iomem *regs = gfargrp->regs;
+ struct gfar_private *priv = gfargrp->priv;
+ struct net_device *dev = priv->ndev;
+
+ /* Save ievent for future reference */
+ u32 events = gfar_read(&regs->ievent);
+
+ /* Clear IEVENT */
+ gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);
+
+ /* Magic Packet is not an error. */
+ if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
+ (events & IEVENT_MAG))
+ events &= ~IEVENT_MAG;
+
+ /* Report the error details if the message level allows it */
+ if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
+ netdev_dbg(dev,
+ "error interrupt (ievent=0x%08x imask=0x%08x)\n",
+ events, gfar_read(&regs->imask));
+
+ /* Update the error counters */
+ if (events & IEVENT_TXE) {
+ dev->stats.tx_errors++;
+
+ if (events & IEVENT_LC)
+ dev->stats.tx_window_errors++;
+ if (events & IEVENT_CRL)
+ dev->stats.tx_aborted_errors++;
+ if (events & IEVENT_XFUN) {
+ unsigned long flags;
+
+ netif_dbg(priv, tx_err, dev,
+ "TX FIFO underrun, packet dropped\n");
+ dev->stats.tx_dropped++;
+ atomic64_inc(&priv->extra_stats.tx_underrun);
+
+ local_irq_save_nort(flags);
+ lock_tx_qs(priv);
+
+ /* Reactivate the Tx Queues */
+ gfar_write(&regs->tstat, gfargrp->tstat);
+
+ unlock_tx_qs(priv);
+ local_irq_restore_nort(flags);
+ }
+ netif_dbg(priv, tx_err, dev, "Transmit Error\n");
+ }
+ if (events & IEVENT_BSY) {
+ dev->stats.rx_errors++;
+ atomic64_inc(&priv->extra_stats.rx_bsy);
+
+ gfar_receive(irq, grp_id);
+
+ netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
+ gfar_read(&regs->rstat));
+ }
+ if (events & IEVENT_BABR) {
+ dev->stats.rx_errors++;
+ atomic64_inc(&priv->extra_stats.rx_babr);
+
+ netif_dbg(priv, rx_err, dev, "babbling RX error\n");
+ }
+ if (events & IEVENT_EBERR) {
+ atomic64_inc(&priv->extra_stats.eberr);
+ netif_dbg(priv, rx_err, dev, "bus error\n");
+ }
+ if (events & IEVENT_RXC)
+ netif_dbg(priv, rx_status, dev, "control frame\n");
+
+ if (events & IEVENT_BABT) {
+ atomic64_inc(&priv->extra_stats.tx_babt);
+ netif_dbg(priv, tx_err, dev, "babbling TX error\n");
+ }
+ return IRQ_HANDLED;
+}
+
+static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
+{
+ struct phy_device *phydev = priv->phydev;
+ u32 val = 0;
+
+ if (!phydev->duplex)
+ return val;
+
+ if (!priv->pause_aneg_en) {
+ if (priv->tx_pause_en)
+ val |= MACCFG1_TX_FLOW;
+ if (priv->rx_pause_en)
+ val |= MACCFG1_RX_FLOW;
+ } else {
+ u16 lcl_adv, rmt_adv;
+ u8 flowctrl;
+ /* get link partner capabilities */
+ rmt_adv = 0;
+ if (phydev->pause)
+ rmt_adv = LPA_PAUSE_CAP;
+ if (phydev->asym_pause)
+ rmt_adv |= LPA_PAUSE_ASYM;
+
+ lcl_adv = 0;
+ if (phydev->advertising & ADVERTISED_Pause)
+ lcl_adv |= ADVERTISE_PAUSE_CAP;
+ if (phydev->advertising & ADVERTISED_Asym_Pause)
+ lcl_adv |= ADVERTISE_PAUSE_ASYM;
+
+ flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
+ if (flowctrl & FLOW_CTRL_TX)
+ val |= MACCFG1_TX_FLOW;
+ if (flowctrl & FLOW_CTRL_RX)
+ val |= MACCFG1_RX_FLOW;
+ }
+
+ return val;
+}
+
+static noinline void gfar_update_link_state(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ struct phy_device *phydev = priv->phydev;
+ struct gfar_priv_rx_q *rx_queue = NULL;
+ int i;
+ struct rxbd8 *bdp;
+
+ if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
+ return;
+
+ if (phydev->link) {
+ u32 tempval1 = gfar_read(&regs->maccfg1);
+ u32 tempval = gfar_read(&regs->maccfg2);
+ u32 ecntrl = gfar_read(&regs->ecntrl);
+ u32 tx_flow_oldval = (tempval & MACCFG1_TX_FLOW);
+
+ if (phydev->duplex != priv->oldduplex) {
+ if (!(phydev->duplex))
+ tempval &= ~(MACCFG2_FULL_DUPLEX);
+ else
+ tempval |= MACCFG2_FULL_DUPLEX;
+
+ priv->oldduplex = phydev->duplex;
+ }
+
+ if (phydev->speed != priv->oldspeed) {
+ switch (phydev->speed) {
+ case 1000:
+ tempval =
+ ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
+
+ ecntrl &= ~(ECNTRL_R100);
+ break;
+ case 100:
+ case 10:
+ tempval =
+ ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
+
+ /* Reduced mode distinguishes
+ * between 10 and 100
+ */
+ if (phydev->speed == SPEED_100)
+ ecntrl |= ECNTRL_R100;
+ else
+ ecntrl &= ~(ECNTRL_R100);
+ break;
+ default:
+ netif_warn(priv, link, priv->ndev,
+ "Ack! Speed (%d) is not 10/100/1000!\n",
+ phydev->speed);
+ break;
+ }
+
+ priv->oldspeed = phydev->speed;
+ }
+
+ tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
+ tempval1 |= gfar_get_flowctrl_cfg(priv);
+
+ /* Turn last free buffer recording on */
+ if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) {
+ for (i = 0; i < priv->num_rx_queues; i++) {
+ rx_queue = priv->rx_queue[i];
+ bdp = rx_queue->cur_rx;
+ /* skip to previous bd */
+ bdp = skip_bd(bdp, rx_queue->rx_ring_size - 1,
+ rx_queue->rx_bd_base,
+ rx_queue->rx_ring_size);
+
+ if (rx_queue->rfbptr)
+ gfar_write(rx_queue->rfbptr, (u32)bdp);
+ }
+
+ priv->tx_actual_en = 1;
+ }
+
+ if (unlikely(!(tempval1 & MACCFG1_TX_FLOW) && tx_flow_oldval))
+ priv->tx_actual_en = 0;
+
+ gfar_write(&regs->maccfg1, tempval1);
+ gfar_write(&regs->maccfg2, tempval);
+ gfar_write(&regs->ecntrl, ecntrl);
+
+ if (!priv->oldlink)
+ priv->oldlink = 1;
+
+ } else if (priv->oldlink) {
+ priv->oldlink = 0;
+ priv->oldspeed = 0;
+ priv->oldduplex = -1;
+ }
+
+ if (netif_msg_link(priv))
+ phy_print_status(phydev);
+}
+
+static const struct of_device_id gfar_match[] =
+{
+ {
+ .type = "network",
+ .compatible = "gianfar",
+ },
+ {
+ .compatible = "fsl,etsec2",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, gfar_match);
+
+/* Structure for a device driver */
+static struct platform_driver gfar_driver = {
+ .driver = {
+ .name = "fsl-gianfar",
+ .pm = GFAR_PM_OPS,
+ .of_match_table = gfar_match,
+ },
+ .probe = gfar_probe,
+ .remove = gfar_remove,
+};
+
+module_platform_driver(gfar_driver);
diff --git a/kernel/drivers/net/ethernet/freescale/gianfar.h b/kernel/drivers/net/ethernet/freescale/gianfar.h
new file mode 100644
index 000000000..daa1d37de
--- /dev/null
+++ b/kernel/drivers/net/ethernet/freescale/gianfar.h
@@ -0,0 +1,1343 @@
+/*
+ * drivers/net/ethernet/freescale/gianfar.h
+ *
+ * Gianfar Ethernet Driver
+ * Driver for FEC on MPC8540 and TSEC on MPC8540/MPC8560
+ * Based on 8260_io/fcc_enet.c
+ *
+ * Author: Andy Fleming
+ * Maintainer: Kumar Gala
+ * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
+ *
+ * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * Still left to do:
+ * -Add support for module parameters
+ * -Add patch for ethtool phys id
+ */
+#ifndef __GIANFAR_H
+#define __GIANFAR_H
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+#include <linux/module.h>
+#include <linux/crc32.h>
+#include <linux/workqueue.h>
+#include <linux/ethtool.h>
+
+struct ethtool_flow_spec_container {
+ struct ethtool_rx_flow_spec fs;
+ struct list_head list;
+};
+
+struct ethtool_rx_list {
+ struct list_head list;
+ unsigned int count;
+};
+
+/* The maximum number of packets to be handled in one call of gfar_poll */
+#define GFAR_DEV_WEIGHT 64
+
+/* Length for FCB */
+#define GMAC_FCB_LEN 8
+
+/* Length for TxPAL */
+#define GMAC_TXPAL_LEN 16
+
+/* Default padding amount */
+#define DEFAULT_PADDING 2
+
+/* Number of bytes to align the rx bufs to */
+#define RXBUF_ALIGNMENT 64
+
+/* The number of bytes which composes a unit for the purpose of
+ * allocating data buffers, i.e. for any given MTU, the data buffer
+ * will be the next highest multiple of 512 bytes. */
+#define INCREMENTAL_BUFFER_SIZE 512
+
+#define PHY_INIT_TIMEOUT 100000
+
+#define DRV_NAME "gfar-enet"
+extern const char gfar_driver_version[];
+
+/* MAXIMUM NUMBER OF QUEUES SUPPORTED */
+#define MAX_TX_QS 0x8
+#define MAX_RX_QS 0x8
+
+/* MAXIMUM NUMBER OF GROUPS SUPPORTED */
+#define MAXGROUPS 0x2
+
+/* These need to be powers of 2 for this driver */
+#define DEFAULT_TX_RING_SIZE 256
+#define DEFAULT_RX_RING_SIZE 256
+
+#define GFAR_RX_MAX_RING_SIZE 256
+#define GFAR_TX_MAX_RING_SIZE 256
+
+#define GFAR_MAX_FIFO_THRESHOLD 511
+#define GFAR_MAX_FIFO_STARVE 511
+#define GFAR_MAX_FIFO_STARVE_OFF 511
+
+#define FBTHR_SHIFT 24
+#define DEFAULT_RX_LFC_THR 16
+#define DEFAULT_LFC_PTVVAL 4
+
+#define DEFAULT_RX_BUFFER_SIZE 1536
+#define TX_RING_MOD_MASK(size) (size-1)
+#define RX_RING_MOD_MASK(size) (size-1)
+#define JUMBO_BUFFER_SIZE 9728
+#define JUMBO_FRAME_SIZE 9600
+
+#define DEFAULT_FIFO_TX_THR 0x100
+#define DEFAULT_FIFO_TX_STARVE 0x40
+#define DEFAULT_FIFO_TX_STARVE_OFF 0x80
+#define DEFAULT_BD_STASH 1
+#define DEFAULT_STASH_LENGTH 96
+#define DEFAULT_STASH_INDEX 0
+
+/* The number of Exact Match registers */
+#define GFAR_EM_NUM 15
+
+/* Latency of interface clock in nanoseconds */
+/* Interface clock latency, in this case, means the
+ * time described by a value of 1 in the interrupt
+ * coalescing registers' time fields. Since those fields
+ * refer to the time it takes for 64 clocks to pass, the
+ * latencies are as such:
+ * GBIT = 125MHz => 8ns/clock => 8*64 ns / tick
+ * 100 = 25 MHz => 40ns/clock => 40*64 ns / tick
+ * 10 = 2.5 MHz => 400ns/clock => 400*64 ns / tick
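+ * e.g. at gigabit one coalescing tick is 8 * 64 = 512 ns, which is
+ * where GFAR_GBIT_TIME below comes from (2560 and 25600 likewise).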
+ */
+#define GFAR_GBIT_TIME 512
+#define GFAR_100_TIME 2560
+#define GFAR_10_TIME 25600
+
+#define DEFAULT_TX_COALESCE 1
+#define DEFAULT_TXCOUNT 16
+#define DEFAULT_TXTIME 21
+
+#define DEFAULT_RXTIME 21
+
+#define DEFAULT_RX_COALESCE 0
+#define DEFAULT_RXCOUNT 0
+
+#define GFAR_SUPPORTED (SUPPORTED_10baseT_Half \
+ | SUPPORTED_10baseT_Full \
+ | SUPPORTED_100baseT_Half \
+ | SUPPORTED_100baseT_Full \
+ | SUPPORTED_Autoneg \
+ | SUPPORTED_MII)
+
+#define GFAR_SUPPORTED_GBIT SUPPORTED_1000baseT_Full
+
+/* TBI register addresses */
+#define MII_TBICON 0x11
+
+/* TBICON register bit fields */
+#define TBICON_CLK_SELECT 0x0020
+
+/* MAC register bits */
+#define MACCFG1_SOFT_RESET 0x80000000
+#define MACCFG1_RESET_RX_MC 0x00080000
+#define MACCFG1_RESET_TX_MC 0x00040000
+#define MACCFG1_RESET_RX_FUN 0x00020000
+#define MACCFG1_RESET_TX_FUN 0x00010000
+#define MACCFG1_LOOPBACK 0x00000100
+#define MACCFG1_RX_FLOW 0x00000020
+#define MACCFG1_TX_FLOW 0x00000010
+#define MACCFG1_SYNCD_RX_EN 0x00000008
+#define MACCFG1_RX_EN 0x00000004
+#define MACCFG1_SYNCD_TX_EN 0x00000002
+#define MACCFG1_TX_EN 0x00000001
+
+#define MACCFG2_INIT_SETTINGS 0x00007205
+#define MACCFG2_FULL_DUPLEX 0x00000001
+#define MACCFG2_IF 0x00000300
+#define MACCFG2_MII 0x00000100
+#define MACCFG2_GMII 0x00000200
+#define MACCFG2_HUGEFRAME 0x00000020
+#define MACCFG2_LENGTHCHECK 0x00000010
+#define MACCFG2_MPEN 0x00000008
+
+#define ECNTRL_FIFM 0x00008000
+#define ECNTRL_INIT_SETTINGS 0x00001000
+#define ECNTRL_TBI_MODE 0x00000020
+#define ECNTRL_REDUCED_MODE 0x00000010
+#define ECNTRL_R100 0x00000008
+#define ECNTRL_REDUCED_MII_MODE 0x00000004
+#define ECNTRL_SGMII_MODE 0x00000002
+
+#define MRBLR_INIT_SETTINGS DEFAULT_RX_BUFFER_SIZE
+
+#define MINFLR_INIT_SETTINGS 0x00000040
+
+/* Tqueue control */
+#define TQUEUE_EN0 0x00008000
+#define TQUEUE_EN1 0x00004000
+#define TQUEUE_EN2 0x00002000
+#define TQUEUE_EN3 0x00001000
+#define TQUEUE_EN4 0x00000800
+#define TQUEUE_EN5 0x00000400
+#define TQUEUE_EN6 0x00000200
+#define TQUEUE_EN7 0x00000100
+#define TQUEUE_EN_ALL 0x0000FF00
+
+#define TR03WT_WT0_MASK 0xFF000000
+#define TR03WT_WT1_MASK 0x00FF0000
+#define TR03WT_WT2_MASK 0x0000FF00
+#define TR03WT_WT3_MASK 0x000000FF
+
+#define TR47WT_WT4_MASK 0xFF000000
+#define TR47WT_WT5_MASK 0x00FF0000
+#define TR47WT_WT6_MASK 0x0000FF00
+#define TR47WT_WT7_MASK 0x000000FF
+
+/* Rqueue control */
+#define RQUEUE_EX0 0x00800000
+#define RQUEUE_EX1 0x00400000
+#define RQUEUE_EX2 0x00200000
+#define RQUEUE_EX3 0x00100000
+#define RQUEUE_EX4 0x00080000
+#define RQUEUE_EX5 0x00040000
+#define RQUEUE_EX6 0x00020000
+#define RQUEUE_EX7 0x00010000
+#define RQUEUE_EX_ALL 0x00FF0000
+
+#define RQUEUE_EN0 0x00000080
+#define RQUEUE_EN1 0x00000040
+#define RQUEUE_EN2 0x00000020
+#define RQUEUE_EN3 0x00000010
+#define RQUEUE_EN4 0x00000008
+#define RQUEUE_EN5 0x00000004
+#define RQUEUE_EN6 0x00000002
+#define RQUEUE_EN7 0x00000001
+#define RQUEUE_EN_ALL 0x000000FF
+
+/* Init to do tx snooping for buffers and descriptors */
+#define DMACTRL_INIT_SETTINGS 0x000000c3
+#define DMACTRL_GRS 0x00000010
+#define DMACTRL_GTS 0x00000008
+
+#define TSTAT_CLEAR_THALT_ALL 0xFF000000
+#define TSTAT_CLEAR_THALT 0x80000000
+#define TSTAT_CLEAR_THALT0 0x80000000
+#define TSTAT_CLEAR_THALT1 0x40000000
+#define TSTAT_CLEAR_THALT2 0x20000000
+#define TSTAT_CLEAR_THALT3 0x10000000
+#define TSTAT_CLEAR_THALT4 0x08000000
+#define TSTAT_CLEAR_THALT5 0x04000000
+#define TSTAT_CLEAR_THALT6 0x02000000
+#define TSTAT_CLEAR_THALT7 0x01000000
+
+/* Interrupt coalescing macros */
+#define IC_ICEN 0x80000000
+#define IC_ICFT_MASK 0x1fe00000
+#define IC_ICFT_SHIFT 21
+#define mk_ic_icft(x) \
+ (((unsigned int)x << IC_ICFT_SHIFT)&IC_ICFT_MASK)
+#define IC_ICTT_MASK 0x0000ffff
+#define mk_ic_ictt(x) (x&IC_ICTT_MASK)
+
+#define mk_ic_value(count, time) (IC_ICEN | \
+ mk_ic_icft(count) | \
+ mk_ic_ictt(time))
+#define get_icft_value(ic) (((unsigned long)ic & IC_ICFT_MASK) >> \
+ IC_ICFT_SHIFT)
+#define get_ictt_value(ic) ((unsigned long)ic & IC_ICTT_MASK)
+
+#define DEFAULT_TXIC mk_ic_value(DEFAULT_TXCOUNT, DEFAULT_TXTIME)
+#define DEFAULT_RXIC mk_ic_value(DEFAULT_RXCOUNT, DEFAULT_RXTIME)
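+
+/* A minimal sketch of how the coalescing macros compose (illustrative only):
+ * DEFAULT_TXIC evaluates to IC_ICEN | (16 << 21) | 21, i.e. the enable bit,
+ * a frame-count threshold of 16 in bits 28-21 (IC_ICFT_MASK) and a timer
+ * threshold of 21 ticks in bits 15-0 (IC_ICTT_MASK); get_icft_value() and
+ * get_ictt_value() simply undo this packing.
+ */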
+
+#define skip_bd(bdp, stride, base, ring_size) ({ \
+ typeof(bdp) new_bd = (bdp) + (stride); \
+ (new_bd >= (base) + (ring_size)) ? (new_bd - (ring_size)) : new_bd; })
+
+#define next_bd(bdp, base, ring_size) skip_bd(bdp, 1, base, ring_size)
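+
+/* Illustrative only: next_bd() walks the descriptor ring with wrap-around,
+ * e.g. for a ring of 8 descriptors starting at base,
+ * next_bd(&base[7], base, 8) evaluates back to &base[0].
+ */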
+
+#define RCTRL_TS_ENABLE 0x01000000
+#define RCTRL_PAL_MASK 0x001f0000
+#define RCTRL_LFC 0x00004000
+#define RCTRL_VLEX 0x00002000
+#define RCTRL_FILREN 0x00001000
+#define RCTRL_GHTX 0x00000400
+#define RCTRL_IPCSEN 0x00000200
+#define RCTRL_TUCSEN 0x00000100
+#define RCTRL_PRSDEP_MASK 0x000000c0
+#define RCTRL_PRSDEP_INIT 0x000000c0
+#define RCTRL_PRSFM 0x00000020
+#define RCTRL_PROM 0x00000008
+#define RCTRL_EMEN 0x00000002
+#define RCTRL_REQ_PARSER (RCTRL_VLEX | RCTRL_IPCSEN | \
+ RCTRL_TUCSEN | RCTRL_FILREN)
+#define RCTRL_CHECKSUMMING (RCTRL_IPCSEN | RCTRL_TUCSEN | \
+ RCTRL_PRSDEP_INIT)
+#define RCTRL_EXTHASH (RCTRL_GHTX)
+#define RCTRL_VLAN (RCTRL_PRSDEP_INIT)
+#define RCTRL_PADDING(x) ((x << 16) & RCTRL_PAL_MASK)
+
+
+#define RSTAT_CLEAR_RHALT 0x00800000
+#define RSTAT_CLEAR_RXF0 0x00000080
+#define RSTAT_RXF_MASK 0x000000ff
+
+#define TCTRL_IPCSEN 0x00004000
+#define TCTRL_TUCSEN 0x00002000
+#define TCTRL_VLINS 0x00001000
+#define TCTRL_THDF 0x00000800
+#define TCTRL_RFCPAUSE 0x00000010
+#define TCTRL_TFCPAUSE 0x00000008
+#define TCTRL_TXSCHED_MASK 0x00000006
+#define TCTRL_TXSCHED_INIT 0x00000000
+/* priority scheduling */
+#define TCTRL_TXSCHED_PRIO 0x00000002
+/* weighted round-robin scheduling (WRRS) */
+#define TCTRL_TXSCHED_WRRS 0x00000004
+/* Default WRRS weight and policy setting,
+ * tailored to the tr03wt and tr47wt registers:
+ * equal weight for all Tx Qs, measured in 64-byte units
+ */
+#define DEFAULT_WRRS_WEIGHT 0x18181818
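+
+/* Worked reading of the default (illustrative only): 0x18181818 packs the
+ * byte-wide weight 0x18 = 24 into all four fields of tr03wt/tr47wt, i.e.
+ * each Tx queue is allotted 24 * 64-byte units (1536 bytes) per
+ * round-robin pass.
+ */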
+
+#define TCTRL_INIT_CSUM (TCTRL_TUCSEN | TCTRL_IPCSEN)
+
+#define IEVENT_INIT_CLEAR 0xffffffff
+#define IEVENT_BABR 0x80000000
+#define IEVENT_RXC 0x40000000
+#define IEVENT_BSY 0x20000000
+#define IEVENT_EBERR 0x10000000
+#define IEVENT_MSRO 0x04000000
+#define IEVENT_GTSC 0x02000000
+#define IEVENT_BABT 0x01000000
+#define IEVENT_TXC 0x00800000
+#define IEVENT_TXE 0x00400000
+#define IEVENT_TXB 0x00200000
+#define IEVENT_TXF 0x00100000
+#define IEVENT_LC 0x00040000
+#define IEVENT_CRL 0x00020000
+#define IEVENT_XFUN 0x00010000
+#define IEVENT_RXB0 0x00008000
+#define IEVENT_MAG 0x00000800
+#define IEVENT_GRSC 0x00000100
+#define IEVENT_RXF0 0x00000080
+#define IEVENT_FIR 0x00000008
+#define IEVENT_FIQ 0x00000004
+#define IEVENT_DPE 0x00000002
+#define IEVENT_PERR 0x00000001
+#define IEVENT_RX_MASK (IEVENT_RXB0 | IEVENT_RXF0 | IEVENT_BSY)
+#define IEVENT_TX_MASK (IEVENT_TXB | IEVENT_TXF)
+#define IEVENT_RTX_MASK (IEVENT_RX_MASK | IEVENT_TX_MASK)
+#define IEVENT_ERR_MASK \
+(IEVENT_RXC | IEVENT_BSY | IEVENT_EBERR | IEVENT_MSRO | \
+ IEVENT_BABT | IEVENT_TXC | IEVENT_TXE | IEVENT_LC \
+ | IEVENT_CRL | IEVENT_XFUN | IEVENT_DPE | IEVENT_PERR \
+ | IEVENT_MAG | IEVENT_BABR)
+
+#define IMASK_INIT_CLEAR 0x00000000
+#define IMASK_BABR 0x80000000
+#define IMASK_RXC 0x40000000
+#define IMASK_BSY 0x20000000
+#define IMASK_EBERR 0x10000000
+#define IMASK_MSRO 0x04000000
+#define IMASK_GTSC 0x02000000
+#define IMASK_BABT 0x01000000
+#define IMASK_TXC 0x00800000
+#define IMASK_TXEEN 0x00400000
+#define IMASK_TXBEN 0x00200000
+#define IMASK_TXFEN 0x00100000
+#define IMASK_LC 0x00040000
+#define IMASK_CRL 0x00020000
+#define IMASK_XFUN 0x00010000
+#define IMASK_RXB0 0x00008000
+#define IMASK_MAG 0x00000800
+#define IMASK_GRSC 0x00000100
+#define IMASK_RXFEN0 0x00000080
+#define IMASK_FIR 0x00000008
+#define IMASK_FIQ 0x00000004
+#define IMASK_DPE 0x00000002
+#define IMASK_PERR 0x00000001
+#define IMASK_DEFAULT (IMASK_TXEEN | IMASK_TXFEN | IMASK_TXBEN | \
+ IMASK_RXFEN0 | IMASK_BSY | IMASK_EBERR | IMASK_BABR | \
+ IMASK_XFUN | IMASK_RXC | IMASK_BABT | IMASK_DPE \
+ | IMASK_PERR)
+#define IMASK_RX_DEFAULT (IMASK_RXFEN0 | IMASK_BSY)
+#define IMASK_TX_DEFAULT (IMASK_TXFEN | IMASK_TXBEN)
+
+#define IMASK_RX_DISABLED ((~(IMASK_RX_DEFAULT)) & IMASK_DEFAULT)
+#define IMASK_TX_DISABLED ((~(IMASK_TX_DEFAULT)) & IMASK_DEFAULT)
+
+/* Fifo management */
+#define FIFO_TX_THR_MASK 0x01ff
+#define FIFO_TX_STARVE_MASK 0x01ff
+#define FIFO_TX_STARVE_OFF_MASK 0x01ff
+
+/* Attribute fields */
+
+/* This enables rx snooping for buffers and descriptors */
+#define ATTR_BDSTASH 0x00000800
+
+#define ATTR_BUFSTASH 0x00004000
+
+#define ATTR_SNOOPING 0x000000c0
+#define ATTR_INIT_SETTINGS ATTR_SNOOPING
+
+#define ATTRELI_INIT_SETTINGS 0x0
+#define ATTRELI_EL_MASK 0x3fff0000
+#define ATTRELI_EL(x) (x << 16)
+#define ATTRELI_EI_MASK 0x00003fff
+#define ATTRELI_EI(x) (x)
+
+#define BD_LFLAG(flags) ((flags) << 16)
+#define BD_LENGTH_MASK 0x0000ffff
+
+#define FPR_FILER_MASK 0xFFFFFFFF
+#define MAX_FILER_IDX 0xFF
+
+/* This default RIR value directly corresponds
+ * to the 3-bit hash value generated */
+#define DEFAULT_8RXQ_RIR0 0x05397700
+/* Map even hash values to Q0, and odd ones to Q1 */
+#define DEFAULT_2RXQ_RIR0 0x04104100
+
+/* RQFCR register bits */
+#define RQFCR_GPI 0x80000000
+#define RQFCR_HASHTBL_Q 0x00000000
+#define RQFCR_HASHTBL_0 0x00020000
+#define RQFCR_HASHTBL_1 0x00040000
+#define RQFCR_HASHTBL_2 0x00060000
+#define RQFCR_HASHTBL_3 0x00080000
+#define RQFCR_HASH 0x00010000
+#define RQFCR_QUEUE 0x0000FC00
+#define RQFCR_CLE 0x00000200
+#define RQFCR_RJE 0x00000100
+#define RQFCR_AND 0x00000080
+#define RQFCR_CMP_EXACT 0x00000000
+#define RQFCR_CMP_MATCH 0x00000020
+#define RQFCR_CMP_NOEXACT 0x00000040
+#define RQFCR_CMP_NOMATCH 0x00000060
+
+/* RQFCR PID values */
+#define RQFCR_PID_MASK 0x00000000
+#define RQFCR_PID_PARSE 0x00000001
+#define RQFCR_PID_ARB 0x00000002
+#define RQFCR_PID_DAH 0x00000003
+#define RQFCR_PID_DAL 0x00000004
+#define RQFCR_PID_SAH 0x00000005
+#define RQFCR_PID_SAL 0x00000006
+#define RQFCR_PID_ETY 0x00000007
+#define RQFCR_PID_VID 0x00000008
+#define RQFCR_PID_PRI 0x00000009
+#define RQFCR_PID_TOS 0x0000000A
+#define RQFCR_PID_L4P 0x0000000B
+#define RQFCR_PID_DIA 0x0000000C
+#define RQFCR_PID_SIA 0x0000000D
+#define RQFCR_PID_DPT 0x0000000E
+#define RQFCR_PID_SPT 0x0000000F
+
+/* RQFPR when PID is 0x0001 */
+#define RQFPR_HDR_GE_512 0x00200000
+#define RQFPR_LERR 0x00100000
+#define RQFPR_RAR 0x00080000
+#define RQFPR_RARQ 0x00040000
+#define RQFPR_AR 0x00020000
+#define RQFPR_ARQ 0x00010000
+#define RQFPR_EBC 0x00008000
+#define RQFPR_VLN 0x00004000
+#define RQFPR_CFI 0x00002000
+#define RQFPR_JUM 0x00001000
+#define RQFPR_IPF 0x00000800
+#define RQFPR_FIF 0x00000400
+#define RQFPR_IPV4 0x00000200
+#define RQFPR_IPV6 0x00000100
+#define RQFPR_ICC 0x00000080
+#define RQFPR_ICV 0x00000040
+#define RQFPR_TCP 0x00000020
+#define RQFPR_UDP 0x00000010
+#define RQFPR_TUC 0x00000008
+#define RQFPR_TUV 0x00000004
+#define RQFPR_PER 0x00000002
+#define RQFPR_EER 0x00000001
+
+/* TxBD status field bits */
+#define TXBD_READY 0x8000
+#define TXBD_PADCRC 0x4000
+#define TXBD_WRAP 0x2000
+#define TXBD_INTERRUPT 0x1000
+#define TXBD_LAST 0x0800
+#define TXBD_CRC 0x0400
+#define TXBD_DEF 0x0200
+#define TXBD_HUGEFRAME 0x0080
+#define TXBD_LATECOLLISION 0x0080
+#define TXBD_RETRYLIMIT 0x0040
+#define TXBD_RETRYCOUNTMASK 0x003c
+#define TXBD_UNDERRUN 0x0002
+#define TXBD_TOE 0x0002
+
+/* Tx FCB param bits */
+#define TXFCB_VLN 0x80
+#define TXFCB_IP 0x40
+#define TXFCB_IP6 0x20
+#define TXFCB_TUP 0x10
+#define TXFCB_UDP 0x08
+#define TXFCB_CIP 0x04
+#define TXFCB_CTU 0x02
+#define TXFCB_NPH 0x01
+#define TXFCB_DEFAULT (TXFCB_IP|TXFCB_TUP|TXFCB_CTU|TXFCB_NPH)
+
+/* RxBD status field bits */
+#define RXBD_EMPTY 0x8000
+#define RXBD_RO1 0x4000
+#define RXBD_WRAP 0x2000
+#define RXBD_INTERRUPT 0x1000
+#define RXBD_LAST 0x0800
+#define RXBD_FIRST 0x0400
+#define RXBD_MISS 0x0100
+#define RXBD_BROADCAST 0x0080
+#define RXBD_MULTICAST 0x0040
+#define RXBD_LARGE 0x0020
+#define RXBD_NONOCTET 0x0010
+#define RXBD_SHORT 0x0008
+#define RXBD_CRCERR 0x0004
+#define RXBD_OVERRUN 0x0002
+#define RXBD_TRUNCATED 0x0001
+#define RXBD_STATS 0x01ff
+#define RXBD_ERR (RXBD_LARGE | RXBD_SHORT | RXBD_NONOCTET \
+ | RXBD_CRCERR | RXBD_OVERRUN \
+ | RXBD_TRUNCATED)
+
+/* Rx FCB status field bits */
+#define RXFCB_VLN 0x8000
+#define RXFCB_IP 0x4000
+#define RXFCB_IP6 0x2000
+#define RXFCB_TUP 0x1000
+#define RXFCB_CIP 0x0800
+#define RXFCB_CTU 0x0400
+#define RXFCB_EIP 0x0200
+#define RXFCB_ETU 0x0100
+#define RXFCB_CSUM_MASK 0x0f00
+#define RXFCB_PERR_MASK 0x000c
+#define RXFCB_PERR_BADL3 0x0008
+
+#define GFAR_INT_NAME_MAX (IFNAMSIZ + 6) /* '_g#_xx' */
+
+struct txbd8
+{
+ union {
+ struct {
+ __be16 status; /* Status Fields */
+ __be16 length; /* Buffer length */
+ };
+ __be32 lstatus;
+ };
+ __be32 bufPtr; /* Buffer Pointer */
+};
+
+struct txfcb {
+ u8 flags;
+ u8 ptp; /* Flag to enable tx timestamping */
+ u8 l4os; /* Level 4 Header Offset */
+ u8 l3os; /* Level 3 Header Offset */
+ __be16 phcs; /* Pseudo-header Checksum */
+ __be16 vlctl; /* VLAN control word */
+};
+
+struct rxbd8
+{
+ union {
+ struct {
+ __be16 status; /* Status Fields */
+ __be16 length; /* Buffer Length */
+ };
+ __be32 lstatus;
+ };
+ __be32 bufPtr; /* Buffer Pointer */
+};
+
+struct rxfcb {
+ __be16 flags;
+ u8 rq; /* Receive Queue index */
+ u8 pro; /* Layer 4 Protocol */
+ u16 reserved;
+ __be16 vlctl; /* VLAN control word */
+};
+
+struct gianfar_skb_cb {
+ unsigned int bytes_sent; /* bytes-on-wire (i.e. no FCB) */
+};
+
+#define GFAR_CB(skb) ((struct gianfar_skb_cb *)((skb)->cb))
+
+struct rmon_mib
+{
+ u32 tr64; /* 0x.680 - Transmit and Receive 64-byte Frame Counter */
+ u32 tr127; /* 0x.684 - Transmit and Receive 65-127 byte Frame Counter */
+ u32 tr255; /* 0x.688 - Transmit and Receive 128-255 byte Frame Counter */
+ u32 tr511; /* 0x.68c - Transmit and Receive 256-511 byte Frame Counter */
+ u32 tr1k; /* 0x.690 - Transmit and Receive 512-1023 byte Frame Counter */
+ u32 trmax; /* 0x.694 - Transmit and Receive 1024-1518 byte Frame Counter */
+ u32 trmgv; /* 0x.698 - Transmit and Receive 1519-1522 byte Good VLAN Frame */
+ u32 rbyt; /* 0x.69c - Receive Byte Counter */
+ u32 rpkt; /* 0x.6a0 - Receive Packet Counter */
+ u32 rfcs; /* 0x.6a4 - Receive FCS Error Counter */
+ u32 rmca; /* 0x.6a8 - Receive Multicast Packet Counter */
+ u32 rbca; /* 0x.6ac - Receive Broadcast Packet Counter */
+ u32 rxcf; /* 0x.6b0 - Receive Control Frame Packet Counter */
+ u32 rxpf; /* 0x.6b4 - Receive Pause Frame Packet Counter */
+ u32 rxuo; /* 0x.6b8 - Receive Unknown OP Code Counter */
+ u32 raln; /* 0x.6bc - Receive Alignment Error Counter */
+ u32 rflr; /* 0x.6c0 - Receive Frame Length Error Counter */
+ u32 rcde; /* 0x.6c4 - Receive Code Error Counter */
+ u32 rcse; /* 0x.6c8 - Receive Carrier Sense Error Counter */
+ u32 rund; /* 0x.6cc - Receive Undersize Packet Counter */
+ u32 rovr; /* 0x.6d0 - Receive Oversize Packet Counter */
+ u32 rfrg; /* 0x.6d4 - Receive Fragments Counter */
+ u32 rjbr; /* 0x.6d8 - Receive Jabber Counter */
+ u32 rdrp; /* 0x.6dc - Receive Drop Counter */
+ u32 tbyt; /* 0x.6e0 - Transmit Byte Counter Counter */
+ u32 tpkt; /* 0x.6e4 - Transmit Packet Counter */
+ u32 tmca; /* 0x.6e8 - Transmit Multicast Packet Counter */
+ u32 tbca; /* 0x.6ec - Transmit Broadcast Packet Counter */
+ u32 txpf; /* 0x.6f0 - Transmit Pause Control Frame Counter */
+ u32 tdfr; /* 0x.6f4 - Transmit Deferral Packet Counter */
+ u32 tedf; /* 0x.6f8 - Transmit Excessive Deferral Packet Counter */
+ u32 tscl; /* 0x.6fc - Transmit Single Collision Packet Counter */
+ u32 tmcl; /* 0x.700 - Transmit Multiple Collision Packet Counter */
+ u32 tlcl; /* 0x.704 - Transmit Late Collision Packet Counter */
+ u32 txcl; /* 0x.708 - Transmit Excessive Collision Packet Counter */
+ u32 tncl; /* 0x.70c - Transmit Total Collision Counter */
+ u8 res1[4];
+ u32 tdrp; /* 0x.714 - Transmit Drop Frame Counter */
+ u32 tjbr; /* 0x.718 - Transmit Jabber Frame Counter */
+ u32 tfcs; /* 0x.71c - Transmit FCS Error Counter */
+ u32 txcf; /* 0x.720 - Transmit Control Frame Counter */
+ u32 tovr; /* 0x.724 - Transmit Oversize Frame Counter */
+ u32 tund; /* 0x.728 - Transmit Undersize Frame Counter */
+ u32 tfrg; /* 0x.72c - Transmit Fragments Frame Counter */
+ u32 car1; /* 0x.730 - Carry Register One */
+ u32 car2; /* 0x.734 - Carry Register Two */
+ u32 cam1; /* 0x.738 - Carry Mask Register One */
+ u32 cam2; /* 0x.73c - Carry Mask Register Two */
+};
+
+struct gfar_extra_stats {
+ atomic64_t rx_large;
+ atomic64_t rx_short;
+ atomic64_t rx_nonoctet;
+ atomic64_t rx_crcerr;
+ atomic64_t rx_overrun;
+ atomic64_t rx_bsy;
+ atomic64_t rx_babr;
+ atomic64_t rx_trunc;
+ atomic64_t eberr;
+ atomic64_t tx_babt;
+ atomic64_t tx_underrun;
+ atomic64_t rx_skbmissing;
+ atomic64_t tx_timeout;
+};
+
+#define GFAR_RMON_LEN ((sizeof(struct rmon_mib) - 16)/sizeof(u32))
+#define GFAR_EXTRA_STATS_LEN \
+ (sizeof(struct gfar_extra_stats)/sizeof(atomic64_t))
+
+/* Number of stats exported via ethtool */
+#define GFAR_STATS_LEN (GFAR_RMON_LEN + GFAR_EXTRA_STATS_LEN)
+
+struct gfar {
+ u32 tsec_id; /* 0x.000 - Controller ID register */
+ u32 tsec_id2; /* 0x.004 - Controller ID2 register */
+ u8 res1[8];
+ u32 ievent; /* 0x.010 - Interrupt Event Register */
+ u32 imask; /* 0x.014 - Interrupt Mask Register */
+ u32 edis; /* 0x.018 - Error Disabled Register */
+ u32 emapg; /* 0x.01c - Group Error mapping register */
+ u32 ecntrl; /* 0x.020 - Ethernet Control Register */
+ u32 minflr; /* 0x.024 - Minimum Frame Length Register */
+ u32 ptv; /* 0x.028 - Pause Time Value Register */
+ u32 dmactrl; /* 0x.02c - DMA Control Register */
+ u32 tbipa; /* 0x.030 - TBI PHY Address Register */
+ u8 res2[28];
+ u32 fifo_rx_pause; /* 0x.050 - FIFO receive pause start threshold
+ register */
+	u32	fifo_rx_pause_shutoff;	/* 0x.054 - FIFO receive pause shutoff
+					   register */
+ u32 fifo_rx_alarm; /* 0x.058 - FIFO receive alarm start threshold
+ register */
+	u32	fifo_rx_alarm_shutoff;	/* 0x.05c - FIFO receive alarm starve
+ shutoff register */
+ u8 res3[44];
+ u32 fifo_tx_thr; /* 0x.08c - FIFO transmit threshold register */
+ u8 res4[8];
+ u32 fifo_tx_starve; /* 0x.098 - FIFO transmit starve register */
+ u32 fifo_tx_starve_shutoff; /* 0x.09c - FIFO transmit starve shutoff register */
+ u8 res5[96];
+ u32 tctrl; /* 0x.100 - Transmit Control Register */
+ u32 tstat; /* 0x.104 - Transmit Status Register */
+ u32 dfvlan; /* 0x.108 - Default VLAN Control word */
+ u32 tbdlen; /* 0x.10c - Transmit Buffer Descriptor Data Length Register */
+ u32 txic; /* 0x.110 - Transmit Interrupt Coalescing Configuration Register */
+ u32 tqueue; /* 0x.114 - Transmit queue control register */
+ u8 res7[40];
+ u32 tr03wt; /* 0x.140 - TxBD Rings 0-3 round-robin weightings */
+ u32 tr47wt; /* 0x.144 - TxBD Rings 4-7 round-robin weightings */
+ u8 res8[52];
+ u32 tbdbph; /* 0x.17c - Tx data buffer pointer high */
+ u8 res9a[4];
+ u32 tbptr0; /* 0x.184 - TxBD Pointer for ring 0 */
+ u8 res9b[4];
+ u32 tbptr1; /* 0x.18c - TxBD Pointer for ring 1 */
+ u8 res9c[4];
+ u32 tbptr2; /* 0x.194 - TxBD Pointer for ring 2 */
+ u8 res9d[4];
+ u32 tbptr3; /* 0x.19c - TxBD Pointer for ring 3 */
+ u8 res9e[4];
+ u32 tbptr4; /* 0x.1a4 - TxBD Pointer for ring 4 */
+ u8 res9f[4];
+ u32 tbptr5; /* 0x.1ac - TxBD Pointer for ring 5 */
+ u8 res9g[4];
+ u32 tbptr6; /* 0x.1b4 - TxBD Pointer for ring 6 */
+ u8 res9h[4];
+ u32 tbptr7; /* 0x.1bc - TxBD Pointer for ring 7 */
+ u8 res9[64];
+ u32 tbaseh; /* 0x.200 - TxBD base address high */
+ u32 tbase0; /* 0x.204 - TxBD Base Address of ring 0 */
+ u8 res10a[4];
+ u32 tbase1; /* 0x.20c - TxBD Base Address of ring 1 */
+ u8 res10b[4];
+ u32 tbase2; /* 0x.214 - TxBD Base Address of ring 2 */
+ u8 res10c[4];
+ u32 tbase3; /* 0x.21c - TxBD Base Address of ring 3 */
+ u8 res10d[4];
+ u32 tbase4; /* 0x.224 - TxBD Base Address of ring 4 */
+ u8 res10e[4];
+ u32 tbase5; /* 0x.22c - TxBD Base Address of ring 5 */
+ u8 res10f[4];
+ u32 tbase6; /* 0x.234 - TxBD Base Address of ring 6 */
+ u8 res10g[4];
+ u32 tbase7; /* 0x.23c - TxBD Base Address of ring 7 */
+ u8 res10[192];
+ u32 rctrl; /* 0x.300 - Receive Control Register */
+ u32 rstat; /* 0x.304 - Receive Status Register */
+ u8 res12[8];
+ u32 rxic; /* 0x.310 - Receive Interrupt Coalescing Configuration Register */
+ u32 rqueue; /* 0x.314 - Receive queue control register */
+ u32 rir0; /* 0x.318 - Ring mapping register 0 */
+ u32 rir1; /* 0x.31c - Ring mapping register 1 */
+ u32 rir2; /* 0x.320 - Ring mapping register 2 */
+ u32 rir3; /* 0x.324 - Ring mapping register 3 */
+ u8 res13[8];
+ u32 rbifx; /* 0x.330 - Receive bit field extract control register */
+ u32 rqfar; /* 0x.334 - Receive queue filing table address register */
+ u32 rqfcr; /* 0x.338 - Receive queue filing table control register */
+ u32 rqfpr; /* 0x.33c - Receive queue filing table property register */
+ u32 mrblr; /* 0x.340 - Maximum Receive Buffer Length Register */
+ u8 res14[56];
+ u32 rbdbph; /* 0x.37c - Rx data buffer pointer high */
+ u8 res15a[4];
+ u32 rbptr0; /* 0x.384 - RxBD pointer for ring 0 */
+ u8 res15b[4];
+ u32 rbptr1; /* 0x.38c - RxBD pointer for ring 1 */
+ u8 res15c[4];
+ u32 rbptr2; /* 0x.394 - RxBD pointer for ring 2 */
+ u8 res15d[4];
+ u32 rbptr3; /* 0x.39c - RxBD pointer for ring 3 */
+ u8 res15e[4];
+ u32 rbptr4; /* 0x.3a4 - RxBD pointer for ring 4 */
+ u8 res15f[4];
+ u32 rbptr5; /* 0x.3ac - RxBD pointer for ring 5 */
+ u8 res15g[4];
+ u32 rbptr6; /* 0x.3b4 - RxBD pointer for ring 6 */
+ u8 res15h[4];
+ u32 rbptr7; /* 0x.3bc - RxBD pointer for ring 7 */
+ u8 res16[64];
+ u32 rbaseh; /* 0x.400 - RxBD base address high */
+ u32 rbase0; /* 0x.404 - RxBD base address of ring 0 */
+ u8 res17a[4];
+ u32 rbase1; /* 0x.40c - RxBD base address of ring 1 */
+ u8 res17b[4];
+ u32 rbase2; /* 0x.414 - RxBD base address of ring 2 */
+ u8 res17c[4];
+ u32 rbase3; /* 0x.41c - RxBD base address of ring 3 */
+ u8 res17d[4];
+ u32 rbase4; /* 0x.424 - RxBD base address of ring 4 */
+ u8 res17e[4];
+ u32 rbase5; /* 0x.42c - RxBD base address of ring 5 */
+ u8 res17f[4];
+ u32 rbase6; /* 0x.434 - RxBD base address of ring 6 */
+ u8 res17g[4];
+ u32 rbase7; /* 0x.43c - RxBD base address of ring 7 */
+ u8 res17[192];
+ u32 maccfg1; /* 0x.500 - MAC Configuration 1 Register */
+ u32 maccfg2; /* 0x.504 - MAC Configuration 2 Register */
+ u32 ipgifg; /* 0x.508 - Inter Packet Gap/Inter Frame Gap Register */
+ u32 hafdup; /* 0x.50c - Half Duplex Register */
+ u32 maxfrm; /* 0x.510 - Maximum Frame Length Register */
+ u8 res18[12];
+ u8 gfar_mii_regs[24]; /* See gianfar_phy.h */
+ u32 ifctrl; /* 0x.538 - Interface control register */
+ u32 ifstat; /* 0x.53c - Interface Status Register */
+ u32 macstnaddr1; /* 0x.540 - Station Address Part 1 Register */
+ u32 macstnaddr2; /* 0x.544 - Station Address Part 2 Register */
+ u32 mac01addr1; /* 0x.548 - MAC exact match address 1, part 1 */
+ u32 mac01addr2; /* 0x.54c - MAC exact match address 1, part 2 */
+ u32 mac02addr1; /* 0x.550 - MAC exact match address 2, part 1 */
+ u32 mac02addr2; /* 0x.554 - MAC exact match address 2, part 2 */
+ u32 mac03addr1; /* 0x.558 - MAC exact match address 3, part 1 */
+ u32 mac03addr2; /* 0x.55c - MAC exact match address 3, part 2 */
+ u32 mac04addr1; /* 0x.560 - MAC exact match address 4, part 1 */
+ u32 mac04addr2; /* 0x.564 - MAC exact match address 4, part 2 */
+ u32 mac05addr1; /* 0x.568 - MAC exact match address 5, part 1 */
+ u32 mac05addr2; /* 0x.56c - MAC exact match address 5, part 2 */
+ u32 mac06addr1; /* 0x.570 - MAC exact match address 6, part 1 */
+ u32 mac06addr2; /* 0x.574 - MAC exact match address 6, part 2 */
+ u32 mac07addr1; /* 0x.578 - MAC exact match address 7, part 1 */
+ u32 mac07addr2; /* 0x.57c - MAC exact match address 7, part 2 */
+ u32 mac08addr1; /* 0x.580 - MAC exact match address 8, part 1 */
+ u32 mac08addr2; /* 0x.584 - MAC exact match address 8, part 2 */
+ u32 mac09addr1; /* 0x.588 - MAC exact match address 9, part 1 */
+ u32 mac09addr2; /* 0x.58c - MAC exact match address 9, part 2 */
+ u32 mac10addr1; /* 0x.590 - MAC exact match address 10, part 1*/
+ u32 mac10addr2; /* 0x.594 - MAC exact match address 10, part 2*/
+ u32 mac11addr1; /* 0x.598 - MAC exact match address 11, part 1*/
+ u32 mac11addr2; /* 0x.59c - MAC exact match address 11, part 2*/
+ u32 mac12addr1; /* 0x.5a0 - MAC exact match address 12, part 1*/
+ u32 mac12addr2; /* 0x.5a4 - MAC exact match address 12, part 2*/
+ u32 mac13addr1; /* 0x.5a8 - MAC exact match address 13, part 1*/
+ u32 mac13addr2; /* 0x.5ac - MAC exact match address 13, part 2*/
+ u32 mac14addr1; /* 0x.5b0 - MAC exact match address 14, part 1*/
+ u32 mac14addr2; /* 0x.5b4 - MAC exact match address 14, part 2*/
+ u32 mac15addr1; /* 0x.5b8 - MAC exact match address 15, part 1*/
+ u32 mac15addr2; /* 0x.5bc - MAC exact match address 15, part 2*/
+ u8 res20[192];
+ struct rmon_mib rmon; /* 0x.680-0x.73c */
+ u32 rrej; /* 0x.740 - Receive filer rejected packet counter */
+ u8 res21[188];
+	u32	igaddr0; /* 0x.800 - Individual/Group address register 0 */
+	u32	igaddr1; /* 0x.804 - Individual/Group address register 1 */
+	u32	igaddr2; /* 0x.808 - Individual/Group address register 2 */
+	u32	igaddr3; /* 0x.80c - Individual/Group address register 3 */
+	u32	igaddr4; /* 0x.810 - Individual/Group address register 4 */
+	u32	igaddr5; /* 0x.814 - Individual/Group address register 5 */
+	u32	igaddr6; /* 0x.818 - Individual/Group address register 6 */
+	u32	igaddr7; /* 0x.81c - Individual/Group address register 7 */
+ u8 res22[96];
+ u32 gaddr0; /* 0x.880 - Group address register 0 */
+ u32 gaddr1; /* 0x.884 - Group address register 1 */
+ u32 gaddr2; /* 0x.888 - Group address register 2 */
+ u32 gaddr3; /* 0x.88c - Group address register 3 */
+ u32 gaddr4; /* 0x.890 - Group address register 4 */
+ u32 gaddr5; /* 0x.894 - Group address register 5 */
+ u32 gaddr6; /* 0x.898 - Group address register 6 */
+ u32 gaddr7; /* 0x.89c - Group address register 7 */
+ u8 res23a[352];
+ u32 fifocfg; /* 0x.a00 - FIFO interface config register */
+ u8 res23b[252];
+ u8 res23c[248];
+ u32 attr; /* 0x.bf8 - Attributes Register */
+ u32 attreli; /* 0x.bfc - Attributes Extract Length and Extract Index Register */
+ u32 rqprm0; /* 0x.c00 - Receive queue parameters register 0 */
+ u32 rqprm1; /* 0x.c04 - Receive queue parameters register 1 */
+ u32 rqprm2; /* 0x.c08 - Receive queue parameters register 2 */
+ u32 rqprm3; /* 0x.c0c - Receive queue parameters register 3 */
+ u32 rqprm4; /* 0x.c10 - Receive queue parameters register 4 */
+ u32 rqprm5; /* 0x.c14 - Receive queue parameters register 5 */
+ u32 rqprm6; /* 0x.c18 - Receive queue parameters register 6 */
+ u32 rqprm7; /* 0x.c1c - Receive queue parameters register 7 */
+ u8 res24[36];
+ u32 rfbptr0; /* 0x.c44 - Last free RxBD pointer for ring 0 */
+ u8 res24a[4];
+ u32 rfbptr1; /* 0x.c4c - Last free RxBD pointer for ring 1 */
+ u8 res24b[4];
+ u32 rfbptr2; /* 0x.c54 - Last free RxBD pointer for ring 2 */
+ u8 res24c[4];
+ u32 rfbptr3; /* 0x.c5c - Last free RxBD pointer for ring 3 */
+ u8 res24d[4];
+ u32 rfbptr4; /* 0x.c64 - Last free RxBD pointer for ring 4 */
+ u8 res24e[4];
+ u32 rfbptr5; /* 0x.c6c - Last free RxBD pointer for ring 5 */
+ u8 res24f[4];
+ u32 rfbptr6; /* 0x.c74 - Last free RxBD pointer for ring 6 */
+ u8 res24g[4];
+ u32 rfbptr7; /* 0x.c7c - Last free RxBD pointer for ring 7 */
+ u8 res24h[4];
+ u8 res24x[556];
+ u32 isrg0; /* 0x.eb0 - Interrupt steering group 0 register */
+ u32 isrg1; /* 0x.eb4 - Interrupt steering group 1 register */
+ u32 isrg2; /* 0x.eb8 - Interrupt steering group 2 register */
+ u32 isrg3; /* 0x.ebc - Interrupt steering group 3 register */
+ u8 res25[16];
+ u32 rxic0; /* 0x.ed0 - Ring 0 Rx interrupt coalescing */
+ u32 rxic1; /* 0x.ed4 - Ring 1 Rx interrupt coalescing */
+ u32 rxic2; /* 0x.ed8 - Ring 2 Rx interrupt coalescing */
+ u32 rxic3; /* 0x.edc - Ring 3 Rx interrupt coalescing */
+ u32 rxic4; /* 0x.ee0 - Ring 4 Rx interrupt coalescing */
+ u32 rxic5; /* 0x.ee4 - Ring 5 Rx interrupt coalescing */
+ u32 rxic6; /* 0x.ee8 - Ring 6 Rx interrupt coalescing */
+ u32 rxic7; /* 0x.eec - Ring 7 Rx interrupt coalescing */
+ u8 res26[32];
+ u32 txic0; /* 0x.f10 - Ring 0 Tx interrupt coalescing */
+ u32 txic1; /* 0x.f14 - Ring 1 Tx interrupt coalescing */
+ u32 txic2; /* 0x.f18 - Ring 2 Tx interrupt coalescing */
+ u32 txic3; /* 0x.f1c - Ring 3 Tx interrupt coalescing */
+ u32 txic4; /* 0x.f20 - Ring 4 Tx interrupt coalescing */
+ u32 txic5; /* 0x.f24 - Ring 5 Tx interrupt coalescing */
+ u32 txic6; /* 0x.f28 - Ring 6 Tx interrupt coalescing */
+ u32 txic7; /* 0x.f2c - Ring 7 Tx interrupt coalescing */
+ u8 res27[208];
+};
+
+/* Flags related to gianfar device features */
+#define FSL_GIANFAR_DEV_HAS_GIGABIT 0x00000001
+#define FSL_GIANFAR_DEV_HAS_COALESCE 0x00000002
+#define FSL_GIANFAR_DEV_HAS_RMON 0x00000004
+#define FSL_GIANFAR_DEV_HAS_MULTI_INTR 0x00000008
+#define FSL_GIANFAR_DEV_HAS_CSUM 0x00000010
+#define FSL_GIANFAR_DEV_HAS_VLAN 0x00000020
+#define FSL_GIANFAR_DEV_HAS_EXTENDED_HASH 0x00000040
+#define FSL_GIANFAR_DEV_HAS_MAGIC_PACKET 0x00000100
+#define FSL_GIANFAR_DEV_HAS_BD_STASHING 0x00000200
+#define FSL_GIANFAR_DEV_HAS_BUF_STASHING 0x00000400
+#define FSL_GIANFAR_DEV_HAS_TIMER 0x00000800
+
+#if (MAXGROUPS == 2)
+#define DEFAULT_MAPPING 0xAA
+#else
+#define DEFAULT_MAPPING 0xFF
+#endif
+
+#define ISRG_RR0 0x80000000
+#define ISRG_TR0 0x00800000
+
+/* The same driver can operate in two modes:
+ * SQ_SG_MODE: Single Queue Single Group Mode
+ *	(Backward compatible mode)
+ * MQ_MG_MODE: Multi Queue Multi Group mode
+ */
+enum {
+ SQ_SG_MODE = 0,
+ MQ_MG_MODE
+};
+
+/* GFAR_SQ_POLLING: Single Queue NAPI polling mode
+ * The driver supports a single pair of RX/Tx queues
+ * per interrupt group (Rx/Tx int line). MQ_MG mode
+ * devices have 2 interrupt groups, so the device will
+ * have a total of 2 Tx and 2 Rx queues in this case.
+ * GFAR_MQ_POLLING: Multi Queue NAPI polling mode
+ * The driver supports all 8 Rx and Tx HW queues,
+ * each queue mapped by the Device Tree to one of
+ * the 2 interrupt groups. This mode implies significant
+ * processing overhead (CPU and controller level).
+ */
+enum gfar_poll_mode {
+ GFAR_SQ_POLLING = 0,
+ GFAR_MQ_POLLING
+};
+
+/*
+ * Per TX queue stats
+ */
+struct tx_q_stats {
+ unsigned long tx_packets;
+ unsigned long tx_bytes;
+};
+
+/**
+ * struct gfar_priv_tx_q - per tx queue structure
+ * @txlock: per queue tx spin lock
+ * @tx_skbuff: skb pointers
+ * @skb_curtx: next skb pointer to use
+ * @skb_dirtytx: the last used skb pointer
+ * @stats: bytes/packets stats
+ * @qindex: index of this queue
+ * @dev: back pointer to the dev structure
+ * @grp: back pointer to the group to which this queue belongs
+ * @tx_bd_base: First tx buffer descriptor
+ * @cur_tx: Next free ring entry
+ * @dirty_tx: First buffer in line to be transmitted
+ * @tx_ring_size: Tx ring size
+ * @num_txbdfree: number of free TxBds
+ * @txcoalescing: enable/disable tx coalescing
+ * @txic: transmit interrupt coalescing value
+ * @txcount: coalescing value if based on tx frame count
+ * @txtime: coalescing value if based on time
+ */
+struct gfar_priv_tx_q {
+ /* cacheline 1 */
+ spinlock_t txlock __attribute__ ((aligned (SMP_CACHE_BYTES)));
+ struct txbd8 *tx_bd_base;
+ struct txbd8 *cur_tx;
+ unsigned int num_txbdfree;
+ unsigned short skb_curtx;
+ unsigned short tx_ring_size;
+ struct tx_q_stats stats;
+ struct gfar_priv_grp *grp;
+ /* cacheline 2 */
+ struct net_device *dev;
+ struct sk_buff **tx_skbuff;
+ struct txbd8 *dirty_tx;
+ unsigned short skb_dirtytx;
+ unsigned short qindex;
+ /* Configuration info for the coalescing features */
+ unsigned int txcoalescing;
+ unsigned long txic;
+ dma_addr_t tx_bd_dma_base;
+};
+
+/*
+ * Per RX queue stats
+ */
+struct rx_q_stats {
+ unsigned long rx_packets;
+ unsigned long rx_bytes;
+ unsigned long rx_dropped;
+};
+
+/**
+ * struct gfar_priv_rx_q - per rx queue structure
+ * @rx_skbuff: skb pointers
+ * @skb_currx: currently used skb pointer
+ * @rx_bd_base: First rx buffer descriptor
+ * @cur_rx: Next free rx ring entry
+ * @qindex: index of this queue
+ * @dev: back pointer to the dev structure
+ * @rx_ring_size: Rx ring size
+ * @rxcoalescing: enable/disable rx-coalescing
+ * @rxic: receive interrupt coalescing value
+ */
+
+struct gfar_priv_rx_q {
+ struct sk_buff **rx_skbuff __aligned(SMP_CACHE_BYTES);
+ dma_addr_t rx_bd_dma_base;
+ struct rxbd8 *rx_bd_base;
+ struct rxbd8 *cur_rx;
+ struct net_device *dev;
+ struct gfar_priv_grp *grp;
+ struct rx_q_stats stats;
+ u16 skb_currx;
+ u16 qindex;
+ unsigned int rx_ring_size;
+ /* RX Coalescing values */
+ unsigned char rxcoalescing;
+ unsigned long rxic;
+ u32 __iomem *rfbptr;
+};
+
+enum gfar_irqinfo_id {
+ GFAR_TX = 0,
+ GFAR_RX = 1,
+ GFAR_ER = 2,
+ GFAR_NUM_IRQS = 3
+};
+
+struct gfar_irqinfo {
+ unsigned int irq;
+ char name[GFAR_INT_NAME_MAX];
+};
+
+/**
+ * struct gfar_priv_grp - per group structure
+ * @napi_rx: the Rx napi poll function
+ * @napi_tx: the Tx napi poll function
+ * @priv: back pointer to the priv structure
+ * @regs: the ioremapped register space for this group
+ * @irqinfo: TX/RX/ER irq data for this group
+ */
+
+struct gfar_priv_grp {
+ spinlock_t grplock __aligned(SMP_CACHE_BYTES);
+ struct napi_struct napi_rx;
+ struct napi_struct napi_tx;
+ struct gfar __iomem *regs;
+ struct gfar_priv_tx_q *tx_queue;
+ struct gfar_priv_rx_q *rx_queue;
+ unsigned int tstat;
+ unsigned int rstat;
+
+ struct gfar_private *priv;
+ unsigned long num_tx_queues;
+ unsigned long tx_bit_map;
+ unsigned long num_rx_queues;
+ unsigned long rx_bit_map;
+
+ struct gfar_irqinfo *irqinfo[GFAR_NUM_IRQS];
+};
+
+#define gfar_irq(grp, ID) \
+ ((grp)->irqinfo[GFAR_##ID])
+
+enum gfar_errata {
+ GFAR_ERRATA_74 = 0x01,
+ GFAR_ERRATA_76 = 0x02,
+ GFAR_ERRATA_A002 = 0x04,
+ GFAR_ERRATA_12 = 0x08, /* a.k.a errata eTSEC49 */
+};
+
+enum gfar_dev_state {
+ GFAR_DOWN = 1,
+ GFAR_RESETTING
+};
+
+/* Struct stolen almost completely (and shamelessly) from the FCC enet source
+ * (Ok, that's not so true anymore, but there is a family resemblance)
+ * The GFAR buffer descriptors track the ring buffers. The rx_bd_base
+ * and tx_bd_base always point to the currently available buffer.
+ * The dirty_tx tracks the current buffer that is being sent by the
+ * controller. The cur_tx and dirty_tx are equal under both completely
+ * empty and completely full conditions. The empty/ready indicator in
+ * the buffer descriptor determines the actual condition.
+ */
+struct gfar_private {
+ struct device *dev;
+ struct net_device *ndev;
+ enum gfar_errata errata;
+ unsigned int rx_buffer_size;
+
+ u16 uses_rxfcb;
+ u16 padding;
+ u32 device_flags;
+
+ /* HW time stamping enabled flag */
+ int hwts_rx_en;
+ int hwts_tx_en;
+
+ struct gfar_priv_tx_q *tx_queue[MAX_TX_QS];
+ struct gfar_priv_rx_q *rx_queue[MAX_RX_QS];
+ struct gfar_priv_grp gfargrp[MAXGROUPS];
+
+ unsigned long state;
+
+ unsigned short mode;
+ unsigned short poll_mode;
+ unsigned int num_tx_queues;
+ unsigned int num_rx_queues;
+ unsigned int num_grps;
+ int tx_actual_en;
+
+ /* Network Statistics */
+ struct gfar_extra_stats extra_stats;
+
+ /* PHY stuff */
+ phy_interface_t interface;
+ struct device_node *phy_node;
+ struct device_node *tbi_node;
+ struct phy_device *phydev;
+ struct mii_bus *mii_bus;
+ int oldspeed;
+ int oldduplex;
+ int oldlink;
+
+ /* Bitfield update lock */
+ spinlock_t bflock;
+
+ uint32_t msg_enable;
+
+ struct work_struct reset_task;
+
+ struct platform_device *ofdev;
+ unsigned char
+ extended_hash:1,
+ bd_stash_en:1,
+ rx_filer_enable:1,
+ /* Wake-on-LAN enabled */
+ wol_en:1,
+		/* Enable priority based Tx scheduling in HW */
+ prio_sched_en:1,
+ /* Flow control flags */
+ pause_aneg_en:1,
+ tx_pause_en:1,
+ rx_pause_en:1;
+
+ /* The total tx and rx ring size for the enabled queues */
+ unsigned int total_tx_ring_size;
+ unsigned int total_rx_ring_size;
+
+ u32 rqueue;
+ u32 tqueue;
+
+ /* RX per device parameters */
+ unsigned int rx_stash_size;
+ unsigned int rx_stash_index;
+
+ u32 cur_filer_idx;
+
+ /* RX queue filer rule set*/
+ struct ethtool_rx_list rx_list;
+ struct mutex rx_queue_access;
+
+ /* Hash registers and their width */
+ u32 __iomem *hash_regs[16];
+ int hash_width;
+
+ /*Filer table*/
+ unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
+ unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];
+};
+
+
+static inline int gfar_has_errata(struct gfar_private *priv,
+ enum gfar_errata err)
+{
+ return priv->errata & err;
+}
+
+static inline u32 gfar_read(unsigned __iomem *addr)
+{
+ u32 val;
+ val = ioread32be(addr);
+ return val;
+}
+
+static inline void gfar_write(unsigned __iomem *addr, u32 val)
+{
+ iowrite32be(val, addr);
+}
+
+static inline void gfar_write_filer(struct gfar_private *priv,
+ unsigned int far, unsigned int fcr, unsigned int fpr)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+
+ gfar_write(&regs->rqfar, far);
+ gfar_write(&regs->rqfcr, fcr);
+ gfar_write(&regs->rqfpr, fpr);
+}
+
+static inline void gfar_read_filer(struct gfar_private *priv,
+ unsigned int far, unsigned int *fcr, unsigned int *fpr)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+
+ gfar_write(&regs->rqfar, far);
+ *fcr = gfar_read(&regs->rqfcr);
+ *fpr = gfar_read(&regs->rqfpr);
+}
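+
+/* Illustrative note: the filer table is programmed indirectly -- the entry
+ * index goes into rqfar and the control/property words into rqfcr/rqfpr;
+ * ethflow_to_filer_rules() in gianfar_ethtool.c builds its hash rules
+ * through gfar_write_filer() this way.
+ */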
+
+static inline void gfar_write_isrg(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 __iomem *baddr = &regs->isrg0;
+ u32 isrg = 0;
+ int grp_idx, i;
+
+ for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
+ struct gfar_priv_grp *grp = &priv->gfargrp[grp_idx];
+
+ for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
+ isrg |= (ISRG_RR0 >> i);
+ }
+
+ for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
+ isrg |= (ISRG_TR0 >> i);
+ }
+
+ gfar_write(baddr, isrg);
+
+ baddr++;
+ isrg = 0;
+ }
+}
+
+static inline int gfar_is_dma_stopped(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+
+ return ((gfar_read(&regs->ievent) & (IEVENT_GRSC | IEVENT_GTSC)) ==
+ (IEVENT_GRSC | IEVENT_GTSC));
+}
+
+static inline int gfar_is_rx_dma_stopped(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+
+ return gfar_read(&regs->ievent) & IEVENT_GRSC;
+}
+
+static inline void gfar_wmb(void)
+{
+#if defined(CONFIG_PPC)
+ /* The powerpc-specific eieio() is used, as wmb() has too strong
+ * semantics (it requires synchronization between cacheable and
+ * uncacheable mappings, which eieio() doesn't provide and which we
+ * don't need), thus requiring a more expensive sync instruction. At
+ * some point, the set of architecture-independent barrier functions
+ * should be expanded to include weaker barriers.
+ */
+ eieio();
+#else
+	wmb(); /* order write accesses for BD (or FCB) fields */
+#endif
+}
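+
+/* Typical usage sketch (not a new helper): fill in the TxBD length/pointer
+ * fields first, call gfar_wmb(), and only then set TXBD_READY in lstatus so
+ * the controller never sees a ready descriptor with stale fields.
+ */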
+
+static inline void gfar_clear_txbd_status(struct txbd8 *bdp)
+{
+ u32 lstatus = be32_to_cpu(bdp->lstatus);
+
+ lstatus &= BD_LFLAG(TXBD_WRAP);
+ bdp->lstatus = cpu_to_be32(lstatus);
+}
+
+irqreturn_t gfar_receive(int irq, void *dev_id);
+int startup_gfar(struct net_device *dev);
+void stop_gfar(struct net_device *dev);
+void reset_gfar(struct net_device *dev);
+void gfar_mac_reset(struct gfar_private *priv);
+void gfar_halt(struct gfar_private *priv);
+void gfar_start(struct gfar_private *priv);
+void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev, int enable,
+ u32 regnum, u32 read);
+void gfar_configure_coalescing_all(struct gfar_private *priv);
+int gfar_set_features(struct net_device *dev, netdev_features_t features);
+
+extern const struct ethtool_ops gfar_ethtool_ops;
+
+#define MAX_FILER_CACHE_IDX (2*(MAX_FILER_IDX))
+
+#define RQFCR_PID_PRI_MASK 0xFFFFFFF8
+#define RQFCR_PID_L4P_MASK 0xFFFFFF00
+#define RQFCR_PID_VID_MASK 0xFFFFF000
+#define RQFCR_PID_PORT_MASK 0xFFFF0000
+#define RQFCR_PID_MAC_MASK 0xFF000000
+
+struct gfar_mask_entry {
+	unsigned int mask; /* The mask value which is valid from start to end */
+ unsigned int start;
+ unsigned int end;
+	unsigned int block; /* Same block values indicate dependent entries */
+};
+
+/* Represents a receive filer table entry */
+struct gfar_filer_entry {
+ u32 ctrl;
+ u32 prop;
+};
+
+
+/* The 20 additional entries are a shadow for one extra element */
+struct filer_table {
+ u32 index;
+ struct gfar_filer_entry fe[MAX_FILER_CACHE_IDX + 20];
+};
+
+/* The gianfar_ptp module will set this variable */
+extern int gfar_phc_index;
+
+#endif /* __GIANFAR_H */
diff --git a/kernel/drivers/net/ethernet/freescale/gianfar_ethtool.c b/kernel/drivers/net/ethernet/freescale/gianfar_ethtool.c
new file mode 100644
index 000000000..fda12fb32
--- /dev/null
+++ b/kernel/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -0,0 +1,1911 @@
+/*
+ * drivers/net/ethernet/freescale/gianfar_ethtool.c
+ *
+ * Gianfar Ethernet Driver
+ * Ethtool support for Gianfar Enet
+ * Based on e1000 ethtool support
+ *
+ * Author: Andy Fleming
+ * Maintainer: Kumar Gala
+ * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
+ *
+ * Copyright 2003-2006, 2008-2009, 2011 Freescale Semiconductor, Inc.
+ *
+ * This software may be used and distributed according to
+ * the terms of the GNU Public License, Version 2, incorporated herein
+ * by reference.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/net_tstamp.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+#include <linux/module.h>
+#include <linux/crc32.h>
+#include <asm/types.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/sort.h>
+#include <linux/if_vlan.h>
+
+#include "gianfar.h"
+
+#define GFAR_MAX_COAL_USECS 0xffff
+#define GFAR_MAX_COAL_FRAMES 0xff
+static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
+ u64 *buf);
+static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf);
+static int gfar_gcoalesce(struct net_device *dev,
+ struct ethtool_coalesce *cvals);
+static int gfar_scoalesce(struct net_device *dev,
+ struct ethtool_coalesce *cvals);
+static void gfar_gringparam(struct net_device *dev,
+ struct ethtool_ringparam *rvals);
+static int gfar_sringparam(struct net_device *dev,
+ struct ethtool_ringparam *rvals);
+static void gfar_gdrvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo);
+
+static const char stat_gstrings[][ETH_GSTRING_LEN] = {
+ "rx-large-frame-errors",
+ "rx-short-frame-errors",
+ "rx-non-octet-errors",
+ "rx-crc-errors",
+ "rx-overrun-errors",
+ "rx-busy-errors",
+ "rx-babbling-errors",
+ "rx-truncated-frames",
+ "ethernet-bus-error",
+ "tx-babbling-errors",
+ "tx-underrun-errors",
+ "rx-skb-missing-errors",
+ "tx-timeout-errors",
+ "tx-rx-64-frames",
+ "tx-rx-65-127-frames",
+ "tx-rx-128-255-frames",
+ "tx-rx-256-511-frames",
+ "tx-rx-512-1023-frames",
+ "tx-rx-1024-1518-frames",
+ "tx-rx-1519-1522-good-vlan",
+ "rx-bytes",
+ "rx-packets",
+ "rx-fcs-errors",
+ "receive-multicast-packet",
+ "receive-broadcast-packet",
+ "rx-control-frame-packets",
+ "rx-pause-frame-packets",
+ "rx-unknown-op-code",
+ "rx-alignment-error",
+ "rx-frame-length-error",
+ "rx-code-error",
+ "rx-carrier-sense-error",
+ "rx-undersize-packets",
+ "rx-oversize-packets",
+ "rx-fragmented-frames",
+ "rx-jabber-frames",
+ "rx-dropped-frames",
+ "tx-byte-counter",
+ "tx-packets",
+ "tx-multicast-packets",
+ "tx-broadcast-packets",
+ "tx-pause-control-frames",
+ "tx-deferral-packets",
+ "tx-excessive-deferral-packets",
+ "tx-single-collision-packets",
+ "tx-multiple-collision-packets",
+ "tx-late-collision-packets",
+ "tx-excessive-collision-packets",
+ "tx-total-collision",
+ "reserved",
+ "tx-dropped-frames",
+ "tx-jabber-frames",
+ "tx-fcs-errors",
+ "tx-control-frames",
+ "tx-oversize-frames",
+ "tx-undersize-frames",
+ "tx-fragmented-frames",
+};
+
+/* Fill in a buffer with the strings which correspond to the
+ * stats */
+static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON)
+ memcpy(buf, stat_gstrings, GFAR_STATS_LEN * ETH_GSTRING_LEN);
+ else
+ memcpy(buf, stat_gstrings,
+ GFAR_EXTRA_STATS_LEN * ETH_GSTRING_LEN);
+}
+
+/* Fill in an array of 64-bit statistics from various sources.
+ * This array will be appended to the end of the ethtool_stats
+ * structure, and returned to user space
+ */
+static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
+ u64 *buf)
+{
+ int i;
+ struct gfar_private *priv = netdev_priv(dev);
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ atomic64_t *extra = (atomic64_t *)&priv->extra_stats;
+
+ for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++)
+ buf[i] = atomic64_read(&extra[i]);
+
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
+ u32 __iomem *rmon = (u32 __iomem *) &regs->rmon;
+
+ for (; i < GFAR_STATS_LEN; i++, rmon++)
+ buf[i] = (u64) gfar_read(rmon);
+ }
+}
+
+static int gfar_sset_count(struct net_device *dev, int sset)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON)
+ return GFAR_STATS_LEN;
+ else
+ return GFAR_EXTRA_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/* Fills in the drvinfo structure with some basic info */
+static void gfar_gdrvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, gfar_driver_version,
+ sizeof(drvinfo->version));
+ strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
+ strlcpy(drvinfo->bus_info, "N/A", sizeof(drvinfo->bus_info));
+ drvinfo->regdump_len = 0;
+ drvinfo->eedump_len = 0;
+}
+
+
+static int gfar_ssettings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ struct phy_device *phydev = priv->phydev;
+
+ if (NULL == phydev)
+ return -ENODEV;
+
+ return phy_ethtool_sset(phydev, cmd);
+}
+
+
+/* Return the current settings in the ethtool_cmd structure */
+static int gfar_gsettings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ struct phy_device *phydev = priv->phydev;
+ struct gfar_priv_rx_q *rx_queue = NULL;
+ struct gfar_priv_tx_q *tx_queue = NULL;
+
+ if (NULL == phydev)
+ return -ENODEV;
+ tx_queue = priv->tx_queue[0];
+ rx_queue = priv->rx_queue[0];
+
+	/* etsec-1.7 and older versions have only one txic
+	 * and one rxic register, although they support multiple queues */
+ cmd->maxtxpkt = get_icft_value(tx_queue->txic);
+ cmd->maxrxpkt = get_icft_value(rx_queue->rxic);
+
+ return phy_ethtool_gset(phydev, cmd);
+}
+
+/* Return the length of the register structure */
+static int gfar_reglen(struct net_device *dev)
+{
+ return sizeof (struct gfar);
+}
+
+/* Return a dump of the GFAR register space */
+static void gfar_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *regbuf)
+{
+ int i;
+ struct gfar_private *priv = netdev_priv(dev);
+ u32 __iomem *theregs = (u32 __iomem *) priv->gfargrp[0].regs;
+ u32 *buf = (u32 *) regbuf;
+
+ for (i = 0; i < sizeof (struct gfar) / sizeof (u32); i++)
+ buf[i] = gfar_read(&theregs[i]);
+}
+
+/* Convert microseconds to ethernet clock ticks, which changes
+ * depending on what speed the controller is running at */
+static unsigned int gfar_usecs2ticks(struct gfar_private *priv,
+ unsigned int usecs)
+{
+ unsigned int count;
+
+ /* The timer is different, depending on the interface speed */
+ switch (priv->phydev->speed) {
+ case SPEED_1000:
+ count = GFAR_GBIT_TIME;
+ break;
+ case SPEED_100:
+ count = GFAR_100_TIME;
+ break;
+ case SPEED_10:
+ default:
+ count = GFAR_10_TIME;
+ break;
+ }
+
+ /* Make sure we return a number greater than 0
+ * if usecs > 0 */
+ return (usecs * 1000 + count - 1) / count;
+}
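+
+/* Worked example (illustrative only): at gigabit speed count is
+ * GFAR_GBIT_TIME = 512, so 5 us becomes (5 * 1000 + 511) / 512 = 10 ticks;
+ * the round-up keeps any non-zero request from collapsing to 0 ticks.
+ */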
+
+/* Convert ethernet clock ticks to microseconds */
+static unsigned int gfar_ticks2usecs(struct gfar_private *priv,
+ unsigned int ticks)
+{
+ unsigned int count;
+
+ /* The timer is different, depending on the interface speed */
+ switch (priv->phydev->speed) {
+ case SPEED_1000:
+ count = GFAR_GBIT_TIME;
+ break;
+ case SPEED_100:
+ count = GFAR_100_TIME;
+ break;
+ case SPEED_10:
+ default:
+ count = GFAR_10_TIME;
+ break;
+ }
+
+	/* Make sure we return a number greater than 0
+	 * if ticks > 0 */
+ return (ticks * count) / 1000;
+}
+
+/* Get the coalescing parameters, and put them in the cvals
+ * structure. */
+static int gfar_gcoalesce(struct net_device *dev,
+ struct ethtool_coalesce *cvals)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ struct gfar_priv_rx_q *rx_queue = NULL;
+ struct gfar_priv_tx_q *tx_queue = NULL;
+ unsigned long rxtime;
+ unsigned long rxcount;
+ unsigned long txtime;
+ unsigned long txcount;
+
+ if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
+ return -EOPNOTSUPP;
+
+ if (NULL == priv->phydev)
+ return -ENODEV;
+
+ rx_queue = priv->rx_queue[0];
+ tx_queue = priv->tx_queue[0];
+
+ rxtime = get_ictt_value(rx_queue->rxic);
+ rxcount = get_icft_value(rx_queue->rxic);
+ txtime = get_ictt_value(tx_queue->txic);
+ txcount = get_icft_value(tx_queue->txic);
+ cvals->rx_coalesce_usecs = gfar_ticks2usecs(priv, rxtime);
+ cvals->rx_max_coalesced_frames = rxcount;
+
+ cvals->tx_coalesce_usecs = gfar_ticks2usecs(priv, txtime);
+ cvals->tx_max_coalesced_frames = txcount;
+
+ cvals->use_adaptive_rx_coalesce = 0;
+ cvals->use_adaptive_tx_coalesce = 0;
+
+ cvals->pkt_rate_low = 0;
+ cvals->rx_coalesce_usecs_low = 0;
+ cvals->rx_max_coalesced_frames_low = 0;
+ cvals->tx_coalesce_usecs_low = 0;
+ cvals->tx_max_coalesced_frames_low = 0;
+
+ /* When the packet rate is below pkt_rate_high but above
+ * pkt_rate_low (both measured in packets per second) the
+ * normal {rx,tx}_* coalescing parameters are used.
+ */
+
+	/* When the packet rate (measured in packets per second)
+	 * is above pkt_rate_high, the {rx,tx}_*_high parameters are
+ * used.
+ */
+ cvals->pkt_rate_high = 0;
+ cvals->rx_coalesce_usecs_high = 0;
+ cvals->rx_max_coalesced_frames_high = 0;
+ cvals->tx_coalesce_usecs_high = 0;
+ cvals->tx_max_coalesced_frames_high = 0;
+
+ /* How often to do adaptive coalescing packet rate sampling,
+ * measured in seconds. Must not be zero.
+ */
+ cvals->rate_sample_interval = 0;
+
+ return 0;
+}
+
+/* Change the coalescing values.
+ * Both cvals->*_usecs and cvals->*_frames have to be > 0
+ * in order for coalescing to be active
+ */
+static int gfar_scoalesce(struct net_device *dev,
+ struct ethtool_coalesce *cvals)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ int i, err = 0;
+
+ if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
+ return -EOPNOTSUPP;
+
+ if (NULL == priv->phydev)
+ return -ENODEV;
+
+ /* Check the bounds of the values */
+ if (cvals->rx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
+ netdev_info(dev, "Coalescing is limited to %d microseconds\n",
+ GFAR_MAX_COAL_USECS);
+ return -EINVAL;
+ }
+
+ if (cvals->rx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
+ netdev_info(dev, "Coalescing is limited to %d frames\n",
+ GFAR_MAX_COAL_FRAMES);
+ return -EINVAL;
+ }
+
+ /* Check the bounds of the values */
+ if (cvals->tx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
+ netdev_info(dev, "Coalescing is limited to %d microseconds\n",
+ GFAR_MAX_COAL_USECS);
+ return -EINVAL;
+ }
+
+ if (cvals->tx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
+ netdev_info(dev, "Coalescing is limited to %d frames\n",
+ GFAR_MAX_COAL_FRAMES);
+ return -EINVAL;
+ }
+
+ while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
+ cpu_relax();
+
+ /* Set up rx coalescing */
+ if ((cvals->rx_coalesce_usecs == 0) ||
+ (cvals->rx_max_coalesced_frames == 0)) {
+ for (i = 0; i < priv->num_rx_queues; i++)
+ priv->rx_queue[i]->rxcoalescing = 0;
+ } else {
+ for (i = 0; i < priv->num_rx_queues; i++)
+ priv->rx_queue[i]->rxcoalescing = 1;
+ }
+
+ for (i = 0; i < priv->num_rx_queues; i++) {
+ priv->rx_queue[i]->rxic = mk_ic_value(
+ cvals->rx_max_coalesced_frames,
+ gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs));
+ }
+
+ /* Set up tx coalescing */
+ if ((cvals->tx_coalesce_usecs == 0) ||
+ (cvals->tx_max_coalesced_frames == 0)) {
+ for (i = 0; i < priv->num_tx_queues; i++)
+ priv->tx_queue[i]->txcoalescing = 0;
+ } else {
+ for (i = 0; i < priv->num_tx_queues; i++)
+ priv->tx_queue[i]->txcoalescing = 1;
+ }
+
+ for (i = 0; i < priv->num_tx_queues; i++) {
+ priv->tx_queue[i]->txic = mk_ic_value(
+ cvals->tx_max_coalesced_frames,
+ gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs));
+ }
+
+ if (dev->flags & IFF_UP) {
+ stop_gfar(dev);
+ err = startup_gfar(dev);
+ } else {
+ gfar_mac_reset(priv);
+ }
+
+ clear_bit_unlock(GFAR_RESETTING, &priv->state);
+
+ return err;
+}
+
+/* Fills in rvals with the current ring parameters. Currently,
+ * rx, rx_mini, and rx_jumbo rings are the same size, as mini and
+ * jumbo are ignored by the driver */
+static void gfar_gringparam(struct net_device *dev,
+ struct ethtool_ringparam *rvals)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ struct gfar_priv_tx_q *tx_queue = NULL;
+ struct gfar_priv_rx_q *rx_queue = NULL;
+
+ tx_queue = priv->tx_queue[0];
+ rx_queue = priv->rx_queue[0];
+
+ rvals->rx_max_pending = GFAR_RX_MAX_RING_SIZE;
+ rvals->rx_mini_max_pending = GFAR_RX_MAX_RING_SIZE;
+ rvals->rx_jumbo_max_pending = GFAR_RX_MAX_RING_SIZE;
+ rvals->tx_max_pending = GFAR_TX_MAX_RING_SIZE;
+
+ /* Values changeable by the user. The valid values are
+ * in the range 1 to the "*_max_pending" counterpart above.
+ */
+ rvals->rx_pending = rx_queue->rx_ring_size;
+ rvals->rx_mini_pending = rx_queue->rx_ring_size;
+ rvals->rx_jumbo_pending = rx_queue->rx_ring_size;
+ rvals->tx_pending = tx_queue->tx_ring_size;
+}
+
+/* Change the current ring parameters, stopping the controller if
+ * necessary so that we don't mess things up while we're in motion.
+ */
+static int gfar_sringparam(struct net_device *dev,
+ struct ethtool_ringparam *rvals)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ int err = 0, i;
+
+ if (rvals->rx_pending > GFAR_RX_MAX_RING_SIZE)
+ return -EINVAL;
+
+ if (!is_power_of_2(rvals->rx_pending)) {
+ netdev_err(dev, "Ring sizes must be a power of 2\n");
+ return -EINVAL;
+ }
+
+ if (rvals->tx_pending > GFAR_TX_MAX_RING_SIZE)
+ return -EINVAL;
+
+ if (!is_power_of_2(rvals->tx_pending)) {
+ netdev_err(dev, "Ring sizes must be a power of 2\n");
+ return -EINVAL;
+ }
+
+ while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
+ cpu_relax();
+
+ if (dev->flags & IFF_UP)
+ stop_gfar(dev);
+
+ /* Change the sizes */
+ for (i = 0; i < priv->num_rx_queues; i++)
+ priv->rx_queue[i]->rx_ring_size = rvals->rx_pending;
+
+ for (i = 0; i < priv->num_tx_queues; i++)
+ priv->tx_queue[i]->tx_ring_size = rvals->tx_pending;
+
+ /* Rebuild the rings with the new size */
+ if (dev->flags & IFF_UP)
+ err = startup_gfar(dev);
+
+ clear_bit_unlock(GFAR_RESETTING, &priv->state);
+
+ return err;
+}
+
+static void gfar_gpauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *epause)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+
+ epause->autoneg = !!priv->pause_aneg_en;
+ epause->rx_pause = !!priv->rx_pause_en;
+ epause->tx_pause = !!priv->tx_pause_en;
+}
+
+static int gfar_spauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *epause)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ struct phy_device *phydev = priv->phydev;
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 oldadv, newadv;
+
+ if (!phydev)
+ return -ENODEV;
+
+ if (!(phydev->supported & SUPPORTED_Pause) ||
+ (!(phydev->supported & SUPPORTED_Asym_Pause) &&
+ (epause->rx_pause != epause->tx_pause)))
+ return -EINVAL;
+
+ priv->rx_pause_en = priv->tx_pause_en = 0;
+ if (epause->rx_pause) {
+ priv->rx_pause_en = 1;
+
+ if (epause->tx_pause) {
+ priv->tx_pause_en = 1;
+ /* FLOW_CTRL_RX & TX */
+ newadv = ADVERTISED_Pause;
+		} else /* FLOW_CTRL_RX */
+ newadv = ADVERTISED_Pause | ADVERTISED_Asym_Pause;
+ } else if (epause->tx_pause) {
+ priv->tx_pause_en = 1;
+		/* FLOW_CTRL_TX */
+ newadv = ADVERTISED_Asym_Pause;
+ } else
+ newadv = 0;
+
+ if (epause->autoneg)
+ priv->pause_aneg_en = 1;
+ else
+ priv->pause_aneg_en = 0;
+
+ oldadv = phydev->advertising &
+ (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+ if (oldadv != newadv) {
+ phydev->advertising &=
+ ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+ phydev->advertising |= newadv;
+ if (phydev->autoneg)
+ /* inform link partner of our
+ * new flow ctrl settings
+ */
+ return phy_start_aneg(phydev);
+
+ if (!epause->autoneg) {
+ u32 tempval;
+ tempval = gfar_read(&regs->maccfg1);
+ tempval &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
+
+ priv->tx_actual_en = 0;
+ if (priv->tx_pause_en) {
+ priv->tx_actual_en = 1;
+ tempval |= MACCFG1_TX_FLOW;
+ }
+
+ if (priv->rx_pause_en)
+ tempval |= MACCFG1_RX_FLOW;
+ gfar_write(&regs->maccfg1, tempval);
+ }
+ }
+
+ return 0;
+}
+
+int gfar_set_features(struct net_device *dev, netdev_features_t features)
+{
+ netdev_features_t changed = dev->features ^ features;
+ struct gfar_private *priv = netdev_priv(dev);
+ int err = 0;
+
+ if (!(changed & (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_RXCSUM)))
+ return 0;
+
+ while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
+ cpu_relax();
+
+ dev->features = features;
+
+ if (dev->flags & IFF_UP) {
+ /* Now we take down the rings to rebuild them */
+ stop_gfar(dev);
+ err = startup_gfar(dev);
+ } else {
+ gfar_mac_reset(priv);
+ }
+
+ clear_bit_unlock(GFAR_RESETTING, &priv->state);
+
+ return err;
+}
+
+static uint32_t gfar_get_msglevel(struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+
+ return priv->msg_enable;
+}
+
+static void gfar_set_msglevel(struct net_device *dev, uint32_t data)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+
+ priv->msg_enable = data;
+}
+
+#ifdef CONFIG_PM
+static void gfar_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) {
+ wol->supported = WAKE_MAGIC;
+ wol->wolopts = priv->wol_en ? WAKE_MAGIC : 0;
+ } else {
+ wol->supported = wol->wolopts = 0;
+ }
+}
+
+static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ unsigned long flags;
+
+ if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
+ wol->wolopts != 0)
+ return -EINVAL;
+
+ if (wol->wolopts & ~WAKE_MAGIC)
+ return -EINVAL;
+
+ device_set_wakeup_enable(&dev->dev, wol->wolopts & WAKE_MAGIC);
+
+ spin_lock_irqsave(&priv->bflock, flags);
+ priv->wol_en = !!device_may_wakeup(&dev->dev);
+ spin_unlock_irqrestore(&priv->bflock, flags);
+
+ return 0;
+}
+#endif
+
+static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
+{
+ u32 fcr = 0x0, fpr = FPR_FILER_MASK;
+
+ if (ethflow & RXH_L2DA) {
+		fcr = RQFCR_PID_DAH | RQFCR_CMP_NOMATCH |
+ RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
+ priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
+ priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
+ gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
+ priv->cur_filer_idx = priv->cur_filer_idx - 1;
+
+ fcr = RQFCR_PID_DAL | RQFCR_AND | RQFCR_CMP_NOMATCH |
+ RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
+ priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
+ priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
+ gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
+ priv->cur_filer_idx = priv->cur_filer_idx - 1;
+ }
+
+ if (ethflow & RXH_VLAN) {
+ fcr = RQFCR_PID_VID | RQFCR_CMP_NOMATCH | RQFCR_HASH |
+ RQFCR_AND | RQFCR_HASHTBL_0;
+ gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
+ priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
+ priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
+ priv->cur_filer_idx = priv->cur_filer_idx - 1;
+ }
+
+ if (ethflow & RXH_IP_SRC) {
+ fcr = RQFCR_PID_SIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
+ RQFCR_AND | RQFCR_HASHTBL_0;
+ priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
+ priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
+ gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
+ priv->cur_filer_idx = priv->cur_filer_idx - 1;
+ }
+
+ if (ethflow & (RXH_IP_DST)) {
+ fcr = RQFCR_PID_DIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
+ RQFCR_AND | RQFCR_HASHTBL_0;
+ priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
+ priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
+ gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
+ priv->cur_filer_idx = priv->cur_filer_idx - 1;
+ }
+
+ if (ethflow & RXH_L3_PROTO) {
+ fcr = RQFCR_PID_L4P | RQFCR_CMP_NOMATCH | RQFCR_HASH |
+ RQFCR_AND | RQFCR_HASHTBL_0;
+ priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
+ priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
+ gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
+ priv->cur_filer_idx = priv->cur_filer_idx - 1;
+ }
+
+ if (ethflow & RXH_L4_B_0_1) {
+ fcr = RQFCR_PID_SPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
+ RQFCR_AND | RQFCR_HASHTBL_0;
+ priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
+ priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
+ gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
+ priv->cur_filer_idx = priv->cur_filer_idx - 1;
+ }
+
+ if (ethflow & RXH_L4_B_2_3) {
+ fcr = RQFCR_PID_DPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
+ RQFCR_AND | RQFCR_HASHTBL_0;
+ priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
+ priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
+ gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
+ priv->cur_filer_idx = priv->cur_filer_idx - 1;
+ }
+}
+
+static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow,
+ u64 class)
+{
+ unsigned int last_rule_idx = priv->cur_filer_idx;
+ unsigned int cmp_rqfpr;
+ unsigned int *local_rqfpr;
+ unsigned int *local_rqfcr;
+ int i = 0x0, k = 0x0;
+ int j = MAX_FILER_IDX, l = 0x0;
+ int ret = 1;
+
+ local_rqfpr = kmalloc_array(MAX_FILER_IDX + 1, sizeof(unsigned int),
+ GFP_KERNEL);
+ local_rqfcr = kmalloc_array(MAX_FILER_IDX + 1, sizeof(unsigned int),
+ GFP_KERNEL);
+ if (!local_rqfpr || !local_rqfcr) {
+ ret = 0;
+ goto err;
+ }
+
+ switch (class) {
+ case TCP_V4_FLOW:
+ cmp_rqfpr = RQFPR_IPV4 | RQFPR_TCP;
+ break;
+ case UDP_V4_FLOW:
+ cmp_rqfpr = RQFPR_IPV4 | RQFPR_UDP;
+ break;
+ case TCP_V6_FLOW:
+ cmp_rqfpr = RQFPR_IPV6 | RQFPR_TCP;
+ break;
+ case UDP_V6_FLOW:
+ cmp_rqfpr = RQFPR_IPV6 | RQFPR_UDP;
+ break;
+ default:
+ netdev_err(priv->ndev,
+ "This flow class is not supported\n");
+ ret = 0;
+ goto err;
+ }
+
+ for (i = 0; i < MAX_FILER_IDX + 1; i++) {
+ local_rqfpr[j] = priv->ftp_rqfpr[i];
+ local_rqfcr[j] = priv->ftp_rqfcr[i];
+ j--;
+ if ((priv->ftp_rqfcr[i] ==
+ (RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND)) &&
+ (priv->ftp_rqfpr[i] == cmp_rqfpr))
+ break;
+ }
+
+ if (i == MAX_FILER_IDX + 1) {
+ netdev_err(priv->ndev,
+ "No parse rule found, can't create hash rules\n");
+ ret = 0;
+ goto err;
+ }
+
+ /* If a match was found, it marks the start of a cluster rule;
+ * if one was already programmed, we need to overwrite those rules
+ */
+ for (l = i+1; l < MAX_FILER_IDX; l++) {
+ if ((priv->ftp_rqfcr[l] & RQFCR_CLE) &&
+ !(priv->ftp_rqfcr[l] & RQFCR_AND)) {
+ priv->ftp_rqfcr[l] = RQFCR_CLE | RQFCR_CMP_EXACT |
+ RQFCR_HASHTBL_0 | RQFCR_PID_MASK;
+ priv->ftp_rqfpr[l] = FPR_FILER_MASK;
+ gfar_write_filer(priv, l, priv->ftp_rqfcr[l],
+ priv->ftp_rqfpr[l]);
+ break;
+ }
+
+ if (!(priv->ftp_rqfcr[l] & RQFCR_CLE) &&
+ (priv->ftp_rqfcr[l] & RQFCR_AND))
+ continue;
+ else {
+ local_rqfpr[j] = priv->ftp_rqfpr[l];
+ local_rqfcr[j] = priv->ftp_rqfcr[l];
+ j--;
+ }
+ }
+
+ priv->cur_filer_idx = l - 1;
+ last_rule_idx = l;
+
+ /* hash rules */
+ ethflow_to_filer_rules(priv, ethflow);
+
+ /* Write back the popped out rules again */
+ for (k = j+1; k < MAX_FILER_IDX; k++) {
+ priv->ftp_rqfpr[priv->cur_filer_idx] = local_rqfpr[k];
+ priv->ftp_rqfcr[priv->cur_filer_idx] = local_rqfcr[k];
+ gfar_write_filer(priv, priv->cur_filer_idx,
+ local_rqfcr[k], local_rqfpr[k]);
+ if (!priv->cur_filer_idx)
+ break;
+ priv->cur_filer_idx = priv->cur_filer_idx - 1;
+ }
+
+err:
+ kfree(local_rqfcr);
+ kfree(local_rqfpr);
+ return ret;
+}
+
+static int gfar_set_hash_opts(struct gfar_private *priv,
+ struct ethtool_rxnfc *cmd)
+{
+ /* write the filer rules here */
+ if (!gfar_ethflow_to_filer_table(priv, cmd->data, cmd->flow_type))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int gfar_check_filer_hardware(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 i;
+
+ /* Check if we are in FIFO mode */
+ i = gfar_read(&regs->ecntrl);
+ i &= ECNTRL_FIFM;
+ if (i == ECNTRL_FIFM) {
+ netdev_notice(priv->ndev, "Interface in FIFO mode\n");
+ i = gfar_read(&regs->rctrl);
+ i &= RCTRL_PRSDEP_MASK | RCTRL_PRSFM;
+ if (i == (RCTRL_PRSDEP_MASK | RCTRL_PRSFM)) {
+ netdev_info(priv->ndev,
+ "Receive Queue Filtering enabled\n");
+ } else {
+ netdev_warn(priv->ndev,
+ "Receive Queue Filtering disabled\n");
+ return -EOPNOTSUPP;
+ }
+ }
+ /* Or in standard mode */
+ else {
+ i = gfar_read(&regs->rctrl);
+ i &= RCTRL_PRSDEP_MASK;
+ if (i == RCTRL_PRSDEP_MASK) {
+ netdev_info(priv->ndev,
+ "Receive Queue Filtering enabled\n");
+ } else {
+ netdev_warn(priv->ndev,
+ "Receive Queue Filtering disabled\n");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ /* Set the arbitrary filer rule properties
+ * to the first 4 layer-4 bytes
+ */
+ gfar_write(&regs->rbifx, 0xC0C1C2C3);
+ return 0;
+}
+
+static int gfar_comp_asc(const void *a, const void *b)
+{
+ return memcmp(a, b, 4);
+}
+
+static int gfar_comp_desc(const void *a, const void *b)
+{
+ return -memcmp(a, b, 4);
+}
+
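+/* swap callback for sort(): exchanges two gfar_mask_entry elements (four u32 words each) */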
+static void gfar_swap(void *a, void *b, int size)
+{
+ u32 *_a = a;
+ u32 *_b = b;
+
+ swap(_a[0], _b[0]);
+ swap(_a[1], _b[1]);
+ swap(_a[2], _b[2]);
+ swap(_a[3], _b[3]);
+}
+
+/* Write a mask to filer cache */
+static void gfar_set_mask(u32 mask, struct filer_table *tab)
+{
+ tab->fe[tab->index].ctrl = RQFCR_AND | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
+ tab->fe[tab->index].prop = mask;
+ tab->index++;
+}
+
+/* Sets parse bits (e.g. IP or TCP) */
+static void gfar_set_parse_bits(u32 value, u32 mask, struct filer_table *tab)
+{
+ gfar_set_mask(mask, tab);
+ tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_PID_PARSE |
+ RQFCR_AND;
+ tab->fe[tab->index].prop = value;
+ tab->index++;
+}
+
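+/* Emit a mask entry followed by an exact-match entry for the given property ID (flag) */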
+static void gfar_set_general_attribute(u32 value, u32 mask, u32 flag,
+ struct filer_table *tab)
+{
+ gfar_set_mask(mask, tab);
+ tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_AND | flag;
+ tab->fe[tab->index].prop = value;
+ tab->index++;
+}
+
+/* For setting a tuple of value and mask of type flag
+ * Example:
+ * IP-Src = 10.0.0.0/255.0.0.0
+ * value: 0x0A000000 mask: FF000000 flag: RQFPR_IPV4
+ *
+ * Ethtool gives us value=0 and mask=~0 for a don't-care tuple,
+ * and a mask of 0 for a don't-care mask.
+ *
+ * The don't-care check and the mask adjustment for mask=0 are done for VLAN
+ * and MAC fields at an upper level (the information is missing at this level).
+ * Those entries can be discarded if they have value=0 and mask=0.
+ *
+ * Furthermore, all masks are one-padded for better hardware efficiency.
+ */
+static void gfar_set_attribute(u32 value, u32 mask, u32 flag,
+ struct filer_table *tab)
+{
+ switch (flag) {
+ /* 3bit */
+ case RQFCR_PID_PRI:
+ if (!(value | mask))
+ return;
+ mask |= RQFCR_PID_PRI_MASK;
+ break;
+ /* 8bit */
+ case RQFCR_PID_L4P:
+ case RQFCR_PID_TOS:
+ if (!~(mask | RQFCR_PID_L4P_MASK))
+ return;
+ if (!mask)
+ mask = ~0;
+ else
+ mask |= RQFCR_PID_L4P_MASK;
+ break;
+ /* 12bit */
+ case RQFCR_PID_VID:
+ if (!(value | mask))
+ return;
+ mask |= RQFCR_PID_VID_MASK;
+ break;
+ /* 16bit */
+ case RQFCR_PID_DPT:
+ case RQFCR_PID_SPT:
+ case RQFCR_PID_ETY:
+ if (!~(mask | RQFCR_PID_PORT_MASK))
+ return;
+ if (!mask)
+ mask = ~0;
+ else
+ mask |= RQFCR_PID_PORT_MASK;
+ break;
+ /* 24bit */
+ case RQFCR_PID_DAH:
+ case RQFCR_PID_DAL:
+ case RQFCR_PID_SAH:
+ case RQFCR_PID_SAL:
+ if (!(value | mask))
+ return;
+ mask |= RQFCR_PID_MAC_MASK;
+ break;
+ /* for all real 32bit masks */
+ default:
+ if (!~mask)
+ return;
+ if (!mask)
+ mask = ~0;
+ break;
+ }
+ gfar_set_general_attribute(value, mask, flag, tab);
+}
+
+/* Translates value and mask for UDP, TCP or SCTP */
+static void gfar_set_basic_ip(struct ethtool_tcpip4_spec *value,
+ struct ethtool_tcpip4_spec *mask,
+ struct filer_table *tab)
+{
+ gfar_set_attribute(be32_to_cpu(value->ip4src),
+ be32_to_cpu(mask->ip4src),
+ RQFCR_PID_SIA, tab);
+ gfar_set_attribute(be32_to_cpu(value->ip4dst),
+ be32_to_cpu(mask->ip4dst),
+ RQFCR_PID_DIA, tab);
+ gfar_set_attribute(be16_to_cpu(value->pdst),
+ be16_to_cpu(mask->pdst),
+ RQFCR_PID_DPT, tab);
+ gfar_set_attribute(be16_to_cpu(value->psrc),
+ be16_to_cpu(mask->psrc),
+ RQFCR_PID_SPT, tab);
+ gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab);
+}
+
+/* Translates value and mask for RAW-IP4 */
+static void gfar_set_user_ip(struct ethtool_usrip4_spec *value,
+ struct ethtool_usrip4_spec *mask,
+ struct filer_table *tab)
+{
+ gfar_set_attribute(be32_to_cpu(value->ip4src),
+ be32_to_cpu(mask->ip4src),
+ RQFCR_PID_SIA, tab);
+ gfar_set_attribute(be32_to_cpu(value->ip4dst),
+ be32_to_cpu(mask->ip4dst),
+ RQFCR_PID_DIA, tab);
+ gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab);
+ gfar_set_attribute(value->proto, mask->proto, RQFCR_PID_L4P, tab);
+ gfar_set_attribute(be32_to_cpu(value->l4_4_bytes),
+ be32_to_cpu(mask->l4_4_bytes),
+ RQFCR_PID_ARB, tab);
+
+}
+
+/* Translates value and mask for ETHER spec */
+static void gfar_set_ether(struct ethhdr *value, struct ethhdr *mask,
+ struct filer_table *tab)
+{
+ u32 upper_temp_mask = 0;
+ u32 lower_temp_mask = 0;
+
+ /* Source address */
+ if (!is_broadcast_ether_addr(mask->h_source)) {
+ if (is_zero_ether_addr(mask->h_source)) {
+ upper_temp_mask = 0xFFFFFFFF;
+ lower_temp_mask = 0xFFFFFFFF;
+ } else {
+ upper_temp_mask = mask->h_source[0] << 16 |
+ mask->h_source[1] << 8 |
+ mask->h_source[2];
+ lower_temp_mask = mask->h_source[3] << 16 |
+ mask->h_source[4] << 8 |
+ mask->h_source[5];
+ }
+ /* Upper 24bit */
+ gfar_set_attribute(value->h_source[0] << 16 |
+ value->h_source[1] << 8 |
+ value->h_source[2],
+ upper_temp_mask, RQFCR_PID_SAH, tab);
+ /* And the same for the lower part */
+ gfar_set_attribute(value->h_source[3] << 16 |
+ value->h_source[4] << 8 |
+ value->h_source[5],
+ lower_temp_mask, RQFCR_PID_SAL, tab);
+ }
+ /* Destination address */
+ if (!is_broadcast_ether_addr(mask->h_dest)) {
+ /* Special for destination is limited broadcast */
+ if ((is_broadcast_ether_addr(value->h_dest) &&
+ is_zero_ether_addr(mask->h_dest))) {
+ gfar_set_parse_bits(RQFPR_EBC, RQFPR_EBC, tab);
+ } else {
+ if (is_zero_ether_addr(mask->h_dest)) {
+ upper_temp_mask = 0xFFFFFFFF;
+ lower_temp_mask = 0xFFFFFFFF;
+ } else {
+ upper_temp_mask = mask->h_dest[0] << 16 |
+ mask->h_dest[1] << 8 |
+ mask->h_dest[2];
+ lower_temp_mask = mask->h_dest[3] << 16 |
+ mask->h_dest[4] << 8 |
+ mask->h_dest[5];
+ }
+
+ /* Upper 24bit */
+ gfar_set_attribute(value->h_dest[0] << 16 |
+ value->h_dest[1] << 8 |
+ value->h_dest[2],
+ upper_temp_mask, RQFCR_PID_DAH, tab);
+ /* And the same for the lower part */
+ gfar_set_attribute(value->h_dest[3] << 16 |
+ value->h_dest[4] << 8 |
+ value->h_dest[5],
+ lower_temp_mask, RQFCR_PID_DAL, tab);
+ }
+ }
+
+ gfar_set_attribute(be16_to_cpu(value->h_proto),
+ be16_to_cpu(mask->h_proto),
+ RQFCR_PID_ETY, tab);
+}
+
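+/* Helpers extracting the VID, CFI and priority fields (and their masks) from a rule's VLAN TCI */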
+static inline u32 vlan_tci_vid(struct ethtool_rx_flow_spec *rule)
+{
+ return be16_to_cpu(rule->h_ext.vlan_tci) & VLAN_VID_MASK;
+}
+
+static inline u32 vlan_tci_vidm(struct ethtool_rx_flow_spec *rule)
+{
+ return be16_to_cpu(rule->m_ext.vlan_tci) & VLAN_VID_MASK;
+}
+
+static inline u32 vlan_tci_cfi(struct ethtool_rx_flow_spec *rule)
+{
+ return be16_to_cpu(rule->h_ext.vlan_tci) & VLAN_CFI_MASK;
+}
+
+static inline u32 vlan_tci_cfim(struct ethtool_rx_flow_spec *rule)
+{
+ return be16_to_cpu(rule->m_ext.vlan_tci) & VLAN_CFI_MASK;
+}
+
+static inline u32 vlan_tci_prio(struct ethtool_rx_flow_spec *rule)
+{
+ return (be16_to_cpu(rule->h_ext.vlan_tci) & VLAN_PRIO_MASK) >>
+ VLAN_PRIO_SHIFT;
+}
+
+static inline u32 vlan_tci_priom(struct ethtool_rx_flow_spec *rule)
+{
+ return (be16_to_cpu(rule->m_ext.vlan_tci) & VLAN_PRIO_MASK) >>
+ VLAN_PRIO_SHIFT;
+}
+
+/* Convert a rule to binary filter format of gianfar */
+static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
+ struct filer_table *tab)
+{
+ u32 vlan = 0, vlan_mask = 0;
+ u32 id = 0, id_mask = 0;
+ u32 cfi = 0, cfi_mask = 0;
+ u32 prio = 0, prio_mask = 0;
+ u32 old_index = tab->index;
+
+ /* Check if vlan is wanted */
+ if ((rule->flow_type & FLOW_EXT) &&
+ (rule->m_ext.vlan_tci != cpu_to_be16(0xFFFF))) {
+ if (!rule->m_ext.vlan_tci)
+ rule->m_ext.vlan_tci = cpu_to_be16(0xFFFF);
+
+ vlan = RQFPR_VLN;
+ vlan_mask = RQFPR_VLN;
+
+ /* Separate the fields */
+ id = vlan_tci_vid(rule);
+ id_mask = vlan_tci_vidm(rule);
+ cfi = vlan_tci_cfi(rule);
+ cfi_mask = vlan_tci_cfim(rule);
+ prio = vlan_tci_prio(rule);
+ prio_mask = vlan_tci_priom(rule);
+
+ if (cfi == VLAN_TAG_PRESENT && cfi_mask == VLAN_TAG_PRESENT) {
+ vlan |= RQFPR_CFI;
+ vlan_mask |= RQFPR_CFI;
+ } else if (cfi != VLAN_TAG_PRESENT &&
+ cfi_mask == VLAN_TAG_PRESENT) {
+ vlan_mask |= RQFPR_CFI;
+ }
+ }
+
+ switch (rule->flow_type & ~FLOW_EXT) {
+ case TCP_V4_FLOW:
+ gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_TCP | vlan,
+ RQFPR_IPV4 | RQFPR_TCP | vlan_mask, tab);
+ gfar_set_basic_ip(&rule->h_u.tcp_ip4_spec,
+ &rule->m_u.tcp_ip4_spec, tab);
+ break;
+ case UDP_V4_FLOW:
+ gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_UDP | vlan,
+ RQFPR_IPV4 | RQFPR_UDP | vlan_mask, tab);
+ gfar_set_basic_ip(&rule->h_u.udp_ip4_spec,
+ &rule->m_u.udp_ip4_spec, tab);
+ break;
+ case SCTP_V4_FLOW:
+ gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask,
+ tab);
+ gfar_set_attribute(132, 0, RQFCR_PID_L4P, tab);
+ gfar_set_basic_ip((struct ethtool_tcpip4_spec *)&rule->h_u,
+ (struct ethtool_tcpip4_spec *)&rule->m_u,
+ tab);
+ break;
+ case IP_USER_FLOW:
+ gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask,
+ tab);
+ gfar_set_user_ip((struct ethtool_usrip4_spec *) &rule->h_u,
+ (struct ethtool_usrip4_spec *) &rule->m_u,
+ tab);
+ break;
+ case ETHER_FLOW:
+ if (vlan)
+ gfar_set_parse_bits(vlan, vlan_mask, tab);
+ gfar_set_ether((struct ethhdr *) &rule->h_u,
+ (struct ethhdr *) &rule->m_u, tab);
+ break;
+ default:
+ return -1;
+ }
+
+ /* Set the vlan attributes in the end */
+ if (vlan) {
+ gfar_set_attribute(id, id_mask, RQFCR_PID_VID, tab);
+ gfar_set_attribute(prio, prio_mask, RQFCR_PID_PRI, tab);
+ }
+
+ /* If nothing has been written so far, it must be a default */
+ if (tab->index == old_index) {
+ gfar_set_mask(0xFFFFFFFF, tab);
+ tab->fe[tab->index].ctrl = 0x20;
+ tab->fe[tab->index].prop = 0x0;
+ tab->index++;
+ }
+
+ /* Remove last AND */
+ tab->fe[tab->index - 1].ctrl &= (~RQFCR_AND);
+
+ /* Specify which queue to use or to drop */
+ if (rule->ring_cookie == RX_CLS_FLOW_DISC)
+ tab->fe[tab->index - 1].ctrl |= RQFCR_RJE;
+ else
+ tab->fe[tab->index - 1].ctrl |= (rule->ring_cookie << 10);
+
+ /* Only big enough entries can be clustered */
+ if (tab->index > (old_index + 2)) {
+ tab->fe[old_index + 1].ctrl |= RQFCR_CLE;
+ tab->fe[tab->index - 1].ctrl |= RQFCR_CLE;
+ }
+
+ /* In rare cases the cache can be full while there is
+ * free space in hw
+ */
+ if (tab->index > MAX_FILER_CACHE_IDX - 1)
+ return -EBUSY;
+
+ return 0;
+}
+
+/* Copy size filer entries */
+static void gfar_copy_filer_entries(struct gfar_filer_entry dst[0],
+ struct gfar_filer_entry src[0], s32 size)
+{
+ while (size > 0) {
+ size--;
+ dst[size].ctrl = src[size].ctrl;
+ dst[size].prop = src[size].prop;
+ }
+}
+
+/* Delete the contents of the filer-table between start and end
+ * and collapse them
+ */
+static int gfar_trim_filer_entries(u32 begin, u32 end, struct filer_table *tab)
+{
+ int length;
+
+ if (end > MAX_FILER_CACHE_IDX || end < begin)
+ return -EINVAL;
+
+ end++;
+ length = end - begin;
+
+ /* Copy */
+ while (end < tab->index) {
+ tab->fe[begin].ctrl = tab->fe[end].ctrl;
+ tab->fe[begin++].prop = tab->fe[end++].prop;
+
+ }
+ /* Fill up with don't cares */
+ while (begin < tab->index) {
+ tab->fe[begin].ctrl = 0x60;
+ tab->fe[begin].prop = 0xFFFFFFFF;
+ begin++;
+ }
+
+ tab->index -= length;
+ return 0;
+}
+
+/* Make space on the wanted location */
+static int gfar_expand_filer_entries(u32 begin, u32 length,
+ struct filer_table *tab)
+{
+ if (length == 0 || length + tab->index > MAX_FILER_CACHE_IDX ||
+ begin > MAX_FILER_CACHE_IDX)
+ return -EINVAL;
+
+ gfar_copy_filer_entries(&(tab->fe[begin + length]), &(tab->fe[begin]),
+ tab->index - length + 1);
+
+ tab->index += length;
+ return 0;
+}
+
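+/* Find the next entry that starts a cluster: both AND and CLE bits set */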
+static int gfar_get_next_cluster_start(int start, struct filer_table *tab)
+{
+ for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
+ start++) {
+ if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) ==
+ (RQFCR_AND | RQFCR_CLE))
+ return start;
+ }
+ return -1;
+}
+
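+/* Find the next entry that ends a cluster: CLE set, AND cleared */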
+static int gfar_get_next_cluster_end(int start, struct filer_table *tab)
+{
+ for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
+ start++) {
+ if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) ==
+ (RQFCR_CLE))
+ return start;
+ }
+ return -1;
+}
+
+/* Uses the hardware's clustering option to reduce
+ * the number of filer table entries
+ */
+static void gfar_cluster_filer(struct filer_table *tab)
+{
+ s32 i = -1, j, iend, jend;
+
+ while ((i = gfar_get_next_cluster_start(++i, tab)) != -1) {
+ j = i;
+ while ((j = gfar_get_next_cluster_start(++j, tab)) != -1) {
+ /* The cluster entries themselves and the previous one
+ * (a mask) must be identical!
+ */
+ if (tab->fe[i].ctrl != tab->fe[j].ctrl)
+ break;
+ if (tab->fe[i].prop != tab->fe[j].prop)
+ break;
+ if (tab->fe[i - 1].ctrl != tab->fe[j - 1].ctrl)
+ break;
+ if (tab->fe[i - 1].prop != tab->fe[j - 1].prop)
+ break;
+ iend = gfar_get_next_cluster_end(i, tab);
+ jend = gfar_get_next_cluster_end(j, tab);
+ if (jend == -1 || iend == -1)
+ break;
+
+ /* First we make some free space, where our cluster
+ * element should be. Then we copy it there and finally
+ * delete it from its old location.
+ */
+ if (gfar_expand_filer_entries(iend, (jend - j), tab) ==
+ -EINVAL)
+ break;
+
+ gfar_copy_filer_entries(&(tab->fe[iend + 1]),
+ &(tab->fe[jend + 1]), jend - j);
+
+ if (gfar_trim_filer_entries(jend - 1,
+ jend + (jend - j),
+ tab) == -EINVAL)
+ return;
+
+ /* Mask out cluster bit */
+ tab->fe[iend].ctrl &= ~(RQFCR_CLE);
+ }
+ }
+}
+
+/* Swaps the masked bits of a1<>a2 and b1<>b2 */
+static void gfar_swap_bits(struct gfar_filer_entry *a1,
+ struct gfar_filer_entry *a2,
+ struct gfar_filer_entry *b1,
+ struct gfar_filer_entry *b2, u32 mask)
+{
+ u32 temp[4];
+ temp[0] = a1->ctrl & mask;
+ temp[1] = a2->ctrl & mask;
+ temp[2] = b1->ctrl & mask;
+ temp[3] = b2->ctrl & mask;
+
+ a1->ctrl &= ~mask;
+ a2->ctrl &= ~mask;
+ b1->ctrl &= ~mask;
+ b2->ctrl &= ~mask;
+
+ a1->ctrl |= temp[1];
+ a2->ctrl |= temp[0];
+ b1->ctrl |= temp[3];
+ b2->ctrl |= temp[2];
+}
+
+/* Generate a list of mask values with their start and
+ * end of validity, and a block index marking parts that belong
+ * together (glued by ANDs), in mask_table
+ */
+static u32 gfar_generate_mask_table(struct gfar_mask_entry *mask_table,
+ struct filer_table *tab)
+{
+ u32 i, and_index = 0, block_index = 1;
+
+ for (i = 0; i < tab->index; i++) {
+
+ /* LSByte of control = 0 sets a mask */
+ if (!(tab->fe[i].ctrl & 0xF)) {
+ mask_table[and_index].mask = tab->fe[i].prop;
+ mask_table[and_index].start = i;
+ mask_table[and_index].block = block_index;
+ if (and_index >= 1)
+ mask_table[and_index - 1].end = i - 1;
+ and_index++;
+ }
+ /* cluster starts and ends will be separated because they should
+ * hold their position
+ */
+ if (tab->fe[i].ctrl & RQFCR_CLE)
+ block_index++;
+ /* A cleared AND bit indicates the end of a dependent block */
+ if (!(tab->fe[i].ctrl & RQFCR_AND))
+ block_index++;
+ }
+
+ mask_table[and_index - 1].end = i - 1;
+
+ return and_index;
+}
+
+/* Sorts the entries of mask_table by the values of the masks.
+ * Important: The 0xFF80 flags of the first and last entry of a
+ * block must hold their position (which queue, CLusterEnable, ReJEct,
+ * AND)
+ */
+static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table,
+ struct filer_table *temp_table, u32 and_index)
+{
+ /* Pointer to compare function (_asc or _desc) */
+ int (*gfar_comp)(const void *, const void *);
+
+ u32 i, size = 0, start = 0, prev = 1;
+ u32 old_first, old_last, new_first, new_last;
+
+ gfar_comp = &gfar_comp_desc;
+
+ for (i = 0; i < and_index; i++) {
+ if (prev != mask_table[i].block) {
+ old_first = mask_table[start].start + 1;
+ old_last = mask_table[i - 1].end;
+ sort(mask_table + start, size,
+ sizeof(struct gfar_mask_entry),
+ gfar_comp, &gfar_swap);
+
+ /* Toggle the order for every block. This makes the
+ * sorting more efficient!
+ */
+ if (gfar_comp == gfar_comp_desc)
+ gfar_comp = &gfar_comp_asc;
+ else
+ gfar_comp = &gfar_comp_desc;
+
+ new_first = mask_table[start].start + 1;
+ new_last = mask_table[i - 1].end;
+
+ gfar_swap_bits(&temp_table->fe[new_first],
+ &temp_table->fe[old_first],
+ &temp_table->fe[new_last],
+ &temp_table->fe[old_last],
+ RQFCR_QUEUE | RQFCR_CLE |
+ RQFCR_RJE | RQFCR_AND);
+
+ start = i;
+ size = 0;
+ }
+ size++;
+ prev = mask_table[i].block;
+ }
+}
+
+/* Reduces the number of masks needed in the filer table to save entries.
+ * This is done by sorting the masks of a dependent block. A dependent block is
+ * identified by gluing ANDs or CLE. The sorting order toggles after every
+ * block. Of course, entries in the scope of a mask must change their location
+ * with it.
+ */
+static int gfar_optimize_filer_masks(struct filer_table *tab)
+{
+ struct filer_table *temp_table;
+ struct gfar_mask_entry *mask_table;
+
+ u32 and_index = 0, previous_mask = 0, i = 0, j = 0, size = 0;
+ s32 ret = 0;
+
+ /* We need a copy of the filer table because
+ * we want to change its order
+ */
+ temp_table = kmemdup(tab, sizeof(*temp_table), GFP_KERNEL);
+ if (temp_table == NULL)
+ return -ENOMEM;
+
+ mask_table = kcalloc(MAX_FILER_CACHE_IDX / 2 + 1,
+ sizeof(struct gfar_mask_entry), GFP_KERNEL);
+
+ if (mask_table == NULL) {
+ ret = -ENOMEM;
+ goto end;
+ }
+
+ and_index = gfar_generate_mask_table(mask_table, tab);
+
+ gfar_sort_mask_table(mask_table, temp_table, and_index);
+
+ /* Now we can copy the data from our duplicated filer table to
+ * the real one in the order the mask table says
+ */
+ for (i = 0; i < and_index; i++) {
+ size = mask_table[i].end - mask_table[i].start + 1;
+ gfar_copy_filer_entries(&(tab->fe[j]),
+ &(temp_table->fe[mask_table[i].start]), size);
+ j += size;
+ }
+
+ /* And finally we just have to check for duplicated masks and drop the
+ * second ones
+ */
+ for (i = 0; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
+ if (tab->fe[i].ctrl == 0x80) {
+ previous_mask = i++;
+ break;
+ }
+ }
+ for (; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
+ if (tab->fe[i].ctrl == 0x80) {
+ if (tab->fe[i].prop == tab->fe[previous_mask].prop) {
+ /* Two identical ones found!
+ * So drop the second one!
+ */
+ gfar_trim_filer_entries(i, i, tab);
+ } else
+ /* Not identical! */
+ previous_mask = i;
+ }
+ }
+
+ kfree(mask_table);
+end: kfree(temp_table);
+ return ret;
+}
+
+/* Write the bit-pattern from software's buffer to hardware registers */
+static int gfar_write_filer_table(struct gfar_private *priv,
+ struct filer_table *tab)
+{
+ u32 i = 0;
+ if (tab->index > MAX_FILER_IDX - 1)
+ return -EBUSY;
+
+ /* Fill regular entries */
+ for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].prop);
+ i++)
+ gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop);
+ /* Fill the rest with fall-throughs */
+ for (; i < MAX_FILER_IDX - 1; i++)
+ gfar_write_filer(priv, i, 0x60, 0xFFFFFFFF);
+ /* Last entry must be default accept
+ * because that's what people expect
+ */
+ gfar_write_filer(priv, i, 0x20, 0x0);
+
+ return 0;
+}
+
+static int gfar_check_capability(struct ethtool_rx_flow_spec *flow,
+ struct gfar_private *priv)
+{
+
+ if (flow->flow_type & FLOW_EXT) {
+ if (~flow->m_ext.data[0] || ~flow->m_ext.data[1])
+ netdev_warn(priv->ndev,
+ "User-specific data not supported!\n");
+ if (~flow->m_ext.vlan_etype)
+ netdev_warn(priv->ndev,
+ "VLAN-etype not supported!\n");
+ }
+ if (flow->flow_type == IP_USER_FLOW)
+ if (flow->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4)
+ netdev_warn(priv->ndev,
+ "IP-Version differing from IPv4 not supported!\n");
+
+ return 0;
+}
+
+static int gfar_process_filer_changes(struct gfar_private *priv)
+{
+ struct ethtool_flow_spec_container *j;
+ struct filer_table *tab;
+ s32 i = 0;
+ s32 ret = 0;
+
+ /* So index is set to zero, too! */
+ tab = kzalloc(sizeof(*tab), GFP_KERNEL);
+ if (tab == NULL)
+ return -ENOMEM;
+
+ /* Now convert the existing filer data from flow_spec into
+ * filer tables binary format
+ */
+ list_for_each_entry(j, &priv->rx_list.list, list) {
+ ret = gfar_convert_to_filer(&j->fs, tab);
+ if (ret == -EBUSY) {
+ netdev_err(priv->ndev,
+ "Rule not added: No free space!\n");
+ goto end;
+ }
+ if (ret == -1) {
+ netdev_err(priv->ndev,
+ "Rule not added: Unsupported Flow-type!\n");
+ goto end;
+ }
+ }
+
+ i = tab->index;
+
+ /* Optimizations to save entries */
+ gfar_cluster_filer(tab);
+ gfar_optimize_filer_masks(tab);
+
+ pr_debug("\tSummary:\n"
+ "\tData on hardware: %d\n"
+ "\tCompression rate: %d%%\n",
+ tab->index, 100 - (100 * tab->index) / i);
+
+ /* Write everything to hardware */
+ ret = gfar_write_filer_table(priv, tab);
+ if (ret == -EBUSY) {
+ netdev_err(priv->ndev, "Rule not added: No free space!\n");
+ goto end;
+ }
+
+end:
+ kfree(tab);
+ return ret;
+}
+
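+/* Flip every mask bit: rules are stored with masks inverted relative to the ethtool representation */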
+static void gfar_invert_masks(struct ethtool_rx_flow_spec *flow)
+{
+ u32 i = 0;
+
+ for (i = 0; i < sizeof(flow->m_u); i++)
+ flow->m_u.hdata[i] ^= 0xFF;
+
+ flow->m_ext.vlan_etype ^= cpu_to_be16(0xFFFF);
+ flow->m_ext.vlan_tci ^= cpu_to_be16(0xFFFF);
+ flow->m_ext.data[0] ^= cpu_to_be32(~0);
+ flow->m_ext.data[1] ^= cpu_to_be32(~0);
+}
+
+static int gfar_add_cls(struct gfar_private *priv,
+ struct ethtool_rx_flow_spec *flow)
+{
+ struct ethtool_flow_spec_container *temp, *comp;
+ int ret = 0;
+
+ temp = kmalloc(sizeof(*temp), GFP_KERNEL);
+ if (temp == NULL)
+ return -ENOMEM;
+ memcpy(&temp->fs, flow, sizeof(temp->fs));
+
+ gfar_invert_masks(&temp->fs);
+ ret = gfar_check_capability(&temp->fs, priv);
+ if (ret)
+ goto clean_mem;
+ /* Link in the new element at the right location */
+ if (list_empty(&priv->rx_list.list)) {
+ ret = gfar_check_filer_hardware(priv);
+ if (ret != 0)
+ goto clean_mem;
+ list_add(&temp->list, &priv->rx_list.list);
+ goto process;
+ } else {
+ list_for_each_entry(comp, &priv->rx_list.list, list) {
+ if (comp->fs.location > flow->location) {
+ list_add_tail(&temp->list, &comp->list);
+ goto process;
+ }
+ if (comp->fs.location == flow->location) {
+ netdev_err(priv->ndev,
+ "Rule not added: ID %d not free!\n",
+ flow->location);
+ ret = -EBUSY;
+ goto clean_mem;
+ }
+ }
+ list_add_tail(&temp->list, &priv->rx_list.list);
+ }
+
+process:
+ ret = gfar_process_filer_changes(priv);
+ if (ret)
+ goto clean_list;
+ priv->rx_list.count++;
+ return ret;
+
+clean_list:
+ list_del(&temp->list);
+clean_mem:
+ kfree(temp);
+ return ret;
+}
+
+static int gfar_del_cls(struct gfar_private *priv, u32 loc)
+{
+ struct ethtool_flow_spec_container *comp;
+ int ret = -EINVAL;
+
+ if (list_empty(&priv->rx_list.list))
+ return ret;
+
+ list_for_each_entry(comp, &priv->rx_list.list, list) {
+ if (comp->fs.location == loc) {
+ list_del(&comp->list);
+ kfree(comp);
+ priv->rx_list.count--;
+ gfar_process_filer_changes(priv);
+ ret = 0;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int gfar_get_cls(struct gfar_private *priv, struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_flow_spec_container *comp;
+ int ret = -EINVAL;
+
+ list_for_each_entry(comp, &priv->rx_list.list, list) {
+ if (comp->fs.location == cmd->fs.location) {
+ memcpy(&cmd->fs, &comp->fs, sizeof(cmd->fs));
+ gfar_invert_masks(&cmd->fs);
+ ret = 0;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int gfar_get_cls_all(struct gfar_private *priv,
+ struct ethtool_rxnfc *cmd, u32 *rule_locs)
+{
+ struct ethtool_flow_spec_container *comp;
+ u32 i = 0;
+
+ list_for_each_entry(comp, &priv->rx_list.list, list) {
+ if (i == cmd->rule_cnt)
+ return -EMSGSIZE;
+ rule_locs[i] = comp->fs.location;
+ i++;
+ }
+
+ cmd->data = MAX_FILER_IDX;
+ cmd->rule_cnt = i;
+
+ return 0;
+}
+
+static int gfar_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ int ret = 0;
+
+ if (test_bit(GFAR_RESETTING, &priv->state))
+ return -EBUSY;
+
+ mutex_lock(&priv->rx_queue_access);
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXFH:
+ ret = gfar_set_hash_opts(priv, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLINS:
+ if ((cmd->fs.ring_cookie != RX_CLS_FLOW_DISC &&
+ cmd->fs.ring_cookie >= priv->num_rx_queues) ||
+ cmd->fs.location >= MAX_FILER_IDX) {
+ ret = -EINVAL;
+ break;
+ }
+ ret = gfar_add_cls(priv, &cmd->fs);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ ret = gfar_del_cls(priv, cmd->fs.location);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&priv->rx_queue_access);
+
+ return ret;
+}
+
+static int gfar_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ int ret = 0;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = priv->num_rx_queues;
+ break;
+ case ETHTOOL_GRXCLSRLCNT:
+ cmd->rule_cnt = priv->rx_list.count;
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ ret = gfar_get_cls(priv, cmd);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ ret = gfar_get_cls_all(priv, cmd, rule_locs);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+int gfar_phc_index = -1;
+EXPORT_SYMBOL(gfar_phc_index);
+
+static int gfar_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+
+ if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) {
+ info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
+ info->phc_index = -1;
+ return 0;
+ }
+ info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->phc_index = gfar_phc_index;
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) |
+ (1 << HWTSTAMP_TX_ON);
+ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_ALL);
+ return 0;
+}
+
+const struct ethtool_ops gfar_ethtool_ops = {
+ .get_settings = gfar_gsettings,
+ .set_settings = gfar_ssettings,
+ .get_drvinfo = gfar_gdrvinfo,
+ .get_regs_len = gfar_reglen,
+ .get_regs = gfar_get_regs,
+ .get_link = ethtool_op_get_link,
+ .get_coalesce = gfar_gcoalesce,
+ .set_coalesce = gfar_scoalesce,
+ .get_ringparam = gfar_gringparam,
+ .set_ringparam = gfar_sringparam,
+ .get_pauseparam = gfar_gpauseparam,
+ .set_pauseparam = gfar_spauseparam,
+ .get_strings = gfar_gstrings,
+ .get_sset_count = gfar_sset_count,
+ .get_ethtool_stats = gfar_fill_stats,
+ .get_msglevel = gfar_get_msglevel,
+ .set_msglevel = gfar_set_msglevel,
+#ifdef CONFIG_PM
+ .get_wol = gfar_get_wol,
+ .set_wol = gfar_set_wol,
+#endif
+ .set_rxnfc = gfar_set_nfc,
+ .get_rxnfc = gfar_get_nfc,
+ .get_ts_info = gfar_get_ts_info,
+};
diff --git a/kernel/drivers/net/ethernet/freescale/gianfar_ptp.c b/kernel/drivers/net/ethernet/freescale/gianfar_ptp.c
new file mode 100644
index 000000000..8e3cd77aa
--- /dev/null
+++ b/kernel/drivers/net/ethernet/freescale/gianfar_ptp.c
@@ -0,0 +1,574 @@
+/*
+ * PTP 1588 clock using the eTSEC
+ *
+ * Copyright (C) 2010 OMICRON electronics GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/device.h>
+#include <linux/hrtimer.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/timex.h>
+#include <linux/io.h>
+
+#include <linux/ptp_clock_kernel.h>
+
+#include "gianfar.h"
+
+/*
+ * gianfar ptp registers
+ * Generated by regen.tcl on Thu May 13 01:38:57 PM CEST 2010
+ */
+struct gianfar_ptp_registers {
+ u32 tmr_ctrl; /* Timer control register */
+ u32 tmr_tevent; /* Timestamp event register */
+ u32 tmr_temask; /* Timer event mask register */
+ u32 tmr_pevent; /* Timestamp event register */
+ u32 tmr_pemask; /* Timer event mask register */
+ u32 tmr_stat; /* Timestamp status register */
+ u32 tmr_cnt_h; /* Timer counter high register */
+ u32 tmr_cnt_l; /* Timer counter low register */
+ u32 tmr_add; /* Timer drift compensation addend register */
+ u32 tmr_acc; /* Timer accumulator register */
+ u32 tmr_prsc; /* Timer prescale */
+ u8 res1[4];
+ u32 tmroff_h; /* Timer offset high */
+ u32 tmroff_l; /* Timer offset low */
+ u8 res2[8];
+ u32 tmr_alarm1_h; /* Timer alarm 1 high register */
+ u32 tmr_alarm1_l; /* Timer alarm 1 low register */
+ u32 tmr_alarm2_h; /* Timer alarm 2 high register */
+ u32 tmr_alarm2_l; /* Timer alarm 2 low register */
+ u8 res3[48];
+ u32 tmr_fiper1; /* Timer fixed period interval */
+ u32 tmr_fiper2; /* Timer fixed period interval */
+ u32 tmr_fiper3; /* Timer fixed period interval */
+ u8 res4[20];
+ u32 tmr_etts1_h; /* Timestamp of general purpose external trigger */
+ u32 tmr_etts1_l; /* Timestamp of general purpose external trigger */
+ u32 tmr_etts2_h; /* Timestamp of general purpose external trigger */
+ u32 tmr_etts2_l; /* Timestamp of general purpose external trigger */
+};
+
+/* Bit definitions for the TMR_CTRL register */
+#define ALM1P (1<<31) /* Alarm1 output polarity */
+#define ALM2P (1<<30) /* Alarm2 output polarity */
+#define FS (1<<28) /* FIPER start indication */
+#define PP1L (1<<27) /* Fiper1 pulse loopback mode enabled. */
+#define PP2L (1<<26) /* Fiper2 pulse loopback mode enabled. */
+#define TCLK_PERIOD_SHIFT (16) /* 1588 timer reference clock period. */
+#define TCLK_PERIOD_MASK (0x3ff)
+#define RTPE (1<<15) /* Record Tx Timestamp to PAL Enable. */
+#define FRD (1<<14) /* FIPER Realignment Disable */
+#define ESFDP (1<<11) /* External Tx/Rx SFD Polarity. */
+#define ESFDE (1<<10) /* External Tx/Rx SFD Enable. */
+#define ETEP2 (1<<9) /* External trigger 2 edge polarity */
+#define ETEP1 (1<<8) /* External trigger 1 edge polarity */
+#define COPH (1<<7) /* Generated clock output phase. */
+#define CIPH (1<<6) /* External oscillator input clock phase */
+#define TMSR (1<<5) /* Timer soft reset. */
+#define BYP (1<<3) /* Bypass drift compensated clock */
+#define TE (1<<2) /* 1588 timer enable. */
+#define CKSEL_SHIFT (0) /* 1588 Timer reference clock source */
+#define CKSEL_MASK (0x3)
+
+/* Bit definitions for the TMR_TEVENT register */
+#define ETS2 (1<<25) /* External trigger 2 timestamp sampled */
+#define ETS1 (1<<24) /* External trigger 1 timestamp sampled */
+#define ALM2 (1<<17) /* Current time = alarm time register 2 */
+#define ALM1 (1<<16) /* Current time = alarm time register 1 */
+#define PP1 (1<<7) /* periodic pulse generated on FIPER1 */
+#define PP2 (1<<6) /* periodic pulse generated on FIPER2 */
+#define PP3 (1<<5) /* periodic pulse generated on FIPER3 */
+
+/* Bit definitions for the TMR_TEMASK register */
+#define ETS2EN (1<<25) /* External trigger 2 timestamp enable */
+#define ETS1EN (1<<24) /* External trigger 1 timestamp enable */
+#define ALM2EN (1<<17) /* Timer ALM2 event enable */
+#define ALM1EN (1<<16) /* Timer ALM1 event enable */
+#define PP1EN (1<<7) /* Periodic pulse event 1 enable */
+#define PP2EN (1<<6) /* Periodic pulse event 2 enable */
+
+/* Bit definitions for the TMR_PEVENT register */
+#define TXP2 (1<<9) /* PTP transmitted timestamp in TXTS2 */
+#define TXP1 (1<<8) /* PTP transmitted timestamp in TXTS1 */
+#define RXP (1<<0) /* PTP frame has been received */
+
+/* Bit definitions for the TMR_PEMASK register */
+#define TXP2EN (1<<9) /* Transmit PTP packet event 2 enable */
+#define TXP1EN (1<<8) /* Transmit PTP packet event 1 enable */
+#define RXPEN (1<<0) /* Receive PTP packet event enable */
+
+/* Bit definitions for the TMR_STAT register */
+#define STAT_VEC_SHIFT (0) /* Timer general purpose status vector */
+#define STAT_VEC_MASK (0x3f)
+
+/* Bit definitions for the TMR_PRSC register */
+#define PRSC_OCK_SHIFT (0) /* Output clock division/prescale factor. */
+#define PRSC_OCK_MASK (0xffff)
+
+
+#define DRIVER "gianfar_ptp"
+#define DEFAULT_CKSEL 1
+#define N_EXT_TS 2
+#define REG_SIZE sizeof(struct gianfar_ptp_registers)
+
+struct etsects {
+ struct gianfar_ptp_registers __iomem *regs;
+ spinlock_t lock; /* protects regs */
+ struct ptp_clock *clock;
+ struct ptp_clock_info caps;
+ struct resource *rsrc;
+ int irq;
+ u64 alarm_interval; /* for periodic alarm */
+ u64 alarm_value;
+ u32 tclk_period; /* nanoseconds */
+ u32 tmr_prsc;
+ u32 tmr_add;
+ u32 cksel;
+ u32 tmr_fiper1;
+ u32 tmr_fiper2;
+};
+
+/*
+ * Register access functions
+ */
+
+/* Caller must hold etsects->lock. */
+static u64 tmr_cnt_read(struct etsects *etsects)
+{
+ u64 ns;
+ u32 lo, hi;
+
+ lo = gfar_read(&etsects->regs->tmr_cnt_l);
+ hi = gfar_read(&etsects->regs->tmr_cnt_h);
+ ns = ((u64) hi) << 32;
+ ns |= lo;
+ return ns;
+}
+
+/* Caller must hold etsects->lock. */
+static void tmr_cnt_write(struct etsects *etsects, u64 ns)
+{
+ u32 hi = ns >> 32;
+ u32 lo = ns & 0xffffffff;
+
+ gfar_write(&etsects->regs->tmr_cnt_l, lo);
+ gfar_write(&etsects->regs->tmr_cnt_h, hi);
+}
+
+/* Caller must hold etsects->lock. */
+static void set_alarm(struct etsects *etsects)
+{
+ u64 ns;
+ u32 lo, hi;
+
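+ /* Pick a whole-second boundary 0.5-1.5 s ahead, then back it off
+ * by one timer clock period before programming alarm1. */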
+ ns = tmr_cnt_read(etsects) + 1500000000ULL;
+ ns = div_u64(ns, 1000000000UL) * 1000000000ULL;
+ ns -= etsects->tclk_period;
+ hi = ns >> 32;
+ lo = ns & 0xffffffff;
+ gfar_write(&etsects->regs->tmr_alarm1_l, lo);
+ gfar_write(&etsects->regs->tmr_alarm1_h, hi);
+}
+
+/* Caller must hold etsects->lock. */
+static void set_fipers(struct etsects *etsects)
+{
+ set_alarm(etsects);
+ gfar_write(&etsects->regs->tmr_fiper1, etsects->tmr_fiper1);
+ gfar_write(&etsects->regs->tmr_fiper2, etsects->tmr_fiper2);
+}
+
+/*
+ * Interrupt service routine
+ */
+
+static irqreturn_t isr(int irq, void *priv)
+{
+ struct etsects *etsects = priv;
+ struct ptp_clock_event event;
+ u64 ns;
+ u32 ack = 0, lo, hi, mask, val;
+
+ val = gfar_read(&etsects->regs->tmr_tevent);
+
+ if (val & ETS1) {
+ ack |= ETS1;
+ hi = gfar_read(&etsects->regs->tmr_etts1_h);
+ lo = gfar_read(&etsects->regs->tmr_etts1_l);
+ event.type = PTP_CLOCK_EXTTS;
+ event.index = 0;
+ event.timestamp = ((u64) hi) << 32;
+ event.timestamp |= lo;
+ ptp_clock_event(etsects->clock, &event);
+ }
+
+ if (val & ETS2) {
+ ack |= ETS2;
+ hi = gfar_read(&etsects->regs->tmr_etts2_h);
+ lo = gfar_read(&etsects->regs->tmr_etts2_l);
+ event.type = PTP_CLOCK_EXTTS;
+ event.index = 1;
+ event.timestamp = ((u64) hi) << 32;
+ event.timestamp |= lo;
+ ptp_clock_event(etsects->clock, &event);
+ }
+
+ if (val & ALM2) {
+ ack |= ALM2;
+ if (etsects->alarm_value) {
+ event.type = PTP_CLOCK_ALARM;
+ event.index = 0;
+ event.timestamp = etsects->alarm_value;
+ ptp_clock_event(etsects->clock, &event);
+ }
+ if (etsects->alarm_interval) {
+ ns = etsects->alarm_value + etsects->alarm_interval;
+ hi = ns >> 32;
+ lo = ns & 0xffffffff;
+ spin_lock(&etsects->lock);
+ gfar_write(&etsects->regs->tmr_alarm2_l, lo);
+ gfar_write(&etsects->regs->tmr_alarm2_h, hi);
+ spin_unlock(&etsects->lock);
+ etsects->alarm_value = ns;
+ } else {
+ gfar_write(&etsects->regs->tmr_tevent, ALM2);
+ spin_lock(&etsects->lock);
+ mask = gfar_read(&etsects->regs->tmr_temask);
+ mask &= ~ALM2EN;
+ gfar_write(&etsects->regs->tmr_temask, mask);
+ spin_unlock(&etsects->lock);
+ etsects->alarm_value = 0;
+ etsects->alarm_interval = 0;
+ }
+ }
+
+ if (val & PP1) {
+ ack |= PP1;
+ event.type = PTP_CLOCK_PPS;
+ ptp_clock_event(etsects->clock, &event);
+ }
+
+ if (ack) {
+ gfar_write(&etsects->regs->tmr_tevent, ack);
+ return IRQ_HANDLED;
+ } else
+ return IRQ_NONE;
+}
+
+/*
+ * PTP clock operations
+ */
+
+static int ptp_gianfar_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+ u64 adj;
+ u32 diff, tmr_add;
+ int neg_adj = 0;
+ struct etsects *etsects = container_of(ptp, struct etsects, caps);
+
+ if (ppb < 0) {
+ neg_adj = 1;
+ ppb = -ppb;
+ }
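+ /* diff = tmr_add * |ppb| / 1e9: the change to the addend for the
+ * requested parts-per-billion frequency adjustment */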
+ tmr_add = etsects->tmr_add;
+ adj = tmr_add;
+ adj *= ppb;
+ diff = div_u64(adj, 1000000000ULL);
+
+ tmr_add = neg_adj ? tmr_add - diff : tmr_add + diff;
+
+ gfar_write(&etsects->regs->tmr_add, tmr_add);
+
+ return 0;
+}
+
+static int ptp_gianfar_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ s64 now;
+ unsigned long flags;
+ struct etsects *etsects = container_of(ptp, struct etsects, caps);
+
+ spin_lock_irqsave(&etsects->lock, flags);
+
+ now = tmr_cnt_read(etsects);
+ now += delta;
+ tmr_cnt_write(etsects, now);
+
+ spin_unlock_irqrestore(&etsects->lock, flags);
+
+ set_fipers(etsects);
+
+ return 0;
+}
+
+static int ptp_gianfar_gettime(struct ptp_clock_info *ptp,
+ struct timespec64 *ts)
+{
+ u64 ns;
+ unsigned long flags;
+ struct etsects *etsects = container_of(ptp, struct etsects, caps);
+
+ spin_lock_irqsave(&etsects->lock, flags);
+
+ ns = tmr_cnt_read(etsects);
+
+ spin_unlock_irqrestore(&etsects->lock, flags);
+
+ *ts = ns_to_timespec64(ns);
+
+ return 0;
+}
+
+static int ptp_gianfar_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ u64 ns;
+ unsigned long flags;
+ struct etsects *etsects = container_of(ptp, struct etsects, caps);
+
+ ns = timespec64_to_ns(ts);
+
+ spin_lock_irqsave(&etsects->lock, flags);
+
+ tmr_cnt_write(etsects, ns);
+ set_fipers(etsects);
+
+ spin_unlock_irqrestore(&etsects->lock, flags);
+
+ return 0;
+}
+
+static int ptp_gianfar_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct etsects *etsects = container_of(ptp, struct etsects, caps);
+ unsigned long flags;
+ u32 bit, mask;
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_EXTTS:
+ switch (rq->extts.index) {
+ case 0:
+ bit = ETS1EN;
+ break;
+ case 1:
+ bit = ETS2EN;
+ break;
+ default:
+ return -EINVAL;
+ }
+ spin_lock_irqsave(&etsects->lock, flags);
+ mask = gfar_read(&etsects->regs->tmr_temask);
+ if (on)
+ mask |= bit;
+ else
+ mask &= ~bit;
+ gfar_write(&etsects->regs->tmr_temask, mask);
+ spin_unlock_irqrestore(&etsects->lock, flags);
+ return 0;
+
+ case PTP_CLK_REQ_PPS:
+ spin_lock_irqsave(&etsects->lock, flags);
+ mask = gfar_read(&etsects->regs->tmr_temask);
+ if (on)
+ mask |= PP1EN;
+ else
+ mask &= ~PP1EN;
+ gfar_write(&etsects->regs->tmr_temask, mask);
+ spin_unlock_irqrestore(&etsects->lock, flags);
+ return 0;
+
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static struct ptp_clock_info ptp_gianfar_caps = {
+ .owner = THIS_MODULE,
+ .name = "gianfar clock",
+ .max_adj = 512000,
+ .n_alarm = 0,
+ .n_ext_ts = N_EXT_TS,
+ .n_per_out = 0,
+ .n_pins = 0,
+ .pps = 1,
+ .adjfreq = ptp_gianfar_adjfreq,
+ .adjtime = ptp_gianfar_adjtime,
+ .gettime64 = ptp_gianfar_gettime,
+ .settime64 = ptp_gianfar_settime,
+ .enable = ptp_gianfar_enable,
+};
+
+/* OF device tree */
+
+static int get_of_u32(struct device_node *node, char *str, u32 *val)
+{
+ int plen;
+ const u32 *prop = of_get_property(node, str, &plen);
+
+ if (!prop || plen != sizeof(*prop))
+ return -1;
+ *val = *prop;
+ return 0;
+}
+
+static int gianfar_ptp_probe(struct platform_device *dev)
+{
+ struct device_node *node = dev->dev.of_node;
+ struct etsects *etsects;
+ struct timespec64 now;
+ int err = -ENOMEM;
+ u32 tmr_ctrl;
+ unsigned long flags;
+
+ etsects = kzalloc(sizeof(*etsects), GFP_KERNEL);
+ if (!etsects)
+ goto no_memory;
+
+ err = -ENODEV;
+
+ etsects->caps = ptp_gianfar_caps;
+
+ if (get_of_u32(node, "fsl,cksel", &etsects->cksel))
+ etsects->cksel = DEFAULT_CKSEL;
+
+ if (get_of_u32(node, "fsl,tclk-period", &etsects->tclk_period) ||
+ get_of_u32(node, "fsl,tmr-prsc", &etsects->tmr_prsc) ||
+ get_of_u32(node, "fsl,tmr-add", &etsects->tmr_add) ||
+ get_of_u32(node, "fsl,tmr-fiper1", &etsects->tmr_fiper1) ||
+ get_of_u32(node, "fsl,tmr-fiper2", &etsects->tmr_fiper2) ||
+ get_of_u32(node, "fsl,max-adj", &etsects->caps.max_adj)) {
+ pr_err("device tree node missing required elements\n");
+ goto no_node;
+ }
+
+ etsects->irq = platform_get_irq(dev, 0);
+
+ if (etsects->irq == NO_IRQ) {
+ pr_err("irq not in device tree\n");
+ goto no_node;
+ }
+ if (request_irq(etsects->irq, isr, 0, DRIVER, etsects)) {
+ pr_err("request_irq failed\n");
+ goto no_node;
+ }
+
+ etsects->rsrc = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (!etsects->rsrc) {
+ pr_err("no resource\n");
+ goto no_resource;
+ }
+ if (request_resource(&iomem_resource, etsects->rsrc)) {
+ pr_err("resource busy\n");
+ goto no_resource;
+ }
+
+ spin_lock_init(&etsects->lock);
+
+ etsects->regs = ioremap(etsects->rsrc->start,
+ resource_size(etsects->rsrc));
+ if (!etsects->regs) {
+ pr_err("ioremap ptp registers failed\n");
+ goto no_ioremap;
+ }
+ getnstimeofday64(&now);
+ ptp_gianfar_settime(&etsects->caps, &now);
+
+ tmr_ctrl =
+ (etsects->tclk_period & TCLK_PERIOD_MASK) << TCLK_PERIOD_SHIFT |
+ (etsects->cksel & CKSEL_MASK) << CKSEL_SHIFT;
+
+ spin_lock_irqsave(&etsects->lock, flags);
+
+ gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl);
+ gfar_write(&etsects->regs->tmr_add, etsects->tmr_add);
+ gfar_write(&etsects->regs->tmr_prsc, etsects->tmr_prsc);
+ gfar_write(&etsects->regs->tmr_fiper1, etsects->tmr_fiper1);
+ gfar_write(&etsects->regs->tmr_fiper2, etsects->tmr_fiper2);
+ set_alarm(etsects);
+ gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl|FS|RTPE|TE|FRD);
+
+ spin_unlock_irqrestore(&etsects->lock, flags);
+
+ etsects->clock = ptp_clock_register(&etsects->caps, &dev->dev);
+ if (IS_ERR(etsects->clock)) {
+ err = PTR_ERR(etsects->clock);
+ goto no_clock;
+ }
+ gfar_phc_index = ptp_clock_index(etsects->clock);
+
+ platform_set_drvdata(dev, etsects);
+
+ return 0;
+
+no_clock:
+ iounmap(etsects->regs);
+no_ioremap:
+ release_resource(etsects->rsrc);
+no_resource:
+ free_irq(etsects->irq, etsects);
+no_node:
+ kfree(etsects);
+no_memory:
+ return err;
+}
+
+static int gianfar_ptp_remove(struct platform_device *dev)
+{
+ struct etsects *etsects = platform_get_drvdata(dev);
+
+ gfar_write(&etsects->regs->tmr_temask, 0);
+ gfar_write(&etsects->regs->tmr_ctrl, 0);
+
+ gfar_phc_index = -1;
+ ptp_clock_unregister(etsects->clock);
+ iounmap(etsects->regs);
+ release_resource(etsects->rsrc);
+ free_irq(etsects->irq, etsects);
+ kfree(etsects);
+
+ return 0;
+}
+
+static const struct of_device_id match_table[] = {
+ { .compatible = "fsl,etsec-ptp" },
+ {},
+};
+
+static struct platform_driver gianfar_ptp_driver = {
+ .driver = {
+ .name = "gianfar_ptp",
+ .of_match_table = match_table,
+ },
+ .probe = gianfar_ptp_probe,
+ .remove = gianfar_ptp_remove,
+};
+
+module_platform_driver(gianfar_ptp_driver);
+
+MODULE_AUTHOR("Richard Cochran <richardcochran@gmail.com>");
+MODULE_DESCRIPTION("PTP clock using the eTSEC");
+MODULE_LICENSE("GPL");
diff --git a/kernel/drivers/net/ethernet/freescale/ucc_geth.c b/kernel/drivers/net/ethernet/freescale/ucc_geth.c
new file mode 100644
index 000000000..4dd40e057
--- /dev/null
+++ b/kernel/drivers/net/ethernet/freescale/ucc_geth.c
@@ -0,0 +1,3983 @@
+/*
+ * Copyright (C) 2006-2009 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Author: Shlomi Gridish <gridish@freescale.com>
+ * Li Yang <leoli@freescale.com>
+ *
+ * Description:
+ * QE UCC Gigabit Ethernet Driver
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/stddef.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/workqueue.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/of_platform.h>
+
+#include <asm/uaccess.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <asm/immap_qe.h>
+#include <asm/qe.h>
+#include <asm/ucc.h>
+#include <asm/ucc_fast.h>
+#include <asm/machdep.h>
+
+#include "ucc_geth.h"
+
+#undef DEBUG
+
+#define ugeth_printk(level, format, arg...) \
+ printk(level format "\n", ## arg)
+
+#define ugeth_dbg(format, arg...) \
+ ugeth_printk(KERN_DEBUG , format , ## arg)
+
+#ifdef UGETH_VERBOSE_DEBUG
+#define ugeth_vdbg ugeth_dbg
+#else
+#define ugeth_vdbg(fmt, args...) do { } while (0)
+#endif /* UGETH_VERBOSE_DEBUG */
+#define UGETH_MSG_DEFAULT ((NETIF_MSG_IFUP << 1) - 1)
+
+
+static DEFINE_SPINLOCK(ugeth_lock);
+
+static struct {
+ u32 msg_enable;
+} debug = { -1 };
+
+module_param_named(debug, debug.msg_enable, int, 0);
+MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 0xffff=all)");
+
+static struct ucc_geth_info ugeth_primary_info = {
+ .uf_info = {
+ .bd_mem_part = MEM_PART_SYSTEM,
+ .rtsm = UCC_FAST_SEND_IDLES_BETWEEN_FRAMES,
+ .max_rx_buf_length = 1536,
+ /* adjusted at startup if max-speed 1000 */
+ .urfs = UCC_GETH_URFS_INIT,
+ .urfet = UCC_GETH_URFET_INIT,
+ .urfset = UCC_GETH_URFSET_INIT,
+ .utfs = UCC_GETH_UTFS_INIT,
+ .utfet = UCC_GETH_UTFET_INIT,
+ .utftt = UCC_GETH_UTFTT_INIT,
+ .ufpt = 256,
+ .mode = UCC_FAST_PROTOCOL_MODE_ETHERNET,
+ .ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL,
+ .tenc = UCC_FAST_TX_ENCODING_NRZ,
+ .renc = UCC_FAST_RX_ENCODING_NRZ,
+ .tcrc = UCC_FAST_16_BIT_CRC,
+ .synl = UCC_FAST_SYNC_LEN_NOT_USED,
+ },
+ .numQueuesTx = 1,
+ .numQueuesRx = 1,
+ .extendedFilteringChainPointer = ((uint32_t) NULL),
+ .typeorlen = 3072 /* 1536 */,
+ .nonBackToBackIfgPart1 = 0x40,
+ .nonBackToBackIfgPart2 = 0x60,
+ .miminumInterFrameGapEnforcement = 0x50,
+ .backToBackInterFrameGap = 0x60,
+ .mblinterval = 128,
+ .nortsrbytetime = 5,
+ .fracsiz = 1,
+ .strictpriorityq = 0xff,
+ .altBebTruncation = 0xa,
+ .excessDefer = 1,
+ .maxRetransmission = 0xf,
+ .collisionWindow = 0x37,
+ .receiveFlowControl = 1,
+ .transmitFlowControl = 1,
+ .maxGroupAddrInHash = 4,
+ .maxIndAddrInHash = 4,
+ .prel = 7,
+ .maxFrameLength = 1518+16, /* Add extra bytes for VLANs etc. */
+ .minFrameLength = 64,
+ .maxD1Length = 1520+16, /* Add extra bytes for VLANs etc. */
+ .maxD2Length = 1520+16, /* Add extra bytes for VLANs etc. */
+ .vlantype = 0x8100,
+ .ecamptr = ((uint32_t) NULL),
+ .eventRegMask = UCCE_OTHER,
+ .pausePeriod = 0xf000,
+ .interruptcoalescingmaxvalue = {1, 1, 1, 1, 1, 1, 1, 1},
+ .bdRingLenTx = {
+ TX_BD_RING_LEN,
+ TX_BD_RING_LEN,
+ TX_BD_RING_LEN,
+ TX_BD_RING_LEN,
+ TX_BD_RING_LEN,
+ TX_BD_RING_LEN,
+ TX_BD_RING_LEN,
+ TX_BD_RING_LEN},
+
+ .bdRingLenRx = {
+ RX_BD_RING_LEN,
+ RX_BD_RING_LEN,
+ RX_BD_RING_LEN,
+ RX_BD_RING_LEN,
+ RX_BD_RING_LEN,
+ RX_BD_RING_LEN,
+ RX_BD_RING_LEN,
+ RX_BD_RING_LEN},
+
+ .numStationAddresses = UCC_GETH_NUM_OF_STATION_ADDRESSES_1,
+ .largestexternallookupkeysize =
+ QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE,
+ .statisticsMode = UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE |
+ UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX |
+ UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX,
+ .vlanOperationTagged = UCC_GETH_VLAN_OPERATION_TAGGED_NOP,
+ .vlanOperationNonTagged = UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP,
+ .rxQoSMode = UCC_GETH_QOS_MODE_DEFAULT,
+ .aufc = UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_NONE,
+ .padAndCrc = MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC,
+ .numThreadsTx = UCC_GETH_NUM_OF_THREADS_1,
+ .numThreadsRx = UCC_GETH_NUM_OF_THREADS_1,
+ .riscTx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
+ .riscRx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
+};
+
+static struct ucc_geth_info ugeth_info[8];
+
+#ifdef DEBUG
+static void mem_disp(u8 *addr, int size)
+{
+ u8 *i;
+ int size16Aling = (size >> 4) << 4;
+ int size4Aling = (size >> 2) << 2;
+ int notAlign = 0;
+ if (size % 16)
+ notAlign = 1;
+
+ for (i = addr; (u32) i < (u32) addr + size16Aling; i += 16)
+ printk("0x%08x: %08x %08x %08x %08x\r\n",
+ (u32) i,
+ *((u32 *) (i)),
+ *((u32 *) (i + 4)),
+ *((u32 *) (i + 8)), *((u32 *) (i + 12)));
+ if (notAlign == 1)
+ printk("0x%08x: ", (u32) i);
+ for (; (u32) i < (u32) addr + size4Aling; i += 4)
+ printk("%08x ", *((u32 *) (i)));
+ for (; (u32) i < (u32) addr + size; i++)
+ printk("%02x", *((i)));
+ if (notAlign == 1)
+ printk("\r\n");
+}
+#endif /* DEBUG */
+
+static struct list_head *dequeue(struct list_head *lh)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ugeth_lock, flags);
+ if (!list_empty(lh)) {
+ struct list_head *node = lh->next;
+ list_del(node);
+ spin_unlock_irqrestore(&ugeth_lock, flags);
+ return node;
+ } else {
+ spin_unlock_irqrestore(&ugeth_lock, flags);
+ return NULL;
+ }
+}
+
+static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth,
+ u8 __iomem *bd)
+{
+ struct sk_buff *skb;
+
+ skb = netdev_alloc_skb(ugeth->ndev,
+ ugeth->ug_info->uf_info.max_rx_buf_length +
+ UCC_GETH_RX_DATA_BUF_ALIGNMENT);
+ if (!skb)
+ return NULL;
+
+ /* We need the data buffer to be aligned properly, so we reserve
+ * as many bytes as needed to achieve that alignment
+ */
+ skb_reserve(skb,
+ UCC_GETH_RX_DATA_BUF_ALIGNMENT -
+ (((unsigned)skb->data) & (UCC_GETH_RX_DATA_BUF_ALIGNMENT -
+ 1)));
+
+ out_be32(&((struct qe_bd __iomem *)bd)->buf,
+ dma_map_single(ugeth->dev,
+ skb->data,
+ ugeth->ug_info->uf_info.max_rx_buf_length +
+ UCC_GETH_RX_DATA_BUF_ALIGNMENT,
+ DMA_FROM_DEVICE));
+
+ out_be32((u32 __iomem *)bd,
+ (R_E | R_I | (in_be32((u32 __iomem*)bd) & R_W)));
+
+ return skb;
+}
+
+static int rx_bd_buffer_set(struct ucc_geth_private *ugeth, u8 rxQ)
+{
+ u8 __iomem *bd;
+ u32 bd_status;
+ struct sk_buff *skb;
+ int i;
+
+ bd = ugeth->p_rx_bd_ring[rxQ];
+ i = 0;
+
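+ /* Attach a fresh skb to every BD in the ring; the BD with the
+ * Wrap (R_W) bit set is the last one. */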
+ do {
+ bd_status = in_be32((u32 __iomem *)bd);
+ skb = get_new_skb(ugeth, bd);
+
+ if (!skb) /* If we cannot allocate a data buffer,
+ abort. Cleanup happens elsewhere */
+ return -ENOMEM;
+
+ ugeth->rx_skbuff[rxQ][i] = skb;
+
+ /* advance the BD pointer */
+ bd += sizeof(struct qe_bd);
+ i++;
+ } while (!(bd_status & R_W));
+
+ return 0;
+}
+
+static int fill_init_enet_entries(struct ucc_geth_private *ugeth,
+ u32 *p_start,
+ u8 num_entries,
+ u32 thread_size,
+ u32 thread_alignment,
+ unsigned int risc,
+ int skip_page_for_first_entry)
+{
+ u32 init_enet_offset;
+ u8 i;
+ int snum;
+
+ for (i = 0; i < num_entries; i++) {
+ if ((snum = qe_get_snum()) < 0) {
+ if (netif_msg_ifup(ugeth))
+ pr_err("Can not get SNUM\n");
+ return snum;
+ }
+ if ((i == 0) && skip_page_for_first_entry)
+ /* First entry of Rx does not have page */
+ init_enet_offset = 0;
+ else {
+ init_enet_offset =
+ qe_muram_alloc(thread_size, thread_alignment);
+ if (IS_ERR_VALUE(init_enet_offset)) {
+ if (netif_msg_ifup(ugeth))
+ pr_err("Can not allocate DPRAM memory\n");
+ qe_put_snum((u8) snum);
+ return -ENOMEM;
+ }
+ }
+ *(p_start++) =
+ ((u8) snum << ENET_INIT_PARAM_SNUM_SHIFT) | init_enet_offset
+ | risc;
+ }
+
+ return 0;
+}
+
+static int return_init_enet_entries(struct ucc_geth_private *ugeth,
+ u32 *p_start,
+ u8 num_entries,
+ unsigned int risc,
+ int skip_page_for_first_entry)
+{
+ u32 init_enet_offset;
+ u8 i;
+ int snum;
+
+ for (i = 0; i < num_entries; i++) {
+ u32 val = *p_start;
+
+ /* Check that this entry was actually valid --
+ needed in case allocations failed */
+ if ((val & ENET_INIT_PARAM_RISC_MASK) == risc) {
+ snum =
+ (u32) (val & ENET_INIT_PARAM_SNUM_MASK) >>
+ ENET_INIT_PARAM_SNUM_SHIFT;
+ qe_put_snum((u8) snum);
+ if (!((i == 0) && skip_page_for_first_entry)) {
+ /* First entry of Rx does not have page */
+ init_enet_offset =
+ (val & ENET_INIT_PARAM_PTR_MASK);
+ qe_muram_free(init_enet_offset);
+ }
+ *p_start++ = 0;
+ }
+ }
+
+ return 0;
+}
+
+#ifdef DEBUG
+static int dump_init_enet_entries(struct ucc_geth_private *ugeth,
+ u32 __iomem *p_start,
+ u8 num_entries,
+ u32 thread_size,
+ unsigned int risc,
+ int skip_page_for_first_entry)
+{
+ u32 init_enet_offset;
+ u8 i;
+ int snum;
+
+ for (i = 0; i < num_entries; i++) {
+ u32 val = in_be32(p_start);
+
+ /* Check that this entry was actually valid --
+ needed in case an allocation failed */
+ if ((val & ENET_INIT_PARAM_RISC_MASK) == risc) {
+ snum =
+ (u32) (val & ENET_INIT_PARAM_SNUM_MASK) >>
+ ENET_INIT_PARAM_SNUM_SHIFT;
+ qe_put_snum((u8) snum);
+ if (!((i == 0) && skip_page_for_first_entry)) {
+ /* First entry of Rx does not have page */
+ init_enet_offset =
+ (in_be32(p_start) &
+ ENET_INIT_PARAM_PTR_MASK);
+ pr_info("Init enet entry %d:\n", i);
+ pr_info("Base address: 0x%08x\n",
+ (u32)qe_muram_addr(init_enet_offset));
+ mem_disp(qe_muram_addr(init_enet_offset),
+ thread_size);
+ }
+ p_start++;
+ }
+ }
+
+ return 0;
+}
+#endif
+
+static void put_enet_addr_container(struct enet_addr_container *enet_addr_cont)
+{
+ kfree(enet_addr_cont);
+}
+
+static void set_mac_addr(__be16 __iomem *reg, u8 *mac)
+{
+ out_be16(&reg[0], ((u16)mac[5] << 8) | mac[4]);
+ out_be16(&reg[1], ((u16)mac[3] << 8) | mac[2]);
+ out_be16(&reg[2], ((u16)mac[1] << 8) | mac[0]);
+}
+
+static int hw_clear_addr_in_paddr(struct ucc_geth_private *ugeth, u8 paddr_num)
+{
+ struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
+
+ if (paddr_num >= NUM_OF_PADDRS) {
+ pr_warn("%s: Invalid paddr_num: %u\n", __func__, paddr_num);
+ return -EINVAL;
+ }
+
+ p_82xx_addr_filt =
+ (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->p_rx_glbl_pram->
+ addressfiltering;
+
+ /* Writing address ff.ff.ff.ff.ff.ff disables address
+ recognition for this register */
+ out_be16(&p_82xx_addr_filt->paddr[paddr_num].h, 0xffff);
+ out_be16(&p_82xx_addr_filt->paddr[paddr_num].m, 0xffff);
+ out_be16(&p_82xx_addr_filt->paddr[paddr_num].l, 0xffff);
+
+ return 0;
+}
+
+static void hw_add_addr_in_hash(struct ucc_geth_private *ugeth,
+ u8 *p_enet_addr)
+{
+ struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
+ u32 cecr_subblock;
+
+ p_82xx_addr_filt =
+ (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->p_rx_glbl_pram->
+ addressfiltering;
+
+ cecr_subblock =
+ ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
+
+ /* Ethernet frames are defined in Little Endian mode, so to insert
+ * the address into the hash (Big Endian mode), we reverse the bytes. */
+
+ set_mac_addr(&p_82xx_addr_filt->taddr.h, p_enet_addr);
+
+ qe_issue_cmd(QE_SET_GROUP_ADDRESS, cecr_subblock,
+ QE_CR_PROTOCOL_ETHERNET, 0);
+}
+
+#ifdef DEBUG
+static void get_statistics(struct ucc_geth_private *ugeth,
+ struct ucc_geth_tx_firmware_statistics *
+ tx_firmware_statistics,
+ struct ucc_geth_rx_firmware_statistics *
+ rx_firmware_statistics,
+ struct ucc_geth_hardware_statistics *hardware_statistics)
+{
+ struct ucc_fast __iomem *uf_regs;
+ struct ucc_geth __iomem *ug_regs;
+ struct ucc_geth_tx_firmware_statistics_pram *p_tx_fw_statistics_pram;
+ struct ucc_geth_rx_firmware_statistics_pram *p_rx_fw_statistics_pram;
+
+ ug_regs = ugeth->ug_regs;
+ uf_regs = (struct ucc_fast __iomem *) ug_regs;
+ p_tx_fw_statistics_pram = ugeth->p_tx_fw_statistics_pram;
+ p_rx_fw_statistics_pram = ugeth->p_rx_fw_statistics_pram;
+
+ /* Copy Tx firmware statistics only if the user handed in a pointer
+ and the driver actually gathers Tx firmware statistics */
+ if (tx_firmware_statistics && p_tx_fw_statistics_pram) {
+ tx_firmware_statistics->sicoltx =
+ in_be32(&p_tx_fw_statistics_pram->sicoltx);
+ tx_firmware_statistics->mulcoltx =
+ in_be32(&p_tx_fw_statistics_pram->mulcoltx);
+ tx_firmware_statistics->latecoltxfr =
+ in_be32(&p_tx_fw_statistics_pram->latecoltxfr);
+ tx_firmware_statistics->frabortduecol =
+ in_be32(&p_tx_fw_statistics_pram->frabortduecol);
+ tx_firmware_statistics->frlostinmactxer =
+ in_be32(&p_tx_fw_statistics_pram->frlostinmactxer);
+ tx_firmware_statistics->carriersenseertx =
+ in_be32(&p_tx_fw_statistics_pram->carriersenseertx);
+ tx_firmware_statistics->frtxok =
+ in_be32(&p_tx_fw_statistics_pram->frtxok);
+ tx_firmware_statistics->txfrexcessivedefer =
+ in_be32(&p_tx_fw_statistics_pram->txfrexcessivedefer);
+ tx_firmware_statistics->txpkts256 =
+ in_be32(&p_tx_fw_statistics_pram->txpkts256);
+ tx_firmware_statistics->txpkts512 =
+ in_be32(&p_tx_fw_statistics_pram->txpkts512);
+ tx_firmware_statistics->txpkts1024 =
+ in_be32(&p_tx_fw_statistics_pram->txpkts1024);
+ tx_firmware_statistics->txpktsjumbo =
+ in_be32(&p_tx_fw_statistics_pram->txpktsjumbo);
+ }
+
+ /* Copy Rx firmware statistics only if the user handed in a pointer
+ * and the driver actually gathers Rx firmware statistics */
+ if (rx_firmware_statistics && p_rx_fw_statistics_pram) {
+ int i;
+ rx_firmware_statistics->frrxfcser =
+ in_be32(&p_rx_fw_statistics_pram->frrxfcser);
+ rx_firmware_statistics->fraligner =
+ in_be32(&p_rx_fw_statistics_pram->fraligner);
+ rx_firmware_statistics->inrangelenrxer =
+ in_be32(&p_rx_fw_statistics_pram->inrangelenrxer);
+ rx_firmware_statistics->outrangelenrxer =
+ in_be32(&p_rx_fw_statistics_pram->outrangelenrxer);
+ rx_firmware_statistics->frtoolong =
+ in_be32(&p_rx_fw_statistics_pram->frtoolong);
+ rx_firmware_statistics->runt =
+ in_be32(&p_rx_fw_statistics_pram->runt);
+ rx_firmware_statistics->verylongevent =
+ in_be32(&p_rx_fw_statistics_pram->verylongevent);
+ rx_firmware_statistics->symbolerror =
+ in_be32(&p_rx_fw_statistics_pram->symbolerror);
+ rx_firmware_statistics->dropbsy =
+ in_be32(&p_rx_fw_statistics_pram->dropbsy);
+ for (i = 0; i < 0x8; i++)
+ rx_firmware_statistics->res0[i] =
+ p_rx_fw_statistics_pram->res0[i];
+ rx_firmware_statistics->mismatchdrop =
+ in_be32(&p_rx_fw_statistics_pram->mismatchdrop);
+ rx_firmware_statistics->underpkts =
+ in_be32(&p_rx_fw_statistics_pram->underpkts);
+ rx_firmware_statistics->pkts256 =
+ in_be32(&p_rx_fw_statistics_pram->pkts256);
+ rx_firmware_statistics->pkts512 =
+ in_be32(&p_rx_fw_statistics_pram->pkts512);
+ rx_firmware_statistics->pkts1024 =
+ in_be32(&p_rx_fw_statistics_pram->pkts1024);
+ rx_firmware_statistics->pktsjumbo =
+ in_be32(&p_rx_fw_statistics_pram->pktsjumbo);
+ rx_firmware_statistics->frlossinmacer =
+ in_be32(&p_rx_fw_statistics_pram->frlossinmacer);
+ rx_firmware_statistics->pausefr =
+ in_be32(&p_rx_fw_statistics_pram->pausefr);
+ for (i = 0; i < 0x4; i++)
+ rx_firmware_statistics->res1[i] =
+ p_rx_fw_statistics_pram->res1[i];
+ rx_firmware_statistics->removevlan =
+ in_be32(&p_rx_fw_statistics_pram->removevlan);
+ rx_firmware_statistics->replacevlan =
+ in_be32(&p_rx_fw_statistics_pram->replacevlan);
+ rx_firmware_statistics->insertvlan =
+ in_be32(&p_rx_fw_statistics_pram->insertvlan);
+ }
+
+ /* Copy hardware statistics only if the user handed in a pointer
+ and the driver actually gathers hardware statistics */
+ if (hardware_statistics &&
+ (in_be32(&uf_regs->upsmr) & UCC_GETH_UPSMR_HSE)) {
+ hardware_statistics->tx64 = in_be32(&ug_regs->tx64);
+ hardware_statistics->tx127 = in_be32(&ug_regs->tx127);
+ hardware_statistics->tx255 = in_be32(&ug_regs->tx255);
+ hardware_statistics->rx64 = in_be32(&ug_regs->rx64);
+ hardware_statistics->rx127 = in_be32(&ug_regs->rx127);
+ hardware_statistics->rx255 = in_be32(&ug_regs->rx255);
+ hardware_statistics->txok = in_be32(&ug_regs->txok);
+ hardware_statistics->txcf = in_be16(&ug_regs->txcf);
+ hardware_statistics->tmca = in_be32(&ug_regs->tmca);
+ hardware_statistics->tbca = in_be32(&ug_regs->tbca);
+ hardware_statistics->rxfok = in_be32(&ug_regs->rxfok);
+ hardware_statistics->rxbok = in_be32(&ug_regs->rxbok);
+ hardware_statistics->rbyt = in_be32(&ug_regs->rbyt);
+ hardware_statistics->rmca = in_be32(&ug_regs->rmca);
+ hardware_statistics->rbca = in_be32(&ug_regs->rbca);
+ }
+}
+
+static void dump_bds(struct ucc_geth_private *ugeth)
+{
+ int i;
+ int length;
+
+ for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
+ if (ugeth->p_tx_bd_ring[i]) {
+ length =
+ (ugeth->ug_info->bdRingLenTx[i] *
+ sizeof(struct qe_bd));
+ pr_info("TX BDs[%d]\n", i);
+ mem_disp(ugeth->p_tx_bd_ring[i], length);
+ }
+ }
+ for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
+ if (ugeth->p_rx_bd_ring[i]) {
+ length =
+ (ugeth->ug_info->bdRingLenRx[i] *
+ sizeof(struct qe_bd));
+ pr_info("RX BDs[%d]\n", i);
+ mem_disp(ugeth->p_rx_bd_ring[i], length);
+ }
+ }
+}
+
+static void dump_regs(struct ucc_geth_private *ugeth)
+{
+ int i;
+
+ pr_info("UCC%d Geth registers:\n", ugeth->ug_info->uf_info.ucc_num + 1);
+ pr_info("Base address: 0x%08x\n", (u32)ugeth->ug_regs);
+
+ pr_info("maccfg1 : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->maccfg1,
+ in_be32(&ugeth->ug_regs->maccfg1));
+ pr_info("maccfg2 : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->maccfg2,
+ in_be32(&ugeth->ug_regs->maccfg2));
+ pr_info("ipgifg : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->ipgifg,
+ in_be32(&ugeth->ug_regs->ipgifg));
+ pr_info("hafdup : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->hafdup,
+ in_be32(&ugeth->ug_regs->hafdup));
+ pr_info("ifctl : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->ifctl,
+ in_be32(&ugeth->ug_regs->ifctl));
+ pr_info("ifstat : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->ifstat,
+ in_be32(&ugeth->ug_regs->ifstat));
+ pr_info("macstnaddr1: addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->macstnaddr1,
+ in_be32(&ugeth->ug_regs->macstnaddr1));
+ pr_info("macstnaddr2: addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->macstnaddr2,
+ in_be32(&ugeth->ug_regs->macstnaddr2));
+ pr_info("uempr : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->uempr,
+ in_be32(&ugeth->ug_regs->uempr));
+ pr_info("utbipar : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->utbipar,
+ in_be32(&ugeth->ug_regs->utbipar));
+ pr_info("uescr : addr - 0x%08x, val - 0x%04x\n",
+ (u32)&ugeth->ug_regs->uescr,
+ in_be16(&ugeth->ug_regs->uescr));
+ pr_info("tx64 : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->tx64,
+ in_be32(&ugeth->ug_regs->tx64));
+ pr_info("tx127 : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->tx127,
+ in_be32(&ugeth->ug_regs->tx127));
+ pr_info("tx255 : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->tx255,
+ in_be32(&ugeth->ug_regs->tx255));
+ pr_info("rx64 : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->rx64,
+ in_be32(&ugeth->ug_regs->rx64));
+ pr_info("rx127 : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->rx127,
+ in_be32(&ugeth->ug_regs->rx127));
+ pr_info("rx255 : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->rx255,
+ in_be32(&ugeth->ug_regs->rx255));
+ pr_info("txok : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->txok,
+ in_be32(&ugeth->ug_regs->txok));
+ pr_info("txcf : addr - 0x%08x, val - 0x%04x\n",
+ (u32)&ugeth->ug_regs->txcf,
+ in_be16(&ugeth->ug_regs->txcf));
+ pr_info("tmca : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->tmca,
+ in_be32(&ugeth->ug_regs->tmca));
+ pr_info("tbca : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->tbca,
+ in_be32(&ugeth->ug_regs->tbca));
+ pr_info("rxfok : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->rxfok,
+ in_be32(&ugeth->ug_regs->rxfok));
+ pr_info("rxbok : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->rxbok,
+ in_be32(&ugeth->ug_regs->rxbok));
+ pr_info("rbyt : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->rbyt,
+ in_be32(&ugeth->ug_regs->rbyt));
+ pr_info("rmca : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->rmca,
+ in_be32(&ugeth->ug_regs->rmca));
+ pr_info("rbca : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->rbca,
+ in_be32(&ugeth->ug_regs->rbca));
+ pr_info("scar : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->scar,
+ in_be32(&ugeth->ug_regs->scar));
+ pr_info("scam : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->ug_regs->scam,
+ in_be32(&ugeth->ug_regs->scam));
+
+ if (ugeth->p_thread_data_tx) {
+ int numThreadsTxNumerical;
+ switch (ugeth->ug_info->numThreadsTx) {
+ case UCC_GETH_NUM_OF_THREADS_1:
+ numThreadsTxNumerical = 1;
+ break;
+ case UCC_GETH_NUM_OF_THREADS_2:
+ numThreadsTxNumerical = 2;
+ break;
+ case UCC_GETH_NUM_OF_THREADS_4:
+ numThreadsTxNumerical = 4;
+ break;
+ case UCC_GETH_NUM_OF_THREADS_6:
+ numThreadsTxNumerical = 6;
+ break;
+ case UCC_GETH_NUM_OF_THREADS_8:
+ numThreadsTxNumerical = 8;
+ break;
+ default:
+ numThreadsTxNumerical = 0;
+ break;
+ }
+
+ pr_info("Thread data TXs:\n");
+ pr_info("Base address: 0x%08x\n",
+ (u32)ugeth->p_thread_data_tx);
+ for (i = 0; i < numThreadsTxNumerical; i++) {
+ pr_info("Thread data TX[%d]:\n", i);
+ pr_info("Base address: 0x%08x\n",
+ (u32)&ugeth->p_thread_data_tx[i]);
+ mem_disp((u8 *)&ugeth->p_thread_data_tx[i],
+ sizeof(struct ucc_geth_thread_data_tx));
+ }
+ }
+ if (ugeth->p_thread_data_rx) {
+ int numThreadsRxNumerical;
+ switch (ugeth->ug_info->numThreadsRx) {
+ case UCC_GETH_NUM_OF_THREADS_1:
+ numThreadsRxNumerical = 1;
+ break;
+ case UCC_GETH_NUM_OF_THREADS_2:
+ numThreadsRxNumerical = 2;
+ break;
+ case UCC_GETH_NUM_OF_THREADS_4:
+ numThreadsRxNumerical = 4;
+ break;
+ case UCC_GETH_NUM_OF_THREADS_6:
+ numThreadsRxNumerical = 6;
+ break;
+ case UCC_GETH_NUM_OF_THREADS_8:
+ numThreadsRxNumerical = 8;
+ break;
+ default:
+ numThreadsRxNumerical = 0;
+ break;
+ }
+
+ pr_info("Thread data RX:\n");
+ pr_info("Base address: 0x%08x\n",
+ (u32)ugeth->p_thread_data_rx);
+ for (i = 0; i < numThreadsRxNumerical; i++) {
+ pr_info("Thread data RX[%d]:\n", i);
+ pr_info("Base address: 0x%08x\n",
+ (u32)&ugeth->p_thread_data_rx[i]);
+ mem_disp((u8 *)&ugeth->p_thread_data_rx[i],
+ sizeof(struct ucc_geth_thread_data_rx));
+ }
+ }
+ if (ugeth->p_exf_glbl_param) {
+ pr_info("EXF global param:\n");
+ pr_info("Base address: 0x%08x\n",
+ (u32)ugeth->p_exf_glbl_param);
+ mem_disp((u8 *) ugeth->p_exf_glbl_param,
+ sizeof(*ugeth->p_exf_glbl_param));
+ }
+ if (ugeth->p_tx_glbl_pram) {
+ pr_info("TX global param:\n");
+ pr_info("Base address: 0x%08x\n", (u32)ugeth->p_tx_glbl_pram);
+ pr_info("temoder : addr - 0x%08x, val - 0x%04x\n",
+ (u32)&ugeth->p_tx_glbl_pram->temoder,
+ in_be16(&ugeth->p_tx_glbl_pram->temoder));
+ pr_info("sqptr : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_tx_glbl_pram->sqptr,
+ in_be32(&ugeth->p_tx_glbl_pram->sqptr));
+ pr_info("schedulerbasepointer: addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_tx_glbl_pram->schedulerbasepointer,
+ in_be32(&ugeth->p_tx_glbl_pram->schedulerbasepointer));
+ pr_info("txrmonbaseptr: addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_tx_glbl_pram->txrmonbaseptr,
+ in_be32(&ugeth->p_tx_glbl_pram->txrmonbaseptr));
+ pr_info("tstate : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_tx_glbl_pram->tstate,
+ in_be32(&ugeth->p_tx_glbl_pram->tstate));
+ pr_info("iphoffset[0] : addr - 0x%08x, val - 0x%02x\n",
+ (u32)&ugeth->p_tx_glbl_pram->iphoffset[0],
+ ugeth->p_tx_glbl_pram->iphoffset[0]);
+ pr_info("iphoffset[1] : addr - 0x%08x, val - 0x%02x\n",
+ (u32)&ugeth->p_tx_glbl_pram->iphoffset[1],
+ ugeth->p_tx_glbl_pram->iphoffset[1]);
+ pr_info("iphoffset[2] : addr - 0x%08x, val - 0x%02x\n",
+ (u32)&ugeth->p_tx_glbl_pram->iphoffset[2],
+ ugeth->p_tx_glbl_pram->iphoffset[2]);
+ pr_info("iphoffset[3] : addr - 0x%08x, val - 0x%02x\n",
+ (u32)&ugeth->p_tx_glbl_pram->iphoffset[3],
+ ugeth->p_tx_glbl_pram->iphoffset[3]);
+ pr_info("iphoffset[4] : addr - 0x%08x, val - 0x%02x\n",
+ (u32)&ugeth->p_tx_glbl_pram->iphoffset[4],
+ ugeth->p_tx_glbl_pram->iphoffset[4]);
+ pr_info("iphoffset[5] : addr - 0x%08x, val - 0x%02x\n",
+ (u32)&ugeth->p_tx_glbl_pram->iphoffset[5],
+ ugeth->p_tx_glbl_pram->iphoffset[5]);
+ pr_info("iphoffset[6] : addr - 0x%08x, val - 0x%02x\n",
+ (u32)&ugeth->p_tx_glbl_pram->iphoffset[6],
+ ugeth->p_tx_glbl_pram->iphoffset[6]);
+ pr_info("iphoffset[7] : addr - 0x%08x, val - 0x%02x\n",
+ (u32)&ugeth->p_tx_glbl_pram->iphoffset[7],
+ ugeth->p_tx_glbl_pram->iphoffset[7]);
+ pr_info("vtagtable[0] : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_tx_glbl_pram->vtagtable[0],
+ in_be32(&ugeth->p_tx_glbl_pram->vtagtable[0]));
+ pr_info("vtagtable[1] : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_tx_glbl_pram->vtagtable[1],
+ in_be32(&ugeth->p_tx_glbl_pram->vtagtable[1]));
+ pr_info("vtagtable[2] : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_tx_glbl_pram->vtagtable[2],
+ in_be32(&ugeth->p_tx_glbl_pram->vtagtable[2]));
+ pr_info("vtagtable[3] : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_tx_glbl_pram->vtagtable[3],
+ in_be32(&ugeth->p_tx_glbl_pram->vtagtable[3]));
+ pr_info("vtagtable[4] : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_tx_glbl_pram->vtagtable[4],
+ in_be32(&ugeth->p_tx_glbl_pram->vtagtable[4]));
+ pr_info("vtagtable[5] : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_tx_glbl_pram->vtagtable[5],
+ in_be32(&ugeth->p_tx_glbl_pram->vtagtable[5]));
+ pr_info("vtagtable[6] : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_tx_glbl_pram->vtagtable[6],
+ in_be32(&ugeth->p_tx_glbl_pram->vtagtable[6]));
+ pr_info("vtagtable[7] : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_tx_glbl_pram->vtagtable[7],
+ in_be32(&ugeth->p_tx_glbl_pram->vtagtable[7]));
+ pr_info("tqptr : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_tx_glbl_pram->tqptr,
+ in_be32(&ugeth->p_tx_glbl_pram->tqptr));
+ }
+ if (ugeth->p_rx_glbl_pram) {
+ pr_info("RX global param:\n");
+ pr_info("Base address: 0x%08x\n", (u32)ugeth->p_rx_glbl_pram);
+ pr_info("remoder : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_rx_glbl_pram->remoder,
+ in_be32(&ugeth->p_rx_glbl_pram->remoder));
+ pr_info("rqptr : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_rx_glbl_pram->rqptr,
+ in_be32(&ugeth->p_rx_glbl_pram->rqptr));
+ pr_info("typeorlen : addr - 0x%08x, val - 0x%04x\n",
+ (u32)&ugeth->p_rx_glbl_pram->typeorlen,
+ in_be16(&ugeth->p_rx_glbl_pram->typeorlen));
+ pr_info("rxgstpack : addr - 0x%08x, val - 0x%02x\n",
+ (u32)&ugeth->p_rx_glbl_pram->rxgstpack,
+ ugeth->p_rx_glbl_pram->rxgstpack);
+ pr_info("rxrmonbaseptr : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_rx_glbl_pram->rxrmonbaseptr,
+ in_be32(&ugeth->p_rx_glbl_pram->rxrmonbaseptr));
+ pr_info("intcoalescingptr: addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_rx_glbl_pram->intcoalescingptr,
+ in_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr));
+ pr_info("rstate : addr - 0x%08x, val - 0x%02x\n",
+ (u32)&ugeth->p_rx_glbl_pram->rstate,
+ ugeth->p_rx_glbl_pram->rstate);
+ pr_info("mrblr : addr - 0x%08x, val - 0x%04x\n",
+ (u32)&ugeth->p_rx_glbl_pram->mrblr,
+ in_be16(&ugeth->p_rx_glbl_pram->mrblr));
+ pr_info("rbdqptr : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_rx_glbl_pram->rbdqptr,
+ in_be32(&ugeth->p_rx_glbl_pram->rbdqptr));
+ pr_info("mflr : addr - 0x%08x, val - 0x%04x\n",
+ (u32)&ugeth->p_rx_glbl_pram->mflr,
+ in_be16(&ugeth->p_rx_glbl_pram->mflr));
+ pr_info("minflr : addr - 0x%08x, val - 0x%04x\n",
+ (u32)&ugeth->p_rx_glbl_pram->minflr,
+ in_be16(&ugeth->p_rx_glbl_pram->minflr));
+ pr_info("maxd1 : addr - 0x%08x, val - 0x%04x\n",
+ (u32)&ugeth->p_rx_glbl_pram->maxd1,
+ in_be16(&ugeth->p_rx_glbl_pram->maxd1));
+ pr_info("maxd2 : addr - 0x%08x, val - 0x%04x\n",
+ (u32)&ugeth->p_rx_glbl_pram->maxd2,
+ in_be16(&ugeth->p_rx_glbl_pram->maxd2));
+ pr_info("ecamptr : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_rx_glbl_pram->ecamptr,
+ in_be32(&ugeth->p_rx_glbl_pram->ecamptr));
+ pr_info("l2qt : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_rx_glbl_pram->l2qt,
+ in_be32(&ugeth->p_rx_glbl_pram->l2qt));
+ pr_info("l3qt[0] : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_rx_glbl_pram->l3qt[0],
+ in_be32(&ugeth->p_rx_glbl_pram->l3qt[0]));
+ pr_info("l3qt[1] : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_rx_glbl_pram->l3qt[1],
+ in_be32(&ugeth->p_rx_glbl_pram->l3qt[1]));
+ pr_info("l3qt[2] : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_rx_glbl_pram->l3qt[2],
+ in_be32(&ugeth->p_rx_glbl_pram->l3qt[2]));
+ pr_info("l3qt[3] : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_rx_glbl_pram->l3qt[3],
+ in_be32(&ugeth->p_rx_glbl_pram->l3qt[3]));
+ pr_info("l3qt[4] : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_rx_glbl_pram->l3qt[4],
+ in_be32(&ugeth->p_rx_glbl_pram->l3qt[4]));
+ pr_info("l3qt[5] : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_rx_glbl_pram->l3qt[5],
+ in_be32(&ugeth->p_rx_glbl_pram->l3qt[5]));
+ pr_info("l3qt[6] : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_rx_glbl_pram->l3qt[6],
+ in_be32(&ugeth->p_rx_glbl_pram->l3qt[6]));
+ pr_info("l3qt[7] : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_rx_glbl_pram->l3qt[7],
+ in_be32(&ugeth->p_rx_glbl_pram->l3qt[7]));
+ pr_info("vlantype : addr - 0x%08x, val - 0x%04x\n",
+ (u32)&ugeth->p_rx_glbl_pram->vlantype,
+ in_be16(&ugeth->p_rx_glbl_pram->vlantype));
+ pr_info("vlantci : addr - 0x%08x, val - 0x%04x\n",
+ (u32)&ugeth->p_rx_glbl_pram->vlantci,
+ in_be16(&ugeth->p_rx_glbl_pram->vlantci));
+ for (i = 0; i < 64; i++)
+ pr_info("addressfiltering[%d]: addr - 0x%08x, val - 0x%02x\n",
+ i,
+ (u32)&ugeth->p_rx_glbl_pram->addressfiltering[i],
+ ugeth->p_rx_glbl_pram->addressfiltering[i]);
+ pr_info("exfGlobalParam : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_rx_glbl_pram->exfGlobalParam,
+ in_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam));
+ }
+ if (ugeth->p_send_q_mem_reg) {
+ pr_info("Send Q memory registers:\n");
+ pr_info("Base address: 0x%08x\n", (u32)ugeth->p_send_q_mem_reg);
+ for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
+ pr_info("SQQD[%d]:\n", i);
+ pr_info("Base address: 0x%08x\n",
+ (u32)&ugeth->p_send_q_mem_reg->sqqd[i]);
+ mem_disp((u8 *)&ugeth->p_send_q_mem_reg->sqqd[i],
+ sizeof(struct ucc_geth_send_queue_qd));
+ }
+ }
+ if (ugeth->p_scheduler) {
+ pr_info("Scheduler:\n");
+ pr_info("Base address: 0x%08x\n", (u32)ugeth->p_scheduler);
+ mem_disp((u8 *) ugeth->p_scheduler,
+ sizeof(*ugeth->p_scheduler));
+ }
+ if (ugeth->p_tx_fw_statistics_pram) {
+ pr_info("TX FW statistics pram:\n");
+ pr_info("Base address: 0x%08x\n",
+ (u32)ugeth->p_tx_fw_statistics_pram);
+ mem_disp((u8 *) ugeth->p_tx_fw_statistics_pram,
+ sizeof(*ugeth->p_tx_fw_statistics_pram));
+ }
+ if (ugeth->p_rx_fw_statistics_pram) {
+ pr_info("RX FW statistics pram:\n");
+ pr_info("Base address: 0x%08x\n",
+ (u32)ugeth->p_rx_fw_statistics_pram);
+ mem_disp((u8 *) ugeth->p_rx_fw_statistics_pram,
+ sizeof(*ugeth->p_rx_fw_statistics_pram));
+ }
+ if (ugeth->p_rx_irq_coalescing_tbl) {
+ pr_info("RX IRQ coalescing tables:\n");
+ pr_info("Base address: 0x%08x\n",
+ (u32)ugeth->p_rx_irq_coalescing_tbl);
+ for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
+ pr_info("RX IRQ coalescing table entry[%d]:\n", i);
+ pr_info("Base address: 0x%08x\n",
+ (u32)&ugeth->p_rx_irq_coalescing_tbl->
+ coalescingentry[i]);
+ pr_info("interruptcoalescingmaxvalue: addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_rx_irq_coalescing_tbl->
+ coalescingentry[i].interruptcoalescingmaxvalue,
+ in_be32(&ugeth->p_rx_irq_coalescing_tbl->
+ coalescingentry[i].
+ interruptcoalescingmaxvalue));
+ pr_info("interruptcoalescingcounter : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_rx_irq_coalescing_tbl->
+ coalescingentry[i].interruptcoalescingcounter,
+ in_be32(&ugeth->p_rx_irq_coalescing_tbl->
+ coalescingentry[i].
+ interruptcoalescingcounter));
+ }
+ }
+ if (ugeth->p_rx_bd_qs_tbl) {
+ pr_info("RX BD QS tables:\n");
+ pr_info("Base address: 0x%08x\n", (u32)ugeth->p_rx_bd_qs_tbl);
+ for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
+ pr_info("RX BD QS table[%d]:\n", i);
+ pr_info("Base address: 0x%08x\n",
+ (u32)&ugeth->p_rx_bd_qs_tbl[i]);
+ pr_info("bdbaseptr : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_rx_bd_qs_tbl[i].bdbaseptr,
+ in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdbaseptr));
+ pr_info("bdptr : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_rx_bd_qs_tbl[i].bdptr,
+ in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdptr));
+ pr_info("externalbdbaseptr: addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
+ in_be32(&ugeth->p_rx_bd_qs_tbl[i].
+ externalbdbaseptr));
+ pr_info("externalbdptr : addr - 0x%08x, val - 0x%08x\n",
+ (u32)&ugeth->p_rx_bd_qs_tbl[i].externalbdptr,
+ in_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdptr));
+ pr_info("ucode RX Prefetched BDs:\n");
+ pr_info("Base address: 0x%08x\n",
+ (u32)qe_muram_addr(in_be32
+ (&ugeth->p_rx_bd_qs_tbl[i].
+ bdbaseptr)));
+ mem_disp((u8 *)
+ qe_muram_addr(in_be32
+ (&ugeth->p_rx_bd_qs_tbl[i].
+ bdbaseptr)),
+ sizeof(struct ucc_geth_rx_prefetched_bds));
+ }
+ }
+ if (ugeth->p_init_enet_param_shadow) {
+ int size;
+ pr_info("Init enet param shadow:\n");
+ pr_info("Base address: 0x%08x\n",
+ (u32) ugeth->p_init_enet_param_shadow);
+ mem_disp((u8 *) ugeth->p_init_enet_param_shadow,
+ sizeof(*ugeth->p_init_enet_param_shadow));
+
+ size = sizeof(struct ucc_geth_thread_rx_pram);
+ if (ugeth->ug_info->rxExtendedFiltering) {
+ size +=
+ THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING;
+ if (ugeth->ug_info->largestexternallookupkeysize ==
+ QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
+ size +=
+ THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8;
+ if (ugeth->ug_info->largestexternallookupkeysize ==
+ QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES)
+ size +=
+ THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16;
+ }
+
+ dump_init_enet_entries(ugeth,
+ &(ugeth->p_init_enet_param_shadow->
+ txthread[0]),
+ ENET_INIT_PARAM_MAX_ENTRIES_TX,
+ sizeof(struct ucc_geth_thread_tx_pram),
+ ugeth->ug_info->riscTx, 0);
+ dump_init_enet_entries(ugeth,
+ &(ugeth->p_init_enet_param_shadow->
+ rxthread[0]),
+ ENET_INIT_PARAM_MAX_ENTRIES_RX, size,
+ ugeth->ug_info->riscRx, 1);
+ }
+}
+#endif /* DEBUG */
+
+static void init_default_reg_vals(u32 __iomem *upsmr_register,
+ u32 __iomem *maccfg1_register,
+ u32 __iomem *maccfg2_register)
+{
+ out_be32(upsmr_register, UCC_GETH_UPSMR_INIT);
+ out_be32(maccfg1_register, UCC_GETH_MACCFG1_INIT);
+ out_be32(maccfg2_register, UCC_GETH_MACCFG2_INIT);
+}
+
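+/* Program the HAFDUP register from the half-duplex parameters; values
+ * exceeding their field maxima are rejected with -EINVAL.
+ */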
+static int init_half_duplex_params(int alt_beb,
+ int back_pressure_no_backoff,
+ int no_backoff,
+ int excess_defer,
+ u8 alt_beb_truncation,
+ u8 max_retransmissions,
+ u8 collision_window,
+ u32 __iomem *hafdup_register)
+{
+ u32 value = 0;
+
+ if ((alt_beb_truncation > HALFDUP_ALT_BEB_TRUNCATION_MAX) ||
+ (max_retransmissions > HALFDUP_MAX_RETRANSMISSION_MAX) ||
+ (collision_window > HALFDUP_COLLISION_WINDOW_MAX))
+ return -EINVAL;
+
+ value = (u32) (alt_beb_truncation << HALFDUP_ALT_BEB_TRUNCATION_SHIFT);
+
+ if (alt_beb)
+ value |= HALFDUP_ALT_BEB;
+ if (back_pressure_no_backoff)
+ value |= HALFDUP_BACK_PRESSURE_NO_BACKOFF;
+ if (no_backoff)
+ value |= HALFDUP_NO_BACKOFF;
+ if (excess_defer)
+ value |= HALFDUP_EXCESSIVE_DEFER;
+
+ value |= (max_retransmissions << HALFDUP_MAX_RETRANSMISSION_SHIFT);
+
+ value |= collision_window;
+
+ out_be32(hafdup_register, value);
+ return 0;
+}
+
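+/* Program the IPGIFG register from the four inter-frame gap values.
+ * Non-back-to-back IPG part 1 must not exceed part 2, and every value
+ * must fit within its field.
+ */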
+static int init_inter_frame_gap_params(u8 non_btb_cs_ipg,
+ u8 non_btb_ipg,
+ u8 min_ifg,
+ u8 btb_ipg,
+ u32 __iomem *ipgifg_register)
+{
+ u32 value = 0;
+
+ /* Non-Back-to-back IPG part 1 should be <= Non-Back-to-back
+ IPG part 2 */
+ if (non_btb_cs_ipg > non_btb_ipg)
+ return -EINVAL;
+
+ if ((non_btb_cs_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART1_MAX) ||
+ (non_btb_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART2_MAX) ||
+ /*(min_ifg > IPGIFG_MINIMUM_IFG_ENFORCEMENT_MAX) || */
+ (btb_ipg > IPGIFG_BACK_TO_BACK_IFG_MAX))
+ return -EINVAL;
+
+ value |=
+ ((non_btb_cs_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART1_SHIFT) &
+ IPGIFG_NBTB_CS_IPG_MASK);
+ value |=
+ ((non_btb_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART2_SHIFT) &
+ IPGIFG_NBTB_IPG_MASK);
+ value |=
+ ((min_ifg << IPGIFG_MINIMUM_IFG_ENFORCEMENT_SHIFT) &
+ IPGIFG_MIN_IFG_MASK);
+ value |= (btb_ipg & IPGIFG_BTB_IPG_MASK);
+
+ out_be32(ipgifg_register, value);
+ return 0;
+}
+
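+/* Set the pause frame timing in UEMPR, the automatic flow control mode
+ * in UPSMR, and the Rx/Tx flow control enables in MACCFG1.
+ */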
+int init_flow_control_params(u32 automatic_flow_control_mode,
+ int rx_flow_control_enable,
+ int tx_flow_control_enable,
+ u16 pause_period,
+ u16 extension_field,
+ u32 __iomem *upsmr_register,
+ u32 __iomem *uempr_register,
+ u32 __iomem *maccfg1_register)
+{
+ u32 value = 0;
+
+ /* Set UEMPR register */
+ value = (u32) pause_period << UEMPR_PAUSE_TIME_VALUE_SHIFT;
+ value |= (u32) extension_field << UEMPR_EXTENDED_PAUSE_TIME_VALUE_SHIFT;
+ out_be32(uempr_register, value);
+
+ /* Set UPSMR register */
+ setbits32(upsmr_register, automatic_flow_control_mode);
+
+ value = in_be32(maccfg1_register);
+ if (rx_flow_control_enable)
+ value |= MACCFG1_FLOW_RX;
+ if (tx_flow_control_enable)
+ value |= MACCFG1_FLOW_TX;
+ out_be32(maccfg1_register, value);
+
+ return 0;
+}
+
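+/* Optionally enable hardware statistics gathering (UPSMR.HSE), clear the
+ * hardware counters and, if requested, make them auto-zero on read.
+ */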
+static int init_hw_statistics_gathering_mode(int enable_hardware_statistics,
+ int auto_zero_hardware_statistics,
+ u32 __iomem *upsmr_register,
+ u16 __iomem *uescr_register)
+{
+ u16 uescr_value = 0;
+
+ /* Enable hardware statistics gathering if requested */
+ if (enable_hardware_statistics)
+ setbits32(upsmr_register, UCC_GETH_UPSMR_HSE);
+
+ /* Clear hardware statistics counters */
+ uescr_value = in_be16(uescr_register);
+ uescr_value |= UESCR_CLRCNT;
+ /* Automatically zero hardware statistics counters on read,
+ if requested */
+ if (auto_zero_hardware_statistics)
+ uescr_value |= UESCR_AUTOZ;
+ out_be16(uescr_register, uescr_value);
+
+ return 0;
+}
+
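+/* Point the Tx/Rx RMON base pointers at the firmware statistics
+ * structures and enable RMON statistics in TEMODER/REMODER as requested.
+ */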
+static int init_firmware_statistics_gathering_mode(int
+ enable_tx_firmware_statistics,
+ int enable_rx_firmware_statistics,
+ u32 __iomem *tx_rmon_base_ptr,
+ u32 tx_firmware_statistics_structure_address,
+ u32 __iomem *rx_rmon_base_ptr,
+ u32 rx_firmware_statistics_structure_address,
+ u16 __iomem *temoder_register,
+ u32 __iomem *remoder_register)
+{
+ /* Note: this function does not check if
+ * the parameters it receives are NULL */
+
+ if (enable_tx_firmware_statistics) {
+ out_be32(tx_rmon_base_ptr,
+ tx_firmware_statistics_structure_address);
+ setbits16(temoder_register, TEMODER_TX_RMON_STATISTICS_ENABLE);
+ }
+
+ if (enable_rx_firmware_statistics) {
+ out_be32(rx_rmon_base_ptr,
+ rx_firmware_statistics_structure_address);
+ setbits32(remoder_register, REMODER_RX_RMON_STATISTICS_ENABLE);
+ }
+
+ return 0;
+}
+
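+/* Write the six station address bytes into MACSTNADDR1/MACSTNADDR2 in
+ * the byte order expected by the hardware (see the layout notes below).
+ */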
+static int init_mac_station_addr_regs(u8 address_byte_0,
+ u8 address_byte_1,
+ u8 address_byte_2,
+ u8 address_byte_3,
+ u8 address_byte_4,
+ u8 address_byte_5,
+ u32 __iomem *macstnaddr1_register,
+ u32 __iomem *macstnaddr2_register)
+{
+ u32 value = 0;
+
+ /* Example: for a station address of 0x12345678ABCD, */
+ /* 0x12 is byte 0, 0x34 is byte 1 and so on and 0xCD is byte 5 */
+
+ /* MACSTNADDR1 Register: */
+
+ /* 0 7 8 15 */
+ /* station address byte 5 station address byte 4 */
+ /* 16 23 24 31 */
+ /* station address byte 3 station address byte 2 */
+ value |= (u32) ((address_byte_2 << 0) & 0x000000FF);
+ value |= (u32) ((address_byte_3 << 8) & 0x0000FF00);
+ value |= (u32) ((address_byte_4 << 16) & 0x00FF0000);
+ value |= (u32) ((address_byte_5 << 24) & 0xFF000000);
+
+ out_be32(macstnaddr1_register, value);
+
+ /* MACSTNADDR2 Register: */
+
+ /* 0 7 8 15 */
+ /* station address byte 1 station address byte 0 */
+ /* 16 23 24 31 */
+ /* reserved reserved */
+ value = 0;
+ value |= (u32) ((address_byte_0 << 16) & 0x00FF0000);
+ value |= (u32) ((address_byte_1 << 24) & 0xFF000000);
+
+ out_be32(macstnaddr2_register, value);
+
+ return 0;
+}
+
+static int init_check_frame_length_mode(int length_check,
+ u32 __iomem *maccfg2_register)
+{
+ u32 value = 0;
+
+ value = in_be32(maccfg2_register);
+
+ if (length_check)
+ value |= MACCFG2_LC;
+ else
+ value &= ~MACCFG2_LC;
+
+ out_be32(maccfg2_register, value);
+ return 0;
+}
+
+static int init_preamble_length(u8 preamble_length,
+ u32 __iomem *maccfg2_register)
+{
+ if ((preamble_length < 3) || (preamble_length > 7))
+ return -EINVAL;
+
+ clrsetbits_be32(maccfg2_register, MACCFG2_PREL_MASK,
+ preamble_length << MACCFG2_PREL_SHIFT);
+
+ return 0;
+}
+
+static int init_rx_parameters(int reject_broadcast,
+ int receive_short_frames,
+ int promiscuous, u32 __iomem *upsmr_register)
+{
+ u32 value = 0;
+
+ value = in_be32(upsmr_register);
+
+ if (reject_broadcast)
+ value |= UCC_GETH_UPSMR_BRO;
+ else
+ value &= ~UCC_GETH_UPSMR_BRO;
+
+ if (receive_short_frames)
+ value |= UCC_GETH_UPSMR_RSH;
+ else
+ value &= ~UCC_GETH_UPSMR_RSH;
+
+ if (promiscuous)
+ value |= UCC_GETH_UPSMR_PRO;
+ else
+ value &= ~UCC_GETH_UPSMR_PRO;
+
+ out_be32(upsmr_register, value);
+
+ return 0;
+}
+
+static int init_max_rx_buff_len(u16 max_rx_buf_len,
+ u16 __iomem *mrblr_register)
+{
+ /* max_rx_buf_len value must be a multiple of 128 */
+ if ((max_rx_buf_len == 0) ||
+ (max_rx_buf_len % UCC_GETH_MRBLR_ALIGNMENT))
+ return -EINVAL;
+
+ out_be16(mrblr_register, max_rx_buf_len);
+ return 0;
+}
+
+static int init_min_frame_len(u16 min_frame_length,
+ u16 __iomem *minflr_register,
+ u16 __iomem *mrblr_register)
+{
+ u16 mrblr_value = 0;
+
+ mrblr_value = in_be16(mrblr_register);
+ if (min_frame_length >= (mrblr_value - 4))
+ return -EINVAL;
+
+ out_be16(minflr_register, min_frame_length);
+ return 0;
+}
+
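+/* Program MACCFG2 and UPSMR for the configured PHY interface mode and
+ * maximum speed, disable TBI autonegotiation where applicable, and set
+ * the frame length check mode and preamble length.
+ */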
+static int adjust_enet_interface(struct ucc_geth_private *ugeth)
+{
+ struct ucc_geth_info *ug_info;
+ struct ucc_geth __iomem *ug_regs;
+ struct ucc_fast __iomem *uf_regs;
+ int ret_val;
+ u32 upsmr, maccfg2;
+ u16 value;
+
+ ugeth_vdbg("%s: IN", __func__);
+
+ ug_info = ugeth->ug_info;
+ ug_regs = ugeth->ug_regs;
+ uf_regs = ugeth->uccf->uf_regs;
+
+ /* Set MACCFG2 */
+ maccfg2 = in_be32(&ug_regs->maccfg2);
+ maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK;
+ if ((ugeth->max_speed == SPEED_10) ||
+ (ugeth->max_speed == SPEED_100))
+ maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
+ else if (ugeth->max_speed == SPEED_1000)
+ maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
+ maccfg2 |= ug_info->padAndCrc;
+ out_be32(&ug_regs->maccfg2, maccfg2);
+
+ /* Set UPSMR */
+ upsmr = in_be32(&uf_regs->upsmr);
+ upsmr &= ~(UCC_GETH_UPSMR_RPM | UCC_GETH_UPSMR_R10M |
+ UCC_GETH_UPSMR_TBIM | UCC_GETH_UPSMR_RMM);
+ if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) ||
+ (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) ||
+ (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) ||
+ (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
+ (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) ||
+ (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
+ if (ugeth->phy_interface != PHY_INTERFACE_MODE_RMII)
+ upsmr |= UCC_GETH_UPSMR_RPM;
+ switch (ugeth->max_speed) {
+ case SPEED_10:
+ upsmr |= UCC_GETH_UPSMR_R10M;
+ /* FALLTHROUGH */
+ case SPEED_100:
+ if (ugeth->phy_interface != PHY_INTERFACE_MODE_RTBI)
+ upsmr |= UCC_GETH_UPSMR_RMM;
+ }
+ }
+ if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) ||
+ (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
+ upsmr |= UCC_GETH_UPSMR_TBIM;
+ }
+ if (ugeth->phy_interface == PHY_INTERFACE_MODE_SGMII)
+ upsmr |= UCC_GETH_UPSMR_SGMM;
+
+ out_be32(&uf_regs->upsmr, upsmr);
+
+ /* Disable autonegotiation in tbi mode, because by default it
+ comes up in autonegotiation mode. */
+ /* Note that this depends on proper setting in utbipar register. */
+ if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) ||
+ (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
+ struct ucc_geth_info *ug_info = ugeth->ug_info;
+ struct phy_device *tbiphy;
+
+ if (!ug_info->tbi_node) {
+ pr_warn("TBI mode requires that the device tree specify a tbi-handle\n");
+ return -EINVAL;
+ }
+
+ tbiphy = of_phy_find_device(ug_info->tbi_node);
+ if (!tbiphy) {
+ pr_warn("Could not get TBI device\n");
+ return -EINVAL;
+ }
+
+ value = phy_read(tbiphy, ENET_TBI_MII_CR);
+ value &= ~0x1000; /* Turn off autonegotiation */
+ phy_write(tbiphy, ENET_TBI_MII_CR, value);
+ }
+
+ init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2);
+
+ ret_val = init_preamble_length(ug_info->prel, &ug_regs->maccfg2);
+ if (ret_val != 0) {
+ if (netif_msg_probe(ugeth))
+ pr_err("Preamble length must be between 3 and 7 inclusive\n");
+ return ret_val;
+ }
+
+ return 0;
+}
+
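+/* Issue a GRACEFUL_STOP_TX host command and poll the GRA event bit for
+ * up to ~100 ms before marking the transmitter as stopped.
+ */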
+static int ugeth_graceful_stop_tx(struct ucc_geth_private *ugeth)
+{
+ struct ucc_fast_private *uccf;
+ u32 cecr_subblock;
+ u32 temp;
+ int i = 10;
+
+ uccf = ugeth->uccf;
+
+ /* Mask GRACEFUL STOP TX interrupt bit and clear it */
+ clrbits32(uccf->p_uccm, UCC_GETH_UCCE_GRA);
+ out_be32(uccf->p_ucce, UCC_GETH_UCCE_GRA); /* clear by writing 1 */
+
+ /* Issue host command */
+ cecr_subblock =
+ ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
+ qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
+ QE_CR_PROTOCOL_ETHERNET, 0);
+
+ /* Wait for command to complete */
+ do {
+ msleep(10);
+ temp = in_be32(uccf->p_ucce);
+ } while (!(temp & UCC_GETH_UCCE_GRA) && --i);
+
+ uccf->stopped_tx = 1;
+
+ return 0;
+}
+
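+/* Repeatedly issue GRACEFUL_STOP_RX until the acknowledge bit in
+ * rxgstpack is set (bounded number of attempts), then mark Rx stopped.
+ */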
+static int ugeth_graceful_stop_rx(struct ucc_geth_private *ugeth)
+{
+ struct ucc_fast_private *uccf;
+ u32 cecr_subblock;
+ u8 temp;
+ int i = 10;
+
+ uccf = ugeth->uccf;
+
+ /* Clear acknowledge bit */
+ temp = in_8(&ugeth->p_rx_glbl_pram->rxgstpack);
+ temp &= ~GRACEFUL_STOP_ACKNOWLEDGE_RX;
+ out_8(&ugeth->p_rx_glbl_pram->rxgstpack, temp);
+
+ /* Keep issuing command and checking acknowledge bit until
+ it is asserted, according to spec */
+ do {
+ /* Issue host command */
+ cecr_subblock =
+ ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.
+ ucc_num);
+ qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock,
+ QE_CR_PROTOCOL_ETHERNET, 0);
+ msleep(10);
+ temp = in_8(&ugeth->p_rx_glbl_pram->rxgstpack);
+ } while (!(temp & GRACEFUL_STOP_ACKNOWLEDGE_RX) && --i);
+
+ uccf->stopped_rx = 1;
+
+ return 0;
+}
+
+static int ugeth_restart_tx(struct ucc_geth_private *ugeth)
+{
+ struct ucc_fast_private *uccf;
+ u32 cecr_subblock;
+
+ uccf = ugeth->uccf;
+
+ cecr_subblock =
+ ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
+ qe_issue_cmd(QE_RESTART_TX, cecr_subblock, QE_CR_PROTOCOL_ETHERNET, 0);
+ uccf->stopped_tx = 0;
+
+ return 0;
+}
+
+static int ugeth_restart_rx(struct ucc_geth_private *ugeth)
+{
+ struct ucc_fast_private *uccf;
+ u32 cecr_subblock;
+
+ uccf = ugeth->uccf;
+
+ cecr_subblock =
+ ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
+ qe_issue_cmd(QE_RESTART_RX, cecr_subblock, QE_CR_PROTOCOL_ETHERNET,
+ 0);
+ uccf->stopped_rx = 0;
+
+ return 0;
+}
+
+static int ugeth_enable(struct ucc_geth_private *ugeth, enum comm_dir mode)
+{
+ struct ucc_fast_private *uccf;
+ int enabled_tx, enabled_rx;
+
+ uccf = ugeth->uccf;
+
+ /* check if the UCC number is in range. */
+ if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) {
+ if (netif_msg_probe(ugeth))
+ pr_err("ucc_num out of range\n");
+ return -EINVAL;
+ }
+
+ enabled_tx = uccf->enabled_tx;
+ enabled_rx = uccf->enabled_rx;
+
+ /* Get Tx and Rx going again, in case this channel was actively
+ disabled. */
+ if ((mode & COMM_DIR_TX) && (!enabled_tx) && uccf->stopped_tx)
+ ugeth_restart_tx(ugeth);
+ if ((mode & COMM_DIR_RX) && (!enabled_rx) && uccf->stopped_rx)
+ ugeth_restart_rx(ugeth);
+
+ ucc_fast_enable(uccf, mode); /* OK to do even if not disabled */
+
+ return 0;
+
+}
+
+static int ugeth_disable(struct ucc_geth_private *ugeth, enum comm_dir mode)
+{
+ struct ucc_fast_private *uccf;
+
+ uccf = ugeth->uccf;
+
+ /* check if the UCC number is in range. */
+ if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) {
+ if (netif_msg_probe(ugeth))
+ pr_err("ucc_num out of range\n");
+ return -EINVAL;
+ }
+
+ /* Stop any transmissions */
+ if ((mode & COMM_DIR_TX) && uccf->enabled_tx && !uccf->stopped_tx)
+ ugeth_graceful_stop_tx(ugeth);
+
+ /* Stop any receptions */
+ if ((mode & COMM_DIR_RX) && uccf->enabled_rx && !uccf->stopped_rx)
+ ugeth_graceful_stop_rx(ugeth);
+
+ ucc_fast_disable(ugeth->uccf, mode); /* OK to do even if not enabled */
+
+ return 0;
+}
+
+static void ugeth_quiesce(struct ucc_geth_private *ugeth)
+{
+ /* Prevent any further xmits, plus detach the device. */
+ netif_device_detach(ugeth->ndev);
+
+ /* Wait for any current xmits to finish. */
+ netif_tx_disable(ugeth->ndev);
+
+ /* Disable the interrupt to avoid NAPI rescheduling. */
+ disable_irq(ugeth->ug_info->uf_info.irq);
+
+ /* Stop NAPI, and possibly wait for its completion. */
+ napi_disable(&ugeth->napi);
+}
+
+static void ugeth_activate(struct ucc_geth_private *ugeth)
+{
+ napi_enable(&ugeth->napi);
+ enable_irq(ugeth->ug_info->uf_info.irq);
+ netif_device_attach(ugeth->ndev);
+}
+
+/* Called every time the controller might need to be made
+ * aware of new link state. The PHY code conveys this
+ * information through variables in the ugeth structure, and this
+ * function converts those variables into the appropriate
+ * register values, and can bring down the device if needed.
+ */
+
+static void adjust_link(struct net_device *dev)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(dev);
+ struct ucc_geth __iomem *ug_regs;
+ struct ucc_fast __iomem *uf_regs;
+ struct phy_device *phydev = ugeth->phydev;
+ int new_state = 0;
+
+ ug_regs = ugeth->ug_regs;
+ uf_regs = ugeth->uccf->uf_regs;
+
+ if (phydev->link) {
+ u32 tempval = in_be32(&ug_regs->maccfg2);
+ u32 upsmr = in_be32(&uf_regs->upsmr);
+ /* Now we make sure that we can be in full duplex mode.
+ * If not, we operate in half-duplex mode. */
+ if (phydev->duplex != ugeth->oldduplex) {
+ new_state = 1;
+ if (!(phydev->duplex))
+ tempval &= ~(MACCFG2_FDX);
+ else
+ tempval |= MACCFG2_FDX;
+ ugeth->oldduplex = phydev->duplex;
+ }
+
+ if (phydev->speed != ugeth->oldspeed) {
+ new_state = 1;
+ switch (phydev->speed) {
+ case SPEED_1000:
+ tempval = ((tempval &
+ ~(MACCFG2_INTERFACE_MODE_MASK)) |
+ MACCFG2_INTERFACE_MODE_BYTE);
+ break;
+ case SPEED_100:
+ case SPEED_10:
+ tempval = ((tempval &
+ ~(MACCFG2_INTERFACE_MODE_MASK)) |
+ MACCFG2_INTERFACE_MODE_NIBBLE);
+ /* if reduced mode, re-set UPSMR.R10M */
+ if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) ||
+ (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) ||
+ (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) ||
+ (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
+ (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) ||
+ (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
+ if (phydev->speed == SPEED_10)
+ upsmr |= UCC_GETH_UPSMR_R10M;
+ else
+ upsmr &= ~UCC_GETH_UPSMR_R10M;
+ }
+ break;
+ default:
+ if (netif_msg_link(ugeth))
+ pr_warn("%s: Ack! Speed (%d) is not 10/100/1000!\n",
+ dev->name, phydev->speed);
+ break;
+ }
+ ugeth->oldspeed = phydev->speed;
+ }
+
+ if (!ugeth->oldlink) {
+ new_state = 1;
+ ugeth->oldlink = 1;
+ }
+
+ if (new_state) {
+ /*
+ * To change the MAC configuration we need to disable
+ * the controller. To do so, we have to either grab
+ * ugeth->lock, which is a bad idea since 'graceful
+ * stop' commands might take quite a while, or we can
+ * quiesce the driver's activity.
+ */
+ ugeth_quiesce(ugeth);
+ ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
+
+ out_be32(&ug_regs->maccfg2, tempval);
+ out_be32(&uf_regs->upsmr, upsmr);
+
+ ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
+ ugeth_activate(ugeth);
+ }
+ } else if (ugeth->oldlink) {
+ new_state = 1;
+ ugeth->oldlink = 0;
+ ugeth->oldspeed = 0;
+ ugeth->oldduplex = -1;
+ }
+
+ if (new_state && netif_msg_link(ugeth))
+ phy_print_status(phydev);
+}
+
+/* Initialize TBI PHY interface for communicating with the
+ * SERDES lynx PHY on the chip. We communicate with this PHY
+ * through the MDIO bus on each controller, treating it as a
+ * "normal" PHY at the address found in the UTBIPA register. We assume
+ * that the UTBIPA register is valid. Either the MDIO bus code will set
+ * it to a value that doesn't conflict with other PHYs on the bus, or the
+ * value doesn't matter, as there are no other PHYs on the bus.
+ */
+static void uec_configure_serdes(struct net_device *dev)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(dev);
+ struct ucc_geth_info *ug_info = ugeth->ug_info;
+ struct phy_device *tbiphy;
+
+ if (!ug_info->tbi_node) {
+ dev_warn(&dev->dev, "SGMII mode requires that the device "
+ "tree specify a tbi-handle\n");
+ return;
+ }
+
+ tbiphy = of_phy_find_device(ug_info->tbi_node);
+ if (!tbiphy) {
+ dev_err(&dev->dev, "error: Could not get TBI device\n");
+ return;
+ }
+
+ /*
+ * If the link is already up, we must already be ok, and don't need to
+ * configure and reset the TBI<->SerDes link. Maybe U-Boot configured
+ * everything for us? Resetting it takes the link down and requires
+ * several seconds for it to come back.
+ */
+ if (phy_read(tbiphy, ENET_TBI_MII_SR) & TBISR_LSTATUS)
+ return;
+
+ /* Single clk mode, mii mode off(for serdes communication) */
+ phy_write(tbiphy, ENET_TBI_MII_ANA, TBIANA_SETTINGS);
+
+ phy_write(tbiphy, ENET_TBI_MII_TBICON, TBICON_CLK_SELECT);
+
+ phy_write(tbiphy, ENET_TBI_MII_CR, TBICR_SETTINGS);
+}
+
+/* Configure the PHY for dev.
+ * Returns 0 on success, -ENODEV on failure.
+ */
+static int init_phy(struct net_device *dev)
+{
+ struct ucc_geth_private *priv = netdev_priv(dev);
+ struct ucc_geth_info *ug_info = priv->ug_info;
+ struct phy_device *phydev;
+
+ priv->oldlink = 0;
+ priv->oldspeed = 0;
+ priv->oldduplex = -1;
+
+ phydev = of_phy_connect(dev, ug_info->phy_node, &adjust_link, 0,
+ priv->phy_interface);
+ if (!phydev) {
+ dev_err(&dev->dev, "Could not attach to PHY\n");
+ return -ENODEV;
+ }
+
+ if (priv->phy_interface == PHY_INTERFACE_MODE_SGMII)
+ uec_configure_serdes(dev);
+
+ phydev->supported &= (SUPPORTED_MII |
+ SUPPORTED_Autoneg |
+ SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full);
+
+ if (priv->max_speed == SPEED_1000)
+ phydev->supported |= SUPPORTED_1000baseT_Full;
+
+ phydev->advertising = phydev->supported;
+
+ priv->phydev = phydev;
+
+ return 0;
+}
+
+static void ugeth_dump_regs(struct ucc_geth_private *ugeth)
+{
+#ifdef DEBUG
+ ucc_fast_dump_regs(ugeth->uccf);
+ dump_regs(ugeth);
+ dump_bds(ugeth);
+#endif
+}
+
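+/* Clear either the group or the individual hash filter: zero the hash
+ * registers and free every queued address container, briefly disabling
+ * the active directions while doing so.
+ */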
+static int ugeth_82xx_filtering_clear_all_addr_in_hash(struct ucc_geth_private *
+ ugeth,
+ enum enet_addr_type
+ enet_addr_type)
+{
+ struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
+ struct ucc_fast_private *uccf;
+ enum comm_dir comm_dir;
+ struct list_head *p_lh;
+ u16 i, num;
+ u32 __iomem *addr_h;
+ u32 __iomem *addr_l;
+ u8 *p_counter;
+
+ uccf = ugeth->uccf;
+
+ p_82xx_addr_filt =
+ (struct ucc_geth_82xx_address_filtering_pram __iomem *)
+ ugeth->p_rx_glbl_pram->addressfiltering;
+
+ if (enet_addr_type == ENET_ADDR_TYPE_GROUP) {
+ addr_h = &(p_82xx_addr_filt->gaddr_h);
+ addr_l = &(p_82xx_addr_filt->gaddr_l);
+ p_lh = &ugeth->group_hash_q;
+ p_counter = &(ugeth->numGroupAddrInHash);
+ } else if (enet_addr_type == ENET_ADDR_TYPE_INDIVIDUAL) {
+ addr_h = &(p_82xx_addr_filt->iaddr_h);
+ addr_l = &(p_82xx_addr_filt->iaddr_l);
+ p_lh = &ugeth->ind_hash_q;
+ p_counter = &(ugeth->numIndAddrInHash);
+ } else
+ return -EINVAL;
+
+ comm_dir = 0;
+ if (uccf->enabled_tx)
+ comm_dir |= COMM_DIR_TX;
+ if (uccf->enabled_rx)
+ comm_dir |= COMM_DIR_RX;
+ if (comm_dir)
+ ugeth_disable(ugeth, comm_dir);
+
+ /* Clear the hash table. */
+ out_be32(addr_h, 0x00000000);
+ out_be32(addr_l, 0x00000000);
+
+ if (!p_lh)
+ return 0;
+
+ num = *p_counter;
+
+ /* Delete all remaining CQ elements */
+ for (i = 0; i < num; i++)
+ put_enet_addr_container(ENET_ADDR_CONT_ENTRY(dequeue(p_lh)));
+
+ *p_counter = 0;
+
+ if (comm_dir)
+ ugeth_enable(ugeth, comm_dir);
+
+ return 0;
+}
+
+static int ugeth_82xx_filtering_clear_addr_in_paddr(struct ucc_geth_private *ugeth,
+ u8 paddr_num)
+{
+ ugeth->indAddrRegUsed[paddr_num] = 0; /* mark this paddr as not used */
+ return hw_clear_addr_in_paddr(ugeth, paddr_num);/* clear in hardware */
+}
+
+static void ucc_geth_free_rx(struct ucc_geth_private *ugeth)
+{
+ struct ucc_geth_info *ug_info;
+ struct ucc_fast_info *uf_info;
+ u16 i, j;
+ u8 __iomem *bd;
+
+ ug_info = ugeth->ug_info;
+ uf_info = &ug_info->uf_info;
+
+ for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
+ if (ugeth->p_rx_bd_ring[i]) {
+ /* Return existing data buffers in ring */
+ bd = ugeth->p_rx_bd_ring[i];
+ for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) {
+ if (ugeth->rx_skbuff[i][j]) {
+ dma_unmap_single(ugeth->dev,
+ in_be32(&((struct qe_bd __iomem *)bd)->buf),
+ ugeth->ug_info->
+ uf_info.max_rx_buf_length +
+ UCC_GETH_RX_DATA_BUF_ALIGNMENT,
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(
+ ugeth->rx_skbuff[i][j]);
+ ugeth->rx_skbuff[i][j] = NULL;
+ }
+ bd += sizeof(struct qe_bd);
+ }
+
+ kfree(ugeth->rx_skbuff[i]);
+
+ if (ugeth->ug_info->uf_info.bd_mem_part ==
+ MEM_PART_SYSTEM)
+ kfree((void *)ugeth->rx_bd_ring_offset[i]);
+ else if (ugeth->ug_info->uf_info.bd_mem_part ==
+ MEM_PART_MURAM)
+ qe_muram_free(ugeth->rx_bd_ring_offset[i]);
+ ugeth->p_rx_bd_ring[i] = NULL;
+ }
+ }
+
+}
+
+static void ucc_geth_free_tx(struct ucc_geth_private *ugeth)
+{
+ struct ucc_geth_info *ug_info;
+ struct ucc_fast_info *uf_info;
+ u16 i, j;
+ u8 __iomem *bd;
+
+ ug_info = ugeth->ug_info;
+ uf_info = &ug_info->uf_info;
+
+ for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
+ bd = ugeth->p_tx_bd_ring[i];
+ if (!bd)
+ continue;
+ for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) {
+ if (ugeth->tx_skbuff[i][j]) {
+ dma_unmap_single(ugeth->dev,
+ in_be32(&((struct qe_bd __iomem *)bd)->buf),
+ (in_be32((u32 __iomem *)bd) &
+ BD_LENGTH_MASK),
+ DMA_TO_DEVICE);
+ dev_kfree_skb_any(ugeth->tx_skbuff[i][j]);
+ ugeth->tx_skbuff[i][j] = NULL;
+ }
+ }
+
+ kfree(ugeth->tx_skbuff[i]);
+
+ if (ugeth->p_tx_bd_ring[i]) {
+ if (ugeth->ug_info->uf_info.bd_mem_part ==
+ MEM_PART_SYSTEM)
+ kfree((void *)ugeth->tx_bd_ring_offset[i]);
+ else if (ugeth->ug_info->uf_info.bd_mem_part ==
+ MEM_PART_MURAM)
+ qe_muram_free(ugeth->tx_bd_ring_offset[i]);
+ ugeth->p_tx_bd_ring[i] = NULL;
+ }
+ }
+
+}
+
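+/* Free everything allocated for this UCC: the fast UCC context, the
+ * MURAM parameter areas, the init enet entries, the Tx/Rx rings and
+ * skbs, the hash queues and the register mapping.
+ */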
+static void ucc_geth_memclean(struct ucc_geth_private *ugeth)
+{
+ if (!ugeth)
+ return;
+
+ if (ugeth->uccf) {
+ ucc_fast_free(ugeth->uccf);
+ ugeth->uccf = NULL;
+ }
+
+ if (ugeth->p_thread_data_tx) {
+ qe_muram_free(ugeth->thread_dat_tx_offset);
+ ugeth->p_thread_data_tx = NULL;
+ }
+ if (ugeth->p_thread_data_rx) {
+ qe_muram_free(ugeth->thread_dat_rx_offset);
+ ugeth->p_thread_data_rx = NULL;
+ }
+ if (ugeth->p_exf_glbl_param) {
+ qe_muram_free(ugeth->exf_glbl_param_offset);
+ ugeth->p_exf_glbl_param = NULL;
+ }
+ if (ugeth->p_rx_glbl_pram) {
+ qe_muram_free(ugeth->rx_glbl_pram_offset);
+ ugeth->p_rx_glbl_pram = NULL;
+ }
+ if (ugeth->p_tx_glbl_pram) {
+ qe_muram_free(ugeth->tx_glbl_pram_offset);
+ ugeth->p_tx_glbl_pram = NULL;
+ }
+ if (ugeth->p_send_q_mem_reg) {
+ qe_muram_free(ugeth->send_q_mem_reg_offset);
+ ugeth->p_send_q_mem_reg = NULL;
+ }
+ if (ugeth->p_scheduler) {
+ qe_muram_free(ugeth->scheduler_offset);
+ ugeth->p_scheduler = NULL;
+ }
+ if (ugeth->p_tx_fw_statistics_pram) {
+ qe_muram_free(ugeth->tx_fw_statistics_pram_offset);
+ ugeth->p_tx_fw_statistics_pram = NULL;
+ }
+ if (ugeth->p_rx_fw_statistics_pram) {
+ qe_muram_free(ugeth->rx_fw_statistics_pram_offset);
+ ugeth->p_rx_fw_statistics_pram = NULL;
+ }
+ if (ugeth->p_rx_irq_coalescing_tbl) {
+ qe_muram_free(ugeth->rx_irq_coalescing_tbl_offset);
+ ugeth->p_rx_irq_coalescing_tbl = NULL;
+ }
+ if (ugeth->p_rx_bd_qs_tbl) {
+ qe_muram_free(ugeth->rx_bd_qs_tbl_offset);
+ ugeth->p_rx_bd_qs_tbl = NULL;
+ }
+ if (ugeth->p_init_enet_param_shadow) {
+ return_init_enet_entries(ugeth,
+ &(ugeth->p_init_enet_param_shadow->
+ rxthread[0]),
+ ENET_INIT_PARAM_MAX_ENTRIES_RX,
+ ugeth->ug_info->riscRx, 1);
+ return_init_enet_entries(ugeth,
+ &(ugeth->p_init_enet_param_shadow->
+ txthread[0]),
+ ENET_INIT_PARAM_MAX_ENTRIES_TX,
+ ugeth->ug_info->riscTx, 0);
+ kfree(ugeth->p_init_enet_param_shadow);
+ ugeth->p_init_enet_param_shadow = NULL;
+ }
+ ucc_geth_free_tx(ugeth);
+ ucc_geth_free_rx(ugeth);
+ while (!list_empty(&ugeth->group_hash_q))
+ put_enet_addr_container(ENET_ADDR_CONT_ENTRY
+ (dequeue(&ugeth->group_hash_q)));
+ while (!list_empty(&ugeth->ind_hash_q))
+ put_enet_addr_container(ENET_ADDR_CONT_ENTRY
+ (dequeue(&ugeth->ind_hash_q)));
+ if (ugeth->ug_regs) {
+ iounmap(ugeth->ug_regs);
+ ugeth->ug_regs = NULL;
+ }
+}
+
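+/* Update Rx address filtering: promiscuous mode, catch-all multicast,
+ * or a multicast hash built from the device's address list.
+ */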
+static void ucc_geth_set_multi(struct net_device *dev)
+{
+ struct ucc_geth_private *ugeth;
+ struct netdev_hw_addr *ha;
+ struct ucc_fast __iomem *uf_regs;
+ struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
+
+ ugeth = netdev_priv(dev);
+
+ uf_regs = ugeth->uccf->uf_regs;
+
+ if (dev->flags & IFF_PROMISC) {
+ setbits32(&uf_regs->upsmr, UCC_GETH_UPSMR_PRO);
+ } else {
+ clrbits32(&uf_regs->upsmr, UCC_GETH_UPSMR_PRO);
+
+ p_82xx_addr_filt =
+ (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->
+ p_rx_glbl_pram->addressfiltering;
+
+ if (dev->flags & IFF_ALLMULTI) {
+ /* Catch all multicast addresses, so set the
+ * filter to all 1's.
+ */
+ out_be32(&p_82xx_addr_filt->gaddr_h, 0xffffffff);
+ out_be32(&p_82xx_addr_filt->gaddr_l, 0xffffffff);
+ } else {
+ /* Clear filter and add the addresses in the list.
+ */
+ out_be32(&p_82xx_addr_filt->gaddr_h, 0x0);
+ out_be32(&p_82xx_addr_filt->gaddr_l, 0x0);
+
+ netdev_for_each_mc_addr(ha, dev) {
+ /* Ask CPM to run CRC and set bit in
+ * filter mask.
+ */
+ hw_add_addr_in_hash(ugeth, ha->addr);
+ }
+ }
+ }
+}
+
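+/* Bring the controller down: stop the PHY, gracefully disable Rx/Tx,
+ * mask and clear all interrupts, disable the MAC and free all resources.
+ */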
+static void ucc_geth_stop(struct ucc_geth_private *ugeth)
+{
+ struct ucc_geth __iomem *ug_regs = ugeth->ug_regs;
+ struct phy_device *phydev = ugeth->phydev;
+
+ ugeth_vdbg("%s: IN", __func__);
+
+ /*
+ * Tell the kernel the link is down.
+ * Must be done before disabling the controller
+ * or deadlock may happen.
+ */
+ phy_stop(phydev);
+
+ /* Disable the controller */
+ ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
+
+ /* Mask all interrupts */
+ out_be32(ugeth->uccf->p_uccm, 0x00000000);
+
+ /* Clear all interrupts */
+ out_be32(ugeth->uccf->p_ucce, 0xffffffff);
+
+ /* Disable Rx and Tx */
+ clrbits32(&ug_regs->maccfg1, MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX);
+
+ ucc_geth_memclean(ugeth);
+}
+
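+/* Validate the user-supplied ug_info (ring lengths, queue counts,
+ * priority tables, ...), build the receive event mask, initialize the
+ * fast UCC block and map the geth register region.
+ */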
+static int ucc_struct_init(struct ucc_geth_private *ugeth)
+{
+ struct ucc_geth_info *ug_info;
+ struct ucc_fast_info *uf_info;
+ int i;
+
+ ug_info = ugeth->ug_info;
+ uf_info = &ug_info->uf_info;
+
+ if (!((uf_info->bd_mem_part == MEM_PART_SYSTEM) ||
+ (uf_info->bd_mem_part == MEM_PART_MURAM))) {
+ if (netif_msg_probe(ugeth))
+ pr_err("Bad memory partition value\n");
+ return -EINVAL;
+ }
+
+ /* Rx BD lengths */
+ for (i = 0; i < ug_info->numQueuesRx; i++) {
+ if ((ug_info->bdRingLenRx[i] < UCC_GETH_RX_BD_RING_SIZE_MIN) ||
+ (ug_info->bdRingLenRx[i] %
+ UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT)) {
+ if (netif_msg_probe(ugeth))
+ pr_err("Rx BD ring length must be multiple of 4, no smaller than 8\n");
+ return -EINVAL;
+ }
+ }
+
+ /* Tx BD lengths */
+ for (i = 0; i < ug_info->numQueuesTx; i++) {
+ if (ug_info->bdRingLenTx[i] < UCC_GETH_TX_BD_RING_SIZE_MIN) {
+ if (netif_msg_probe(ugeth))
+ pr_err("Tx BD ring length must be no smaller than 2\n");
+ return -EINVAL;
+ }
+ }
+
+ /* mrblr */
+ if ((uf_info->max_rx_buf_length == 0) ||
+ (uf_info->max_rx_buf_length % UCC_GETH_MRBLR_ALIGNMENT)) {
+ if (netif_msg_probe(ugeth))
+ pr_err("max_rx_buf_length must be non-zero multiple of 128\n");
+ return -EINVAL;
+ }
+
+ /* num Tx queues */
+ if (ug_info->numQueuesTx > NUM_TX_QUEUES) {
+ if (netif_msg_probe(ugeth))
+ pr_err("number of tx queues too large\n");
+ return -EINVAL;
+ }
+
+ /* num Rx queues */
+ if (ug_info->numQueuesRx > NUM_RX_QUEUES) {
+ if (netif_msg_probe(ugeth))
+ pr_err("number of rx queues too large\n");
+ return -EINVAL;
+ }
+
+ /* l2qt */
+ for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++) {
+ if (ug_info->l2qt[i] >= ug_info->numQueuesRx) {
+ if (netif_msg_probe(ugeth))
+ pr_err("VLAN priority table entry must not be larger than number of Rx queues\n");
+ return -EINVAL;
+ }
+ }
+
+ /* l3qt */
+ for (i = 0; i < UCC_GETH_IP_PRIORITY_MAX; i++) {
+ if (ug_info->l3qt[i] >= ug_info->numQueuesRx) {
+ if (netif_msg_probe(ugeth))
+ pr_err("IP priority table entry must not be larger than number of Rx queues\n");
+ return -EINVAL;
+ }
+ }
+
+ if (ug_info->cam && !ug_info->ecamptr) {
+ if (netif_msg_probe(ugeth))
+ pr_err("If cam mode is chosen, must supply cam ptr\n");
+ return -EINVAL;
+ }
+
+ if ((ug_info->numStationAddresses !=
+ UCC_GETH_NUM_OF_STATION_ADDRESSES_1) &&
+ ug_info->rxExtendedFiltering) {
+ if (netif_msg_probe(ugeth))
+ pr_err("Number of station addresses greater than 1 not allowed in extended parsing mode\n");
+ return -EINVAL;
+ }
+
+ /* Generate uccm_mask for receive */
+ uf_info->uccm_mask = ug_info->eventRegMask & UCCE_OTHER;/* Errors */
+ for (i = 0; i < ug_info->numQueuesRx; i++)
+ uf_info->uccm_mask |= (UCC_GETH_UCCE_RXF0 << i);
+
+ for (i = 0; i < ug_info->numQueuesTx; i++)
+ uf_info->uccm_mask |= (UCC_GETH_UCCE_TXB0 << i);
+ /* Initialize the general fast UCC block. */
+ if (ucc_fast_init(uf_info, &ugeth->uccf)) {
+ if (netif_msg_probe(ugeth))
+ pr_err("Failed to init uccf\n");
+ return -ENOMEM;
+ }
+
+ /* read the number of risc engines, update the riscTx and riscRx
+ * if there are 4 riscs in QE
+ */
+ if (qe_get_num_of_risc() == 4) {
+ ug_info->riscTx = QE_RISC_ALLOCATION_FOUR_RISCS;
+ ug_info->riscRx = QE_RISC_ALLOCATION_FOUR_RISCS;
+ }
+
+ ugeth->ug_regs = ioremap(uf_info->regs, sizeof(*ugeth->ug_regs));
+ if (!ugeth->ug_regs) {
+ if (netif_msg_probe(ugeth))
+ pr_err("Failed to ioremap regs\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
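+/* Allocate the Tx BD rings (in system memory or MURAM, suitably aligned)
+ * and the per-queue skb pointer arrays, then initialize every BD with an
+ * empty buffer, setting the wrap bit on the last descriptor of each ring.
+ */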
+static int ucc_geth_alloc_tx(struct ucc_geth_private *ugeth)
+{
+ struct ucc_geth_info *ug_info;
+ struct ucc_fast_info *uf_info;
+ int length;
+ u16 i, j;
+ u8 __iomem *bd;
+
+ ug_info = ugeth->ug_info;
+ uf_info = &ug_info->uf_info;
+
+ /* Allocate Tx bds */
+ for (j = 0; j < ug_info->numQueuesTx; j++) {
+ /* Allocate in multiples of
+ UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT,
+ according to spec */
+ length = ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd))
+ / UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT)
+ * UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
+ if ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)) %
+ UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT)
+ length += UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
+ if (uf_info->bd_mem_part == MEM_PART_SYSTEM) {
+ u32 align = 4;
+ if (UCC_GETH_TX_BD_RING_ALIGNMENT > 4)
+ align = UCC_GETH_TX_BD_RING_ALIGNMENT;
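+			/* over-allocate by 'align' bytes, then round the
+			   ring start up to an aligned address */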
+ ugeth->tx_bd_ring_offset[j] =
+ (u32) kmalloc((u32) (length + align), GFP_KERNEL);
+
+ if (ugeth->tx_bd_ring_offset[j] != 0)
+ ugeth->p_tx_bd_ring[j] =
+ (u8 __iomem *)((ugeth->tx_bd_ring_offset[j] +
+ align) & ~(align - 1));
+ } else if (uf_info->bd_mem_part == MEM_PART_MURAM) {
+ ugeth->tx_bd_ring_offset[j] =
+ qe_muram_alloc(length,
+ UCC_GETH_TX_BD_RING_ALIGNMENT);
+ if (!IS_ERR_VALUE(ugeth->tx_bd_ring_offset[j]))
+ ugeth->p_tx_bd_ring[j] =
+ (u8 __iomem *) qe_muram_addr(ugeth->
+ tx_bd_ring_offset[j]);
+ }
+ if (!ugeth->p_tx_bd_ring[j]) {
+ if (netif_msg_ifup(ugeth))
+ pr_err("Can not allocate memory for Tx bd rings\n");
+ return -ENOMEM;
+ }
+ /* Zero unused end of bd ring, according to spec */
+ memset_io((void __iomem *)(ugeth->p_tx_bd_ring[j] +
+ ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)), 0,
+ length - ug_info->bdRingLenTx[j] * sizeof(struct qe_bd));
+ }
+
+ /* Init Tx bds */
+ for (j = 0; j < ug_info->numQueuesTx; j++) {
+ /* Setup the skbuff rings */
+ ugeth->tx_skbuff[j] = kmalloc(sizeof(struct sk_buff *) *
+ ugeth->ug_info->bdRingLenTx[j],
+ GFP_KERNEL);
+
+ if (ugeth->tx_skbuff[j] == NULL) {
+ if (netif_msg_ifup(ugeth))
+ pr_err("Could not allocate tx_skbuff\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < ugeth->ug_info->bdRingLenTx[j]; i++)
+ ugeth->tx_skbuff[j][i] = NULL;
+
+ ugeth->skb_curtx[j] = ugeth->skb_dirtytx[j] = 0;
+ bd = ugeth->confBd[j] = ugeth->txBd[j] = ugeth->p_tx_bd_ring[j];
+ for (i = 0; i < ug_info->bdRingLenTx[j]; i++) {
+ /* clear bd buffer */
+ out_be32(&((struct qe_bd __iomem *)bd)->buf, 0);
+ /* set bd status and length */
+ out_be32((u32 __iomem *)bd, 0);
+ bd += sizeof(struct qe_bd);
+ }
+ bd -= sizeof(struct qe_bd);
+ /* set bd status and length */
+ out_be32((u32 __iomem *)bd, T_W); /* for last BD set Wrap bit */
+ }
+
+ return 0;
+}
+
+static int ucc_geth_alloc_rx(struct ucc_geth_private *ugeth)
+{
+ struct ucc_geth_info *ug_info;
+ struct ucc_fast_info *uf_info;
+ int length;
+ u16 i, j;
+ u8 __iomem *bd;
+
+ ug_info = ugeth->ug_info;
+ uf_info = &ug_info->uf_info;
+
+ /* Allocate Rx bds */
+ for (j = 0; j < ug_info->numQueuesRx; j++) {
+ length = ug_info->bdRingLenRx[j] * sizeof(struct qe_bd);
+ if (uf_info->bd_mem_part == MEM_PART_SYSTEM) {
+ u32 align = 4;
+ if (UCC_GETH_RX_BD_RING_ALIGNMENT > 4)
+ align = UCC_GETH_RX_BD_RING_ALIGNMENT;
+ ugeth->rx_bd_ring_offset[j] =
+ (u32) kmalloc((u32) (length + align), GFP_KERNEL);
+ if (ugeth->rx_bd_ring_offset[j] != 0)
+ ugeth->p_rx_bd_ring[j] =
+ (u8 __iomem *)((ugeth->rx_bd_ring_offset[j] +
+ align) & ~(align - 1));
+ } else if (uf_info->bd_mem_part == MEM_PART_MURAM) {
+ ugeth->rx_bd_ring_offset[j] =
+ qe_muram_alloc(length,
+ UCC_GETH_RX_BD_RING_ALIGNMENT);
+ if (!IS_ERR_VALUE(ugeth->rx_bd_ring_offset[j]))
+ ugeth->p_rx_bd_ring[j] =
+ (u8 __iomem *) qe_muram_addr(ugeth->
+ rx_bd_ring_offset[j]);
+ }
+ if (!ugeth->p_rx_bd_ring[j]) {
+ if (netif_msg_ifup(ugeth))
+ pr_err("Can not allocate memory for Rx bd rings\n");
+ return -ENOMEM;
+ }
+ }
+
+ /* Init Rx bds */
+ for (j = 0; j < ug_info->numQueuesRx; j++) {
+ /* Setup the skbuff rings */
+ ugeth->rx_skbuff[j] = kmalloc(sizeof(struct sk_buff *) *
+ ugeth->ug_info->bdRingLenRx[j],
+ GFP_KERNEL);
+
+ if (ugeth->rx_skbuff[j] == NULL) {
+ if (netif_msg_ifup(ugeth))
+ pr_err("Could not allocate rx_skbuff\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < ugeth->ug_info->bdRingLenRx[j]; i++)
+ ugeth->rx_skbuff[j][i] = NULL;
+
+ ugeth->skb_currx[j] = 0;
+ bd = ugeth->rxBd[j] = ugeth->p_rx_bd_ring[j];
+ for (i = 0; i < ug_info->bdRingLenRx[j]; i++) {
+ /* set bd status and length */
+ out_be32((u32 __iomem *)bd, R_I);
+ /* clear bd buffer */
+ out_be32(&((struct qe_bd __iomem *)bd)->buf, 0);
+ bd += sizeof(struct qe_bd);
+ }
+ bd -= sizeof(struct qe_bd);
+ /* set bd status and length */
+ out_be32((u32 __iomem *)bd, R_W); /* for last BD set Wrap bit */
+ }
+
+ return 0;
+}
+
+static int ucc_geth_startup(struct ucc_geth_private *ugeth)
+{
+ struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
+ struct ucc_geth_init_pram __iomem *p_init_enet_pram;
+ struct ucc_fast_private *uccf;
+ struct ucc_geth_info *ug_info;
+ struct ucc_fast_info *uf_info;
+ struct ucc_fast __iomem *uf_regs;
+ struct ucc_geth __iomem *ug_regs;
+ int ret_val = -EINVAL;
+ u32 remoder = UCC_GETH_REMODER_INIT;
+ u32 init_enet_pram_offset, cecr_subblock, command;
+ u32 ifstat, i, j, size, l2qt, l3qt;
+ u16 temoder = UCC_GETH_TEMODER_INIT;
+ u16 test;
+ u8 function_code = 0;
+ u8 __iomem *endOfRing;
+ u8 numThreadsRxNumerical, numThreadsTxNumerical;
+
+ ugeth_vdbg("%s: IN", __func__);
+ uccf = ugeth->uccf;
+ ug_info = ugeth->ug_info;
+ uf_info = &ug_info->uf_info;
+ uf_regs = uccf->uf_regs;
+ ug_regs = ugeth->ug_regs;
+
+ switch (ug_info->numThreadsRx) {
+ case UCC_GETH_NUM_OF_THREADS_1:
+ numThreadsRxNumerical = 1;
+ break;
+ case UCC_GETH_NUM_OF_THREADS_2:
+ numThreadsRxNumerical = 2;
+ break;
+ case UCC_GETH_NUM_OF_THREADS_4:
+ numThreadsRxNumerical = 4;
+ break;
+ case UCC_GETH_NUM_OF_THREADS_6:
+ numThreadsRxNumerical = 6;
+ break;
+ case UCC_GETH_NUM_OF_THREADS_8:
+ numThreadsRxNumerical = 8;
+ break;
+ default:
+ if (netif_msg_ifup(ugeth))
+ pr_err("Bad number of Rx threads value\n");
+ return -EINVAL;
+ }
+
+ switch (ug_info->numThreadsTx) {
+ case UCC_GETH_NUM_OF_THREADS_1:
+ numThreadsTxNumerical = 1;
+ break;
+ case UCC_GETH_NUM_OF_THREADS_2:
+ numThreadsTxNumerical = 2;
+ break;
+ case UCC_GETH_NUM_OF_THREADS_4:
+ numThreadsTxNumerical = 4;
+ break;
+ case UCC_GETH_NUM_OF_THREADS_6:
+ numThreadsTxNumerical = 6;
+ break;
+ case UCC_GETH_NUM_OF_THREADS_8:
+ numThreadsTxNumerical = 8;
+ break;
+ default:
+ if (netif_msg_ifup(ugeth))
+ pr_err("Bad number of Tx threads value\n");
+ return -EINVAL;
+ }
+
+ /* Calculate rx_extended_features */
+ ugeth->rx_non_dynamic_extended_features = ug_info->ipCheckSumCheck ||
+ ug_info->ipAddressAlignment ||
+ (ug_info->numStationAddresses !=
+ UCC_GETH_NUM_OF_STATION_ADDRESSES_1);
+
+ ugeth->rx_extended_features = ugeth->rx_non_dynamic_extended_features ||
+ (ug_info->vlanOperationTagged != UCC_GETH_VLAN_OPERATION_TAGGED_NOP) ||
+ (ug_info->vlanOperationNonTagged !=
+ UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP);
+
+ init_default_reg_vals(&uf_regs->upsmr,
+ &ug_regs->maccfg1, &ug_regs->maccfg2);
+
+ /* Set UPSMR */
+ /* For more details see the hardware spec. */
+ init_rx_parameters(ug_info->bro,
+ ug_info->rsh, ug_info->pro, &uf_regs->upsmr);
+
+ /* We're going to ignore other registers for now, */
+ /* except as needed to get up and running */
+
+ /* Set MACCFG1 */
+ /* For more details see the hardware spec. */
+ init_flow_control_params(ug_info->aufc,
+ ug_info->receiveFlowControl,
+ ug_info->transmitFlowControl,
+ ug_info->pausePeriod,
+ ug_info->extensionField,
+ &uf_regs->upsmr,
+ &ug_regs->uempr, &ug_regs->maccfg1);
+
+ setbits32(&ug_regs->maccfg1, MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX);
+
+ /* Set IPGIFG */
+ /* For more details see the hardware spec. */
+ ret_val = init_inter_frame_gap_params(ug_info->nonBackToBackIfgPart1,
+ ug_info->nonBackToBackIfgPart2,
+ ug_info->
+ miminumInterFrameGapEnforcement,
+ ug_info->backToBackInterFrameGap,
+ &ug_regs->ipgifg);
+ if (ret_val != 0) {
+ if (netif_msg_ifup(ugeth))
+ pr_err("IPGIFG initialization parameter too large\n");
+ return ret_val;
+ }
+
+ /* Set HAFDUP */
+ /* For more details see the hardware spec. */
+ ret_val = init_half_duplex_params(ug_info->altBeb,
+ ug_info->backPressureNoBackoff,
+ ug_info->noBackoff,
+ ug_info->excessDefer,
+ ug_info->altBebTruncation,
+ ug_info->maxRetransmission,
+ ug_info->collisionWindow,
+ &ug_regs->hafdup);
+ if (ret_val != 0) {
+ if (netif_msg_ifup(ugeth))
+ pr_err("Half Duplex initialization parameter too large\n");
+ return ret_val;
+ }
+
+ /* Set IFSTAT */
+ /* For more details see the hardware spec. */
+ /* Read only - resets upon read */
+ ifstat = in_be32(&ug_regs->ifstat);
+
+ /* Clear UEMPR */
+ /* For more details see the hardware spec. */
+ out_be32(&ug_regs->uempr, 0);
+
+ /* Set UESCR */
+ /* For more details see the hardware spec. */
+ init_hw_statistics_gathering_mode((ug_info->statisticsMode &
+ UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE),
+ 0, &uf_regs->upsmr, &ug_regs->uescr);
+
+ ret_val = ucc_geth_alloc_tx(ugeth);
+ if (ret_val != 0)
+ return ret_val;
+
+ ret_val = ucc_geth_alloc_rx(ugeth);
+ if (ret_val != 0)
+ return ret_val;
+
+ /*
+ * Global PRAM
+ */
+ /* Tx global PRAM */
+ /* Allocate global tx parameter RAM page */
+ ugeth->tx_glbl_pram_offset =
+ qe_muram_alloc(sizeof(struct ucc_geth_tx_global_pram),
+ UCC_GETH_TX_GLOBAL_PRAM_ALIGNMENT);
+ if (IS_ERR_VALUE(ugeth->tx_glbl_pram_offset)) {
+ if (netif_msg_ifup(ugeth))
+ pr_err("Can not allocate DPRAM memory for p_tx_glbl_pram\n");
+ return -ENOMEM;
+ }
+ ugeth->p_tx_glbl_pram =
+ (struct ucc_geth_tx_global_pram __iomem *) qe_muram_addr(ugeth->
+ tx_glbl_pram_offset);
+ /* Zero out p_tx_glbl_pram */
+ memset_io((void __iomem *)ugeth->p_tx_glbl_pram, 0, sizeof(struct ucc_geth_tx_global_pram));
+
+ /* Fill global PRAM */
+
+ /* TQPTR */
+ /* Size varies with number of Tx threads */
+ ugeth->thread_dat_tx_offset =
+ qe_muram_alloc(numThreadsTxNumerical *
+ sizeof(struct ucc_geth_thread_data_tx) +
+ 32 * (numThreadsTxNumerical == 1),
+ UCC_GETH_THREAD_DATA_ALIGNMENT);
+ if (IS_ERR_VALUE(ugeth->thread_dat_tx_offset)) {
+ if (netif_msg_ifup(ugeth))
+ pr_err("Can not allocate DPRAM memory for p_thread_data_tx\n");
+ return -ENOMEM;
+ }
+
+ ugeth->p_thread_data_tx =
+ (struct ucc_geth_thread_data_tx __iomem *) qe_muram_addr(ugeth->
+ thread_dat_tx_offset);
+ out_be32(&ugeth->p_tx_glbl_pram->tqptr, ugeth->thread_dat_tx_offset);
+
+ /* vtagtable */
+ for (i = 0; i < UCC_GETH_TX_VTAG_TABLE_ENTRY_MAX; i++)
+ out_be32(&ugeth->p_tx_glbl_pram->vtagtable[i],
+ ug_info->vtagtable[i]);
+
+ /* iphoffset */
+ for (i = 0; i < TX_IP_OFFSET_ENTRY_MAX; i++)
+ out_8(&ugeth->p_tx_glbl_pram->iphoffset[i],
+ ug_info->iphoffset[i]);
+
+ /* SQPTR */
+ /* Size varies with number of Tx queues */
+ ugeth->send_q_mem_reg_offset =
+ qe_muram_alloc(ug_info->numQueuesTx *
+ sizeof(struct ucc_geth_send_queue_qd),
+ UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT);
+ if (IS_ERR_VALUE(ugeth->send_q_mem_reg_offset)) {
+ if (netif_msg_ifup(ugeth))
+ pr_err("Can not allocate DPRAM memory for p_send_q_mem_reg\n");
+ return -ENOMEM;
+ }
+
+ ugeth->p_send_q_mem_reg =
+ (struct ucc_geth_send_queue_mem_region __iomem *) qe_muram_addr(ugeth->
+ send_q_mem_reg_offset);
+ out_be32(&ugeth->p_tx_glbl_pram->sqptr, ugeth->send_q_mem_reg_offset);
+
+ /* Setup the table */
+ /* Assume BD rings are already established */
+ for (i = 0; i < ug_info->numQueuesTx; i++) {
+ endOfRing =
+ ugeth->p_tx_bd_ring[i] + (ug_info->bdRingLenTx[i] -
+ 1) * sizeof(struct qe_bd);
+ if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) {
+ out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base,
+ (u32) virt_to_phys(ugeth->p_tx_bd_ring[i]));
+ out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].
+ last_bd_completed_address,
+ (u32) virt_to_phys(endOfRing));
+ } else if (ugeth->ug_info->uf_info.bd_mem_part ==
+ MEM_PART_MURAM) {
+ out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base,
+ (u32) immrbar_virt_to_phys(ugeth->
+ p_tx_bd_ring[i]));
+ out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].
+ last_bd_completed_address,
+ (u32) immrbar_virt_to_phys(endOfRing));
+ }
+ }
+
+ /* schedulerbasepointer */
+
+ if (ug_info->numQueuesTx > 1) {
+ /* scheduler exists only if more than 1 tx queue */
+ ugeth->scheduler_offset =
+ qe_muram_alloc(sizeof(struct ucc_geth_scheduler),
+ UCC_GETH_SCHEDULER_ALIGNMENT);
+ if (IS_ERR_VALUE(ugeth->scheduler_offset)) {
+ if (netif_msg_ifup(ugeth))
+ pr_err("Can not allocate DPRAM memory for p_scheduler\n");
+ return -ENOMEM;
+ }
+
+ ugeth->p_scheduler =
+ (struct ucc_geth_scheduler __iomem *) qe_muram_addr(ugeth->
+ scheduler_offset);
+ out_be32(&ugeth->p_tx_glbl_pram->schedulerbasepointer,
+ ugeth->scheduler_offset);
+ /* Zero out p_scheduler */
+ memset_io((void __iomem *)ugeth->p_scheduler, 0, sizeof(struct ucc_geth_scheduler));
+
+ /* Set values in scheduler */
+ out_be32(&ugeth->p_scheduler->mblinterval,
+ ug_info->mblinterval);
+ out_be16(&ugeth->p_scheduler->nortsrbytetime,
+ ug_info->nortsrbytetime);
+ out_8(&ugeth->p_scheduler->fracsiz, ug_info->fracsiz);
+ out_8(&ugeth->p_scheduler->strictpriorityq,
+ ug_info->strictpriorityq);
+ out_8(&ugeth->p_scheduler->txasap, ug_info->txasap);
+ out_8(&ugeth->p_scheduler->extrabw, ug_info->extrabw);
+ for (i = 0; i < NUM_TX_QUEUES; i++)
+ out_8(&ugeth->p_scheduler->weightfactor[i],
+ ug_info->weightfactor[i]);
+
+ /* Set pointers to cpucount registers in scheduler */
+ ugeth->p_cpucount[0] = &(ugeth->p_scheduler->cpucount0);
+ ugeth->p_cpucount[1] = &(ugeth->p_scheduler->cpucount1);
+ ugeth->p_cpucount[2] = &(ugeth->p_scheduler->cpucount2);
+ ugeth->p_cpucount[3] = &(ugeth->p_scheduler->cpucount3);
+ ugeth->p_cpucount[4] = &(ugeth->p_scheduler->cpucount4);
+ ugeth->p_cpucount[5] = &(ugeth->p_scheduler->cpucount5);
+ ugeth->p_cpucount[6] = &(ugeth->p_scheduler->cpucount6);
+ ugeth->p_cpucount[7] = &(ugeth->p_scheduler->cpucount7);
+ }
+
+ /* schedulerbasepointer */
+ /* TxRMON_PTR (statistics) */
+ if (ug_info->
+ statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) {
+ ugeth->tx_fw_statistics_pram_offset =
+ qe_muram_alloc(sizeof
+ (struct ucc_geth_tx_firmware_statistics_pram),
+ UCC_GETH_TX_STATISTICS_ALIGNMENT);
+ if (IS_ERR_VALUE(ugeth->tx_fw_statistics_pram_offset)) {
+ if (netif_msg_ifup(ugeth))
+ pr_err("Can not allocate DPRAM memory for p_tx_fw_statistics_pram\n");
+ return -ENOMEM;
+ }
+ ugeth->p_tx_fw_statistics_pram =
+ (struct ucc_geth_tx_firmware_statistics_pram __iomem *)
+ qe_muram_addr(ugeth->tx_fw_statistics_pram_offset);
+ /* Zero out p_tx_fw_statistics_pram */
+ memset_io((void __iomem *)ugeth->p_tx_fw_statistics_pram,
+ 0, sizeof(struct ucc_geth_tx_firmware_statistics_pram));
+ }
+
+ /* temoder */
+ /* Already has speed set */
+
+ if (ug_info->numQueuesTx > 1)
+ temoder |= TEMODER_SCHEDULER_ENABLE;
+ if (ug_info->ipCheckSumGenerate)
+ temoder |= TEMODER_IP_CHECKSUM_GENERATE;
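+	/* the number of Tx queues is encoded as (count - 1) */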
+ temoder |= ((ug_info->numQueuesTx - 1) << TEMODER_NUM_OF_QUEUES_SHIFT);
+ out_be16(&ugeth->p_tx_glbl_pram->temoder, temoder);
+
+ test = in_be16(&ugeth->p_tx_glbl_pram->temoder);
+
+ /* Function code register value to be used later */
+ function_code = UCC_BMR_BO_BE | UCC_BMR_GBL;
+ /* Required for QE */
+
+ /* function code register */
+ out_be32(&ugeth->p_tx_glbl_pram->tstate, ((u32) function_code) << 24);
+
+ /* Rx global PRAM */
+ /* Allocate global rx parameter RAM page */
+ ugeth->rx_glbl_pram_offset =
+ qe_muram_alloc(sizeof(struct ucc_geth_rx_global_pram),
+ UCC_GETH_RX_GLOBAL_PRAM_ALIGNMENT);
+ if (IS_ERR_VALUE(ugeth->rx_glbl_pram_offset)) {
+ if (netif_msg_ifup(ugeth))
+ pr_err("Can not allocate DPRAM memory for p_rx_glbl_pram\n");
+ return -ENOMEM;
+ }
+ ugeth->p_rx_glbl_pram =
+ (struct ucc_geth_rx_global_pram __iomem *) qe_muram_addr(ugeth->
+ rx_glbl_pram_offset);
+ /* Zero out p_rx_glbl_pram */
+ memset_io((void __iomem *)ugeth->p_rx_glbl_pram, 0, sizeof(struct ucc_geth_rx_global_pram));
+
+ /* Fill global PRAM */
+
+ /* RQPTR */
+ /* Size varies with number of Rx threads */
+ ugeth->thread_dat_rx_offset =
+ qe_muram_alloc(numThreadsRxNumerical *
+ sizeof(struct ucc_geth_thread_data_rx),
+ UCC_GETH_THREAD_DATA_ALIGNMENT);
+ if (IS_ERR_VALUE(ugeth->thread_dat_rx_offset)) {
+ if (netif_msg_ifup(ugeth))
+ pr_err("Can not allocate DPRAM memory for p_thread_data_rx\n");
+ return -ENOMEM;
+ }
+
+ ugeth->p_thread_data_rx =
+ (struct ucc_geth_thread_data_rx __iomem *) qe_muram_addr(ugeth->
+ thread_dat_rx_offset);
+ out_be32(&ugeth->p_rx_glbl_pram->rqptr, ugeth->thread_dat_rx_offset);
+
+ /* typeorlen */
+ out_be16(&ugeth->p_rx_glbl_pram->typeorlen, ug_info->typeorlen);
+
+ /* rxrmonbaseptr (statistics) */
+ if (ug_info->
+ statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) {
+ ugeth->rx_fw_statistics_pram_offset =
+ qe_muram_alloc(sizeof
+ (struct ucc_geth_rx_firmware_statistics_pram),
+ UCC_GETH_RX_STATISTICS_ALIGNMENT);
+ if (IS_ERR_VALUE(ugeth->rx_fw_statistics_pram_offset)) {
+ if (netif_msg_ifup(ugeth))
+ pr_err("Can not allocate DPRAM memory for p_rx_fw_statistics_pram\n");
+ return -ENOMEM;
+ }
+ ugeth->p_rx_fw_statistics_pram =
+ (struct ucc_geth_rx_firmware_statistics_pram __iomem *)
+ qe_muram_addr(ugeth->rx_fw_statistics_pram_offset);
+ /* Zero out p_rx_fw_statistics_pram */
+ memset_io((void __iomem *)ugeth->p_rx_fw_statistics_pram, 0,
+ sizeof(struct ucc_geth_rx_firmware_statistics_pram));
+ }
+
+ /* intCoalescingPtr */
+
+ /* Size varies with number of Rx queues */
+ ugeth->rx_irq_coalescing_tbl_offset =
+ qe_muram_alloc(ug_info->numQueuesRx *
+ sizeof(struct ucc_geth_rx_interrupt_coalescing_entry)
+ + 4, UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT);
+ if (IS_ERR_VALUE(ugeth->rx_irq_coalescing_tbl_offset)) {
+ if (netif_msg_ifup(ugeth))
+ pr_err("Can not allocate DPRAM memory for p_rx_irq_coalescing_tbl\n");
+ return -ENOMEM;
+ }
+
+ ugeth->p_rx_irq_coalescing_tbl =
+ (struct ucc_geth_rx_interrupt_coalescing_table __iomem *)
+ qe_muram_addr(ugeth->rx_irq_coalescing_tbl_offset);
+ out_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr,
+ ugeth->rx_irq_coalescing_tbl_offset);
+
+ /* Fill interrupt coalescing table */
+ for (i = 0; i < ug_info->numQueuesRx; i++) {
+ out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i].
+ interruptcoalescingmaxvalue,
+ ug_info->interruptcoalescingmaxvalue[i]);
+ out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i].
+ interruptcoalescingcounter,
+ ug_info->interruptcoalescingmaxvalue[i]);
+ }
+
+ /* MRBLR */
+ init_max_rx_buff_len(uf_info->max_rx_buf_length,
+ &ugeth->p_rx_glbl_pram->mrblr);
+ /* MFLR */
+ out_be16(&ugeth->p_rx_glbl_pram->mflr, ug_info->maxFrameLength);
+ /* MINFLR */
+ init_min_frame_len(ug_info->minFrameLength,
+ &ugeth->p_rx_glbl_pram->minflr,
+ &ugeth->p_rx_glbl_pram->mrblr);
+ /* MAXD1 */
+ out_be16(&ugeth->p_rx_glbl_pram->maxd1, ug_info->maxD1Length);
+ /* MAXD2 */
+ out_be16(&ugeth->p_rx_glbl_pram->maxd2, ug_info->maxD2Length);
+
+ /* l2qt */
+ l2qt = 0;
+ for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++)
+ l2qt |= (ug_info->l2qt[i] << (28 - 4 * i));
+ out_be32(&ugeth->p_rx_glbl_pram->l2qt, l2qt);
+
+ /* l3qt */
+ for (j = 0; j < UCC_GETH_IP_PRIORITY_MAX; j += 8) {
+ l3qt = 0;
+ for (i = 0; i < 8; i++)
+ l3qt |= (ug_info->l3qt[j + i] << (28 - 4 * i));
+ out_be32(&ugeth->p_rx_glbl_pram->l3qt[j/8], l3qt);
+ }
+
+ /* vlantype */
+ out_be16(&ugeth->p_rx_glbl_pram->vlantype, ug_info->vlantype);
+
+ /* vlantci */
+ out_be16(&ugeth->p_rx_glbl_pram->vlantci, ug_info->vlantci);
+
+ /* ecamptr */
+ out_be32(&ugeth->p_rx_glbl_pram->ecamptr, ug_info->ecamptr);
+
+ /* RBDQPTR */
+ /* Size varies with number of Rx queues */
+ ugeth->rx_bd_qs_tbl_offset =
+ qe_muram_alloc(ug_info->numQueuesRx *
+ (sizeof(struct ucc_geth_rx_bd_queues_entry) +
+ sizeof(struct ucc_geth_rx_prefetched_bds)),
+ UCC_GETH_RX_BD_QUEUES_ALIGNMENT);
+ if (IS_ERR_VALUE(ugeth->rx_bd_qs_tbl_offset)) {
+ if (netif_msg_ifup(ugeth))
+ pr_err("Can not allocate DPRAM memory for p_rx_bd_qs_tbl\n");
+ return -ENOMEM;
+ }
+
+ ugeth->p_rx_bd_qs_tbl =
+ (struct ucc_geth_rx_bd_queues_entry __iomem *) qe_muram_addr(ugeth->
+ rx_bd_qs_tbl_offset);
+ out_be32(&ugeth->p_rx_glbl_pram->rbdqptr, ugeth->rx_bd_qs_tbl_offset);
+ /* Zero out p_rx_bd_qs_tbl */
+ memset_io((void __iomem *)ugeth->p_rx_bd_qs_tbl,
+ 0,
+ ug_info->numQueuesRx * (sizeof(struct ucc_geth_rx_bd_queues_entry) +
+ sizeof(struct ucc_geth_rx_prefetched_bds)));
+
+ /* Setup the table */
+ /* Assume BD rings are already established */
+ for (i = 0; i < ug_info->numQueuesRx; i++) {
+ if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) {
+ out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
+ (u32) virt_to_phys(ugeth->p_rx_bd_ring[i]));
+ } else if (ugeth->ug_info->uf_info.bd_mem_part ==
+ MEM_PART_MURAM) {
+ out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
+ (u32) immrbar_virt_to_phys(ugeth->
+ p_rx_bd_ring[i]));
+ }
+ /* rest of fields handled by QE */
+ }
+
+ /* remoder */
+ /* Already has speed set */
+
+ if (ugeth->rx_extended_features)
+ remoder |= REMODER_RX_EXTENDED_FEATURES;
+ if (ug_info->rxExtendedFiltering)
+ remoder |= REMODER_RX_EXTENDED_FILTERING;
+ if (ug_info->dynamicMaxFrameLength)
+ remoder |= REMODER_DYNAMIC_MAX_FRAME_LENGTH;
+ if (ug_info->dynamicMinFrameLength)
+ remoder |= REMODER_DYNAMIC_MIN_FRAME_LENGTH;
+ remoder |=
+ ug_info->vlanOperationTagged << REMODER_VLAN_OPERATION_TAGGED_SHIFT;
+ remoder |=
+ ug_info->
+ vlanOperationNonTagged << REMODER_VLAN_OPERATION_NON_TAGGED_SHIFT;
+ remoder |= ug_info->rxQoSMode << REMODER_RX_QOS_MODE_SHIFT;
+ remoder |= ((ug_info->numQueuesRx - 1) << REMODER_NUM_OF_QUEUES_SHIFT);
+ if (ug_info->ipCheckSumCheck)
+ remoder |= REMODER_IP_CHECKSUM_CHECK;
+ if (ug_info->ipAddressAlignment)
+ remoder |= REMODER_IP_ADDRESS_ALIGNMENT;
+ out_be32(&ugeth->p_rx_glbl_pram->remoder, remoder);
+
+ /* Note that this function must be called */
+ /* ONLY AFTER p_tx_fw_statistics_pram */
+	/* and p_rx_fw_statistics_pram are allocated! */
+ init_firmware_statistics_gathering_mode((ug_info->
+ statisticsMode &
+ UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX),
+ (ug_info->statisticsMode &
+ UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX),
+ &ugeth->p_tx_glbl_pram->txrmonbaseptr,
+ ugeth->tx_fw_statistics_pram_offset,
+ &ugeth->p_rx_glbl_pram->rxrmonbaseptr,
+ ugeth->rx_fw_statistics_pram_offset,
+ &ugeth->p_tx_glbl_pram->temoder,
+ &ugeth->p_rx_glbl_pram->remoder);
+
+ /* function code register */
+ out_8(&ugeth->p_rx_glbl_pram->rstate, function_code);
+
+ /* initialize extended filtering */
+ if (ug_info->rxExtendedFiltering) {
+ if (!ug_info->extendedFilteringChainPointer) {
+ if (netif_msg_ifup(ugeth))
+ pr_err("Null Extended Filtering Chain Pointer\n");
+ return -EINVAL;
+ }
+
+ /* Allocate memory for extended filtering Mode Global
+ Parameters */
+ ugeth->exf_glbl_param_offset =
+ qe_muram_alloc(sizeof(struct ucc_geth_exf_global_pram),
+ UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT);
+ if (IS_ERR_VALUE(ugeth->exf_glbl_param_offset)) {
+ if (netif_msg_ifup(ugeth))
+ pr_err("Can not allocate DPRAM memory for p_exf_glbl_param\n");
+ return -ENOMEM;
+ }
+
+ ugeth->p_exf_glbl_param =
+ (struct ucc_geth_exf_global_pram __iomem *) qe_muram_addr(ugeth->
+ exf_glbl_param_offset);
+ out_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam,
+ ugeth->exf_glbl_param_offset);
+ out_be32(&ugeth->p_exf_glbl_param->l2pcdptr,
+ (u32) ug_info->extendedFilteringChainPointer);
+
+ } else { /* initialize 82xx style address filtering */
+
+ /* Init individual address recognition registers to disabled */
+
+ for (j = 0; j < NUM_OF_PADDRS; j++)
+ ugeth_82xx_filtering_clear_addr_in_paddr(ugeth, (u8) j);
+
+ p_82xx_addr_filt =
+ (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->
+ p_rx_glbl_pram->addressfiltering;
+
+ ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth,
+ ENET_ADDR_TYPE_GROUP);
+ ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth,
+ ENET_ADDR_TYPE_INDIVIDUAL);
+ }
+
+ /*
+ * Initialize UCC at QE level
+ */
+
+ command = QE_INIT_TX_RX;
+
+ /* Allocate shadow InitEnet command parameter structure.
+ * This is needed because after the InitEnet command is executed,
+ * the structure in DPRAM is released, because DPRAM is a premium
+ * resource.
+ * This shadow structure keeps a copy of what was done so that the
+ * allocated resources can be released when the channel is freed.
+ */
+ if (!(ugeth->p_init_enet_param_shadow =
+ kmalloc(sizeof(struct ucc_geth_init_pram), GFP_KERNEL))) {
+ if (netif_msg_ifup(ugeth))
+ pr_err("Can not allocate memory for p_UccInitEnetParamShadows\n");
+ return -ENOMEM;
+ }
+ /* Zero out *p_init_enet_param_shadow */
+ memset((char *)ugeth->p_init_enet_param_shadow,
+ 0, sizeof(struct ucc_geth_init_pram));
+
+ /* Fill shadow InitEnet command parameter structure */
+
+ ugeth->p_init_enet_param_shadow->resinit1 =
+ ENET_INIT_PARAM_MAGIC_RES_INIT1;
+ ugeth->p_init_enet_param_shadow->resinit2 =
+ ENET_INIT_PARAM_MAGIC_RES_INIT2;
+ ugeth->p_init_enet_param_shadow->resinit3 =
+ ENET_INIT_PARAM_MAGIC_RES_INIT3;
+ ugeth->p_init_enet_param_shadow->resinit4 =
+ ENET_INIT_PARAM_MAGIC_RES_INIT4;
+ ugeth->p_init_enet_param_shadow->resinit5 =
+ ENET_INIT_PARAM_MAGIC_RES_INIT5;
+ ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
+ ((u32) ug_info->numThreadsRx) << ENET_INIT_PARAM_RGF_SHIFT;
+ ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
+ ((u32) ug_info->numThreadsTx) << ENET_INIT_PARAM_TGF_SHIFT;
+
+ ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
+ ugeth->rx_glbl_pram_offset | ug_info->riscRx;
+ if ((ug_info->largestexternallookupkeysize !=
+ QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE) &&
+ (ug_info->largestexternallookupkeysize !=
+ QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES) &&
+ (ug_info->largestexternallookupkeysize !=
+ QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)) {
+ if (netif_msg_ifup(ugeth))
+ pr_err("Invalid largest External Lookup Key Size\n");
+ return -EINVAL;
+ }
+ ugeth->p_init_enet_param_shadow->largestexternallookupkeysize =
+ ug_info->largestexternallookupkeysize;
+ size = sizeof(struct ucc_geth_thread_rx_pram);
+ if (ug_info->rxExtendedFiltering) {
+ size += THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING;
+ if (ug_info->largestexternallookupkeysize ==
+ QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
+ size +=
+ THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8;
+ if (ug_info->largestexternallookupkeysize ==
+ QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)
+ size +=
+ THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16;
+ }
+
+ if ((ret_val = fill_init_enet_entries(ugeth, &(ugeth->
+ p_init_enet_param_shadow->rxthread[0]),
+ (u8) (numThreadsRxNumerical + 1)
+ /* Rx needs one extra for terminator */
+ , size, UCC_GETH_THREAD_RX_PRAM_ALIGNMENT,
+ ug_info->riscRx, 1)) != 0) {
+ if (netif_msg_ifup(ugeth))
+ pr_err("Can not fill p_init_enet_param_shadow\n");
+ return ret_val;
+ }
+
+ ugeth->p_init_enet_param_shadow->txglobal =
+ ugeth->tx_glbl_pram_offset | ug_info->riscTx;
+ if ((ret_val =
+ fill_init_enet_entries(ugeth,
+ &(ugeth->p_init_enet_param_shadow->
+ txthread[0]), numThreadsTxNumerical,
+ sizeof(struct ucc_geth_thread_tx_pram),
+ UCC_GETH_THREAD_TX_PRAM_ALIGNMENT,
+ ug_info->riscTx, 0)) != 0) {
+ if (netif_msg_ifup(ugeth))
+ pr_err("Can not fill p_init_enet_param_shadow\n");
+ return ret_val;
+ }
+
+ /* Load Rx bds with buffers */
+ for (i = 0; i < ug_info->numQueuesRx; i++) {
+ if ((ret_val = rx_bd_buffer_set(ugeth, (u8) i)) != 0) {
+ if (netif_msg_ifup(ugeth))
+ pr_err("Can not fill Rx bds with buffers\n");
+ return ret_val;
+ }
+ }
+
+ /* Allocate InitEnet command parameter structure */
+ init_enet_pram_offset = qe_muram_alloc(sizeof(struct ucc_geth_init_pram), 4);
+ if (IS_ERR_VALUE(init_enet_pram_offset)) {
+ if (netif_msg_ifup(ugeth))
+ pr_err("Can not allocate DPRAM memory for p_init_enet_pram\n");
+ return -ENOMEM;
+ }
+ p_init_enet_pram =
+ (struct ucc_geth_init_pram __iomem *) qe_muram_addr(init_enet_pram_offset);
+
+ /* Copy shadow InitEnet command parameter structure into PRAM */
+ out_8(&p_init_enet_pram->resinit1,
+ ugeth->p_init_enet_param_shadow->resinit1);
+ out_8(&p_init_enet_pram->resinit2,
+ ugeth->p_init_enet_param_shadow->resinit2);
+ out_8(&p_init_enet_pram->resinit3,
+ ugeth->p_init_enet_param_shadow->resinit3);
+ out_8(&p_init_enet_pram->resinit4,
+ ugeth->p_init_enet_param_shadow->resinit4);
+ out_be16(&p_init_enet_pram->resinit5,
+ ugeth->p_init_enet_param_shadow->resinit5);
+ out_8(&p_init_enet_pram->largestexternallookupkeysize,
+ ugeth->p_init_enet_param_shadow->largestexternallookupkeysize);
+ out_be32(&p_init_enet_pram->rgftgfrxglobal,
+ ugeth->p_init_enet_param_shadow->rgftgfrxglobal);
+ for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_RX; i++)
+ out_be32(&p_init_enet_pram->rxthread[i],
+ ugeth->p_init_enet_param_shadow->rxthread[i]);
+ out_be32(&p_init_enet_pram->txglobal,
+ ugeth->p_init_enet_param_shadow->txglobal);
+ for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_TX; i++)
+ out_be32(&p_init_enet_pram->txthread[i],
+ ugeth->p_init_enet_param_shadow->txthread[i]);
+
+ /* Issue QE command */
+ cecr_subblock =
+ ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
+ qe_issue_cmd(command, cecr_subblock, QE_CR_PROTOCOL_ETHERNET,
+ init_enet_pram_offset);
+
+ /* Free InitEnet command parameter */
+ qe_muram_free(init_enet_pram_offset);
+
+ return 0;
+}
+
+/* This is called by the kernel when a frame is ready for transmission. */
+/* It is pointed to by the dev->hard_start_xmit function pointer */
+static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(dev);
+#ifdef CONFIG_UGETH_TX_ON_DEMAND
+ struct ucc_fast_private *uccf;
+#endif
+ u8 __iomem *bd; /* BD pointer */
+ u32 bd_status;
+ u8 txQ = 0;
+ unsigned long flags;
+
+ ugeth_vdbg("%s: IN", __func__);
+
+ spin_lock_irqsave(&ugeth->lock, flags);
+
+ dev->stats.tx_bytes += skb->len;
+
+ /* Start from the next BD that should be filled */
+ bd = ugeth->txBd[txQ];
+ bd_status = in_be32((u32 __iomem *)bd);
+ /* Save the skb pointer so we can free it later */
+ ugeth->tx_skbuff[txQ][ugeth->skb_curtx[txQ]] = skb;
+
+ /* Update the current skb pointer (wrapping if this was the last) */
+ ugeth->skb_curtx[txQ] =
+ (ugeth->skb_curtx[txQ] +
+ 1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]);
+
+ /* set up the buffer descriptor */
+ out_be32(&((struct qe_bd __iomem *)bd)->buf,
+ dma_map_single(ugeth->dev, skb->data,
+ skb->len, DMA_TO_DEVICE));
+
+ /* printk(KERN_DEBUG"skb->data is 0x%x\n",skb->data); */
+
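+	/* keep the Wrap bit; mark the BD ready (T_R), interrupt on completion
+	   (T_I), last in frame (T_L), and store the frame length */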
+ bd_status = (bd_status & T_W) | T_R | T_I | T_L | skb->len;
+
+ /* set bd status and length */
+ out_be32((u32 __iomem *)bd, bd_status);
+
+ /* Move to next BD in the ring */
+ if (!(bd_status & T_W))
+ bd += sizeof(struct qe_bd);
+ else
+ bd = ugeth->p_tx_bd_ring[txQ];
+
+ /* If the next BD still needs to be cleaned up, then the bds
+ are full. We need to tell the kernel to stop sending us stuff. */
+ if (bd == ugeth->confBd[txQ]) {
+ if (!netif_queue_stopped(dev))
+ netif_stop_queue(dev);
+ }
+
+ ugeth->txBd[txQ] = bd;
+
+ skb_tx_timestamp(skb);
+
+ if (ugeth->p_scheduler) {
+ ugeth->cpucount[txQ]++;
+ /* Indicate to QE that there are more Tx bds ready for
+ transmission */
+ /* This is done by writing a running counter of the bd
+ count to the scheduler PRAM. */
+ out_be16(ugeth->p_cpucount[txQ], ugeth->cpucount[txQ]);
+ }
+
+#ifdef CONFIG_UGETH_TX_ON_DEMAND
+ uccf = ugeth->uccf;
+ out_be16(uccf->p_utodr, UCC_FAST_TOD);
+#endif
+ spin_unlock_irqrestore(&ugeth->lock, flags);
+
+ return NETDEV_TX_OK;
+}
+
+static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit)
+{
+ struct sk_buff *skb;
+ u8 __iomem *bd;
+ u16 length, howmany = 0;
+ u32 bd_status;
+ u8 *bdBuffer;
+ struct net_device *dev;
+
+ ugeth_vdbg("%s: IN", __func__);
+
+ dev = ugeth->ndev;
+
+ /* collect received buffers */
+ bd = ugeth->rxBd[rxQ];
+
+ bd_status = in_be32((u32 __iomem *)bd);
+
+ /* while there are received buffers and BD is full (~R_E) */
+ while (!((bd_status & (R_E)) || (--rx_work_limit < 0))) {
+ bdBuffer = (u8 *) in_be32(&((struct qe_bd __iomem *)bd)->buf);
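+		/* data length from the BD, minus the 4-byte FCS */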
+ length = (u16) ((bd_status & BD_LENGTH_MASK) - 4);
+ skb = ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]];
+
+ /* determine whether buffer is first, last, first and last
+ (single buffer frame) or middle (not first and not last) */
+ if (!skb ||
+ (!(bd_status & (R_F | R_L))) ||
+ (bd_status & R_ERRORS_FATAL)) {
+ if (netif_msg_rx_err(ugeth))
+ pr_err("%d: ERROR!!! skb - 0x%08x\n",
+ __LINE__, (u32)skb);
+ dev_kfree_skb(skb);
+
+ ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = NULL;
+ dev->stats.rx_dropped++;
+ } else {
+ dev->stats.rx_packets++;
+ howmany++;
+
+ /* Prep the skb for the packet */
+ skb_put(skb, length);
+
+ /* Tell the skb what kind of packet this is */
+ skb->protocol = eth_type_trans(skb, ugeth->ndev);
+
+ dev->stats.rx_bytes += length;
+ /* Send the packet up the stack */
+ netif_receive_skb(skb);
+ }
+
+ skb = get_new_skb(ugeth, bd);
+ if (!skb) {
+ if (netif_msg_rx_err(ugeth))
+ pr_warn("No Rx Data Buffer\n");
+ dev->stats.rx_dropped++;
+ break;
+ }
+
+ ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = skb;
+
+ /* update to point at the next skb */
+ ugeth->skb_currx[rxQ] =
+ (ugeth->skb_currx[rxQ] +
+ 1) & RX_RING_MOD_MASK(ugeth->ug_info->bdRingLenRx[rxQ]);
+
+ if (bd_status & R_W)
+ bd = ugeth->p_rx_bd_ring[rxQ];
+ else
+ bd += sizeof(struct qe_bd);
+
+ bd_status = in_be32((u32 __iomem *)bd);
+ }
+
+ ugeth->rxBd[rxQ] = bd;
+ return howmany;
+}
+
+static int ucc_geth_tx(struct net_device *dev, u8 txQ)
+{
+ /* Start from the next BD that should be filled */
+ struct ucc_geth_private *ugeth = netdev_priv(dev);
+ u8 __iomem *bd; /* BD pointer */
+ u32 bd_status;
+
+ bd = ugeth->confBd[txQ];
+ bd_status = in_be32((u32 __iomem *)bd);
+
+ /* Normal processing. */
+ while ((bd_status & T_R) == 0) {
+ struct sk_buff *skb;
+
+ /* BD contains already transmitted buffer. */
+ /* Handle the transmitted buffer and release */
+ /* the BD to be used with the current frame */
+
+ skb = ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]];
+ if (!skb)
+ break;
+
+ dev->stats.tx_packets++;
+
+ dev_consume_skb_any(skb);
+
+ ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]] = NULL;
+ ugeth->skb_dirtytx[txQ] =
+ (ugeth->skb_dirtytx[txQ] +
+ 1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]);
+
+ /* We freed a buffer, so now we can restart transmission */
+ if (netif_queue_stopped(dev))
+ netif_wake_queue(dev);
+
+ /* Advance the confirmation BD pointer */
+ if (!(bd_status & T_W))
+ bd += sizeof(struct qe_bd);
+ else
+ bd = ugeth->p_tx_bd_ring[txQ];
+ bd_status = in_be32((u32 __iomem *)bd);
+ }
+ ugeth->confBd[txQ] = bd;
+ return 0;
+}
+
+static int ucc_geth_poll(struct napi_struct *napi, int budget)
+{
+ struct ucc_geth_private *ugeth = container_of(napi, struct ucc_geth_private, napi);
+ struct ucc_geth_info *ug_info;
+ int howmany, i;
+
+ ug_info = ugeth->ug_info;
+
+ /* Tx event processing */
+ spin_lock(&ugeth->lock);
+ for (i = 0; i < ug_info->numQueuesTx; i++)
+ ucc_geth_tx(ugeth->ndev, i);
+ spin_unlock(&ugeth->lock);
+
+ howmany = 0;
+ for (i = 0; i < ug_info->numQueuesRx; i++)
+ howmany += ucc_geth_rx(ugeth, i, budget - howmany);
+
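+	/* all work done within budget: leave polling mode and
+	   re-enable Rx/Tx event interrupts */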
+ if (howmany < budget) {
+ napi_complete(napi);
+ setbits32(ugeth->uccf->p_uccm, UCCE_RX_EVENTS | UCCE_TX_EVENTS);
+ }
+
+ return howmany;
+}
+
+static irqreturn_t ucc_geth_irq_handler(int irq, void *info)
+{
+ struct net_device *dev = info;
+ struct ucc_geth_private *ugeth = netdev_priv(dev);
+ struct ucc_fast_private *uccf;
+ struct ucc_geth_info *ug_info;
+ register u32 ucce;
+ register u32 uccm;
+
+ ugeth_vdbg("%s: IN", __func__);
+
+ uccf = ugeth->uccf;
+ ug_info = ugeth->ug_info;
+
+ /* read and clear events */
+ ucce = (u32) in_be32(uccf->p_ucce);
+ uccm = (u32) in_be32(uccf->p_uccm);
+ ucce &= uccm;
+ out_be32(uccf->p_ucce, ucce);
+
+ /* check for receive events that require processing */
+ if (ucce & (UCCE_RX_EVENTS | UCCE_TX_EVENTS)) {
+ if (napi_schedule_prep(&ugeth->napi)) {
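+			/* mask further Rx/Tx events until the NAPI poll
+			   re-enables them */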
+ uccm &= ~(UCCE_RX_EVENTS | UCCE_TX_EVENTS);
+ out_be32(uccf->p_uccm, uccm);
+ __napi_schedule(&ugeth->napi);
+ }
+ }
+
+ /* Errors and other events */
+ if (ucce & UCCE_OTHER) {
+ if (ucce & UCC_GETH_UCCE_BSY)
+ dev->stats.rx_errors++;
+ if (ucce & UCC_GETH_UCCE_TXE)
+ dev->stats.tx_errors++;
+ }
+
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts. It's not called while
+ * the interrupt routine is executing.
+ */
+static void ucc_netpoll(struct net_device *dev)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(dev);
+ int irq = ugeth->ug_info->uf_info.irq;
+
+ disable_irq(irq);
+ ucc_geth_irq_handler(irq, dev);
+ enable_irq(irq);
+}
+#endif /* CONFIG_NET_POLL_CONTROLLER */
+
+static int ucc_geth_set_mac_addr(struct net_device *dev, void *p)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(dev);
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+
+ /*
+ * If device is not running, we will set mac addr register
+ * when opening the device.
+ */
+ if (!netif_running(dev))
+ return 0;
+
+ spin_lock_irq(&ugeth->lock);
+ init_mac_station_addr_regs(dev->dev_addr[0],
+ dev->dev_addr[1],
+ dev->dev_addr[2],
+ dev->dev_addr[3],
+ dev->dev_addr[4],
+ dev->dev_addr[5],
+ &ugeth->ug_regs->macstnaddr1,
+ &ugeth->ug_regs->macstnaddr2);
+ spin_unlock_irq(&ugeth->lock);
+
+ return 0;
+}
+
+static int ucc_geth_init_mac(struct ucc_geth_private *ugeth)
+{
+ struct net_device *dev = ugeth->ndev;
+ int err;
+
+ err = ucc_struct_init(ugeth);
+ if (err) {
+ netif_err(ugeth, ifup, dev, "Cannot configure internal struct, aborting\n");
+ goto err;
+ }
+
+ err = ucc_geth_startup(ugeth);
+ if (err) {
+ netif_err(ugeth, ifup, dev, "Cannot configure net device, aborting\n");
+ goto err;
+ }
+
+ err = adjust_enet_interface(ugeth);
+ if (err) {
+ netif_err(ugeth, ifup, dev, "Cannot configure net device, aborting\n");
+ goto err;
+ }
+
+ /* Set MACSTNADDR1, MACSTNADDR2 */
+ /* For more details see the hardware spec. */
+ init_mac_station_addr_regs(dev->dev_addr[0],
+ dev->dev_addr[1],
+ dev->dev_addr[2],
+ dev->dev_addr[3],
+ dev->dev_addr[4],
+ dev->dev_addr[5],
+ &ugeth->ug_regs->macstnaddr1,
+ &ugeth->ug_regs->macstnaddr2);
+
+ err = ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
+ if (err) {
+ netif_err(ugeth, ifup, dev, "Cannot enable net device, aborting\n");
+ goto err;
+ }
+
+ return 0;
+err:
+ ucc_geth_stop(ugeth);
+ return err;
+}
+
+/* Called when something needs to use the ethernet device */
+/* Returns 0 for success. */
+static int ucc_geth_open(struct net_device *dev)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(dev);
+ int err;
+
+ ugeth_vdbg("%s: IN", __func__);
+
+ /* Test station address */
+ if (dev->dev_addr[0] & ENET_GROUP_ADDR) {
+ netif_err(ugeth, ifup, dev,
+ "Multicast address used for station address - is this what you wanted?\n");
+ return -EINVAL;
+ }
+
+ err = init_phy(dev);
+ if (err) {
+ netif_err(ugeth, ifup, dev, "Cannot initialize PHY, aborting\n");
+ return err;
+ }
+
+ err = ucc_geth_init_mac(ugeth);
+ if (err) {
+ netif_err(ugeth, ifup, dev, "Cannot initialize MAC, aborting\n");
+ goto err;
+ }
+
+ err = request_irq(ugeth->ug_info->uf_info.irq, ucc_geth_irq_handler,
+ 0, "UCC Geth", dev);
+ if (err) {
+ netif_err(ugeth, ifup, dev, "Cannot get IRQ for net device, aborting\n");
+ goto err;
+ }
+
+ phy_start(ugeth->phydev);
+ napi_enable(&ugeth->napi);
+ netif_start_queue(dev);
+
+ device_set_wakeup_capable(&dev->dev,
+ qe_alive_during_sleep() || ugeth->phydev->irq);
+ device_set_wakeup_enable(&dev->dev, ugeth->wol_en);
+
+ return err;
+
+err:
+ ucc_geth_stop(ugeth);
+ return err;
+}
+
+/* Stops the kernel queue, and halts the controller */
+static int ucc_geth_close(struct net_device *dev)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(dev);
+
+ ugeth_vdbg("%s: IN", __func__);
+
+ napi_disable(&ugeth->napi);
+
+ cancel_work_sync(&ugeth->timeout_work);
+ ucc_geth_stop(ugeth);
+ phy_disconnect(ugeth->phydev);
+ ugeth->phydev = NULL;
+
+ free_irq(ugeth->ug_info->uf_info.irq, ugeth->ndev);
+
+ netif_stop_queue(dev);
+
+ return 0;
+}
+
+/* Reopen device. This will reset the MAC and PHY. */
+static void ucc_geth_timeout_work(struct work_struct *work)
+{
+ struct ucc_geth_private *ugeth;
+ struct net_device *dev;
+
+ ugeth = container_of(work, struct ucc_geth_private, timeout_work);
+ dev = ugeth->ndev;
+
+ ugeth_vdbg("%s: IN", __func__);
+
+ dev->stats.tx_errors++;
+
+ ugeth_dump_regs(ugeth);
+
+ if (dev->flags & IFF_UP) {
+ /*
+ * Must reset MAC *and* PHY. This is done by reopening
+ * the device.
+ */
+ netif_tx_stop_all_queues(dev);
+ ucc_geth_stop(ugeth);
+ ucc_geth_init_mac(ugeth);
+ /* Must start PHY here */
+ phy_start(ugeth->phydev);
+ netif_tx_start_all_queues(dev);
+ }
+
+ netif_tx_schedule_all(dev);
+}
+
+/*
+ * ucc_geth_timeout gets called when a packet has not been
+ * transmitted after a set amount of time.
+ */
+static void ucc_geth_timeout(struct net_device *dev)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(dev);
+
+ schedule_work(&ugeth->timeout_work);
+}
+
+
+#ifdef CONFIG_PM
+
+static int ucc_geth_suspend(struct platform_device *ofdev, pm_message_t state)
+{
+ struct net_device *ndev = platform_get_drvdata(ofdev);
+ struct ucc_geth_private *ugeth = netdev_priv(ndev);
+
+ if (!netif_running(ndev))
+ return 0;
+
+ netif_device_detach(ndev);
+ napi_disable(&ugeth->napi);
+
+ /*
+ * Disable the controller, otherwise we'll wakeup on any network
+ * activity.
+ */
+ ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
+
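+	/* for magic-packet wake-up, enable Magic Packet Detection and
+	   keep the fast UCC running */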
+ if (ugeth->wol_en & WAKE_MAGIC) {
+ setbits32(ugeth->uccf->p_uccm, UCC_GETH_UCCE_MPD);
+ setbits32(&ugeth->ug_regs->maccfg2, MACCFG2_MPE);
+ ucc_fast_enable(ugeth->uccf, COMM_DIR_RX_AND_TX);
+ } else if (!(ugeth->wol_en & WAKE_PHY)) {
+ phy_stop(ugeth->phydev);
+ }
+
+ return 0;
+}
+
+static int ucc_geth_resume(struct platform_device *ofdev)
+{
+ struct net_device *ndev = platform_get_drvdata(ofdev);
+ struct ucc_geth_private *ugeth = netdev_priv(ndev);
+ int err;
+
+ if (!netif_running(ndev))
+ return 0;
+
+ if (qe_alive_during_sleep()) {
+ if (ugeth->wol_en & WAKE_MAGIC) {
+ ucc_fast_disable(ugeth->uccf, COMM_DIR_RX_AND_TX);
+ clrbits32(&ugeth->ug_regs->maccfg2, MACCFG2_MPE);
+ clrbits32(ugeth->uccf->p_uccm, UCC_GETH_UCCE_MPD);
+ }
+ ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
+ } else {
+ /*
+ * Full reinitialization is required if QE shuts down
+ * during sleep.
+ */
+ ucc_geth_memclean(ugeth);
+
+ err = ucc_geth_init_mac(ugeth);
+ if (err) {
+ netdev_err(ndev, "Cannot initialize MAC, aborting\n");
+ return err;
+ }
+ }
+
+ ugeth->oldlink = 0;
+ ugeth->oldspeed = 0;
+ ugeth->oldduplex = -1;
+
+ phy_stop(ugeth->phydev);
+ phy_start(ugeth->phydev);
+
+ napi_enable(&ugeth->napi);
+ netif_device_attach(ndev);
+
+ return 0;
+}
+
+#else
+#define ucc_geth_suspend NULL
+#define ucc_geth_resume NULL
+#endif
+
+static phy_interface_t to_phy_interface(const char *phy_connection_type)
+{
+ if (strcasecmp(phy_connection_type, "mii") == 0)
+ return PHY_INTERFACE_MODE_MII;
+ if (strcasecmp(phy_connection_type, "gmii") == 0)
+ return PHY_INTERFACE_MODE_GMII;
+ if (strcasecmp(phy_connection_type, "tbi") == 0)
+ return PHY_INTERFACE_MODE_TBI;
+ if (strcasecmp(phy_connection_type, "rmii") == 0)
+ return PHY_INTERFACE_MODE_RMII;
+ if (strcasecmp(phy_connection_type, "rgmii") == 0)
+ return PHY_INTERFACE_MODE_RGMII;
+ if (strcasecmp(phy_connection_type, "rgmii-id") == 0)
+ return PHY_INTERFACE_MODE_RGMII_ID;
+ if (strcasecmp(phy_connection_type, "rgmii-txid") == 0)
+ return PHY_INTERFACE_MODE_RGMII_TXID;
+ if (strcasecmp(phy_connection_type, "rgmii-rxid") == 0)
+ return PHY_INTERFACE_MODE_RGMII_RXID;
+ if (strcasecmp(phy_connection_type, "rtbi") == 0)
+ return PHY_INTERFACE_MODE_RTBI;
+ if (strcasecmp(phy_connection_type, "sgmii") == 0)
+ return PHY_INTERFACE_MODE_SGMII;
+
+ return PHY_INTERFACE_MODE_MII;
+}
+
+static int ucc_geth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ if (!ugeth->phydev)
+ return -ENODEV;
+
+ return phy_mii_ioctl(ugeth->phydev, rq, cmd);
+}
+
+static const struct net_device_ops ucc_geth_netdev_ops = {
+ .ndo_open = ucc_geth_open,
+ .ndo_stop = ucc_geth_close,
+ .ndo_start_xmit = ucc_geth_start_xmit,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = ucc_geth_set_mac_addr,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_rx_mode = ucc_geth_set_multi,
+ .ndo_tx_timeout = ucc_geth_timeout,
+ .ndo_do_ioctl = ucc_geth_ioctl,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = ucc_netpoll,
+#endif
+};
+
+static int ucc_geth_probe(struct platform_device* ofdev)
+{
+ struct device *device = &ofdev->dev;
+ struct device_node *np = ofdev->dev.of_node;
+ struct net_device *dev = NULL;
+ struct ucc_geth_private *ugeth = NULL;
+ struct ucc_geth_info *ug_info;
+ struct resource res;
+ int err, ucc_num, max_speed = 0;
+ const unsigned int *prop;
+ const char *sprop;
+ const void *mac_addr;
+ phy_interface_t phy_interface;
+ static const int enet_to_speed[] = {
+ SPEED_10, SPEED_10, SPEED_10,
+ SPEED_100, SPEED_100, SPEED_100,
+ SPEED_1000, SPEED_1000, SPEED_1000, SPEED_1000,
+ };
+ static const phy_interface_t enet_to_phy_interface[] = {
+ PHY_INTERFACE_MODE_MII, PHY_INTERFACE_MODE_RMII,
+ PHY_INTERFACE_MODE_RGMII, PHY_INTERFACE_MODE_MII,
+ PHY_INTERFACE_MODE_RMII, PHY_INTERFACE_MODE_RGMII,
+ PHY_INTERFACE_MODE_GMII, PHY_INTERFACE_MODE_RGMII,
+ PHY_INTERFACE_MODE_TBI, PHY_INTERFACE_MODE_RTBI,
+ PHY_INTERFACE_MODE_SGMII,
+ };
+
+ ugeth_vdbg("%s: IN", __func__);
+
+ prop = of_get_property(np, "cell-index", NULL);
+ if (!prop) {
+ prop = of_get_property(np, "device-id", NULL);
+ if (!prop)
+ return -ENODEV;
+ }
+
+ ucc_num = *prop - 1;
+ if ((ucc_num < 0) || (ucc_num > 7))
+ return -ENODEV;
+
+ ug_info = &ugeth_info[ucc_num];
+ if (ug_info == NULL) {
+ if (netif_msg_probe(&debug))
+ pr_err("[%d] Missing additional data!\n", ucc_num);
+ return -ENODEV;
+ }
+
+ ug_info->uf_info.ucc_num = ucc_num;
+
+ sprop = of_get_property(np, "rx-clock-name", NULL);
+ if (sprop) {
+ ug_info->uf_info.rx_clock = qe_clock_source(sprop);
+ if ((ug_info->uf_info.rx_clock < QE_CLK_NONE) ||
+ (ug_info->uf_info.rx_clock > QE_CLK24)) {
+ pr_err("invalid rx-clock-name property\n");
+ return -EINVAL;
+ }
+ } else {
+ prop = of_get_property(np, "rx-clock", NULL);
+ if (!prop) {
+ /* If both rx-clock-name and rx-clock are missing,
+ we want to tell people to use rx-clock-name. */
+ pr_err("missing rx-clock-name property\n");
+ return -EINVAL;
+ }
+ if ((*prop < QE_CLK_NONE) || (*prop > QE_CLK24)) {
+			pr_err("invalid rx-clock property\n");
+ return -EINVAL;
+ }
+ ug_info->uf_info.rx_clock = *prop;
+ }
+
+ sprop = of_get_property(np, "tx-clock-name", NULL);
+ if (sprop) {
+ ug_info->uf_info.tx_clock = qe_clock_source(sprop);
+ if ((ug_info->uf_info.tx_clock < QE_CLK_NONE) ||
+ (ug_info->uf_info.tx_clock > QE_CLK24)) {
+ pr_err("invalid tx-clock-name property\n");
+ return -EINVAL;
+ }
+ } else {
+ prop = of_get_property(np, "tx-clock", NULL);
+ if (!prop) {
+ pr_err("missing tx-clock-name property\n");
+ return -EINVAL;
+ }
+ if ((*prop < QE_CLK_NONE) || (*prop > QE_CLK24)) {
+ pr_err("invalid tx-clock property\n");
+ return -EINVAL;
+ }
+ ug_info->uf_info.tx_clock = *prop;
+ }
+
+ err = of_address_to_resource(np, 0, &res);
+ if (err)
+ return -EINVAL;
+
+ ug_info->uf_info.regs = res.start;
+ ug_info->uf_info.irq = irq_of_parse_and_map(np, 0);
+
+ ug_info->phy_node = of_parse_phandle(np, "phy-handle", 0);
+ if (!ug_info->phy_node && of_phy_is_fixed_link(np)) {
+ /*
+ * In the case of a fixed PHY, the DT node associated
+ * to the PHY is the Ethernet MAC DT node.
+ */
+ err = of_phy_register_fixed_link(np);
+ if (err)
+ return err;
+ ug_info->phy_node = of_node_get(np);
+ }
+
+ /* Find the TBI PHY node. If it's not there, we don't support SGMII */
+ ug_info->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
+
+ /* get the phy interface type, or default to MII */
+ prop = of_get_property(np, "phy-connection-type", NULL);
+ if (!prop) {
+ /* handle interface property present in old trees */
+ prop = of_get_property(ug_info->phy_node, "interface", NULL);
+ if (prop != NULL) {
+ phy_interface = enet_to_phy_interface[*prop];
+ max_speed = enet_to_speed[*prop];
+ } else
+ phy_interface = PHY_INTERFACE_MODE_MII;
+ } else {
+ phy_interface = to_phy_interface((const char *)prop);
+ }
+
+ /* get speed, or derive from PHY interface */
+ if (max_speed == 0)
+ switch (phy_interface) {
+ case PHY_INTERFACE_MODE_GMII:
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_TBI:
+ case PHY_INTERFACE_MODE_RTBI:
+ case PHY_INTERFACE_MODE_SGMII:
+ max_speed = SPEED_1000;
+ break;
+ default:
+ max_speed = SPEED_100;
+ break;
+ }
+
+ if (max_speed == SPEED_1000) {
+ unsigned int snums = qe_get_num_of_snums();
+
+ /* configure muram FIFOs for gigabit operation */
+ ug_info->uf_info.urfs = UCC_GETH_URFS_GIGA_INIT;
+ ug_info->uf_info.urfet = UCC_GETH_URFET_GIGA_INIT;
+ ug_info->uf_info.urfset = UCC_GETH_URFSET_GIGA_INIT;
+ ug_info->uf_info.utfs = UCC_GETH_UTFS_GIGA_INIT;
+ ug_info->uf_info.utfet = UCC_GETH_UTFET_GIGA_INIT;
+ ug_info->uf_info.utftt = UCC_GETH_UTFTT_GIGA_INIT;
+ ug_info->numThreadsTx = UCC_GETH_NUM_OF_THREADS_4;
+
+		/* If the QE's snum count is 46 or 76, we need to support
+		 * 4 UECs at 1000Base-T simultaneously, so allocate
+		 * more threads to Rx.
+		 */
+ if ((snums == 76) || (snums == 46))
+ ug_info->numThreadsRx = UCC_GETH_NUM_OF_THREADS_6;
+ else
+ ug_info->numThreadsRx = UCC_GETH_NUM_OF_THREADS_4;
+ }
+
+ if (netif_msg_probe(&debug))
+ pr_info("UCC%1d at 0x%8x (irq = %d)\n",
+ ug_info->uf_info.ucc_num + 1, ug_info->uf_info.regs,
+ ug_info->uf_info.irq);
+
+ /* Create an ethernet device instance */
+ dev = alloc_etherdev(sizeof(*ugeth));
+
+ if (dev == NULL) {
+ of_node_put(ug_info->tbi_node);
+ of_node_put(ug_info->phy_node);
+ return -ENOMEM;
+ }
+
+ ugeth = netdev_priv(dev);
+ spin_lock_init(&ugeth->lock);
+
+ /* Create CQs for hash tables */
+ INIT_LIST_HEAD(&ugeth->group_hash_q);
+ INIT_LIST_HEAD(&ugeth->ind_hash_q);
+
+ dev_set_drvdata(device, dev);
+
+ /* Set the dev->base_addr to the gfar reg region */
+ dev->base_addr = (unsigned long)(ug_info->uf_info.regs);
+
+ SET_NETDEV_DEV(dev, device);
+
+ /* Fill in the dev structure */
+ uec_set_ethtool_ops(dev);
+ dev->netdev_ops = &ucc_geth_netdev_ops;
+ dev->watchdog_timeo = TX_TIMEOUT;
+ INIT_WORK(&ugeth->timeout_work, ucc_geth_timeout_work);
+ netif_napi_add(dev, &ugeth->napi, ucc_geth_poll, 64);
+ dev->mtu = 1500;
+
+ ugeth->msg_enable = netif_msg_init(debug.msg_enable, UGETH_MSG_DEFAULT);
+ ugeth->phy_interface = phy_interface;
+ ugeth->max_speed = max_speed;
+
+ /* Carrier starts down, phylib will bring it up */
+ netif_carrier_off(dev);
+
+ err = register_netdev(dev);
+ if (err) {
+ if (netif_msg_probe(ugeth))
+ pr_err("%s: Cannot register net device, aborting\n",
+ dev->name);
+ free_netdev(dev);
+ of_node_put(ug_info->tbi_node);
+ of_node_put(ug_info->phy_node);
+ return err;
+ }
+
+ mac_addr = of_get_mac_address(np);
+ if (mac_addr)
+ memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
+
+ ugeth->ug_info = ug_info;
+ ugeth->dev = device;
+ ugeth->ndev = dev;
+ ugeth->node = np;
+
+ return 0;
+}
+
+static int ucc_geth_remove(struct platform_device* ofdev)
+{
+ struct net_device *dev = platform_get_drvdata(ofdev);
+ struct ucc_geth_private *ugeth = netdev_priv(dev);
+
+ unregister_netdev(dev);
+ free_netdev(dev);
+ ucc_geth_memclean(ugeth);
+ of_node_put(ugeth->ug_info->tbi_node);
+ of_node_put(ugeth->ug_info->phy_node);
+
+ return 0;
+}
+
+static const struct of_device_id ucc_geth_match[] = {
+ {
+ .type = "network",
+ .compatible = "ucc_geth",
+ },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, ucc_geth_match);
+
+static struct platform_driver ucc_geth_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = ucc_geth_match,
+ },
+ .probe = ucc_geth_probe,
+ .remove = ucc_geth_remove,
+ .suspend = ucc_geth_suspend,
+ .resume = ucc_geth_resume,
+};
+
+static int __init ucc_geth_init(void)
+{
+ int i, ret;
+
+ if (netif_msg_drv(&debug))
+ pr_info(DRV_DESC "\n");
+ for (i = 0; i < 8; i++)
+ memcpy(&(ugeth_info[i]), &ugeth_primary_info,
+ sizeof(ugeth_primary_info));
+
+ ret = platform_driver_register(&ucc_geth_driver);
+
+ return ret;
+}
+
+static void __exit ucc_geth_exit(void)
+{
+ platform_driver_unregister(&ucc_geth_driver);
+}
+
+module_init(ucc_geth_init);
+module_exit(ucc_geth_exit);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc");
+MODULE_DESCRIPTION(DRV_DESC);
+MODULE_VERSION(DRV_VERSION);
+MODULE_LICENSE("GPL");
diff --git a/kernel/drivers/net/ethernet/freescale/ucc_geth.h b/kernel/drivers/net/ethernet/freescale/ucc_geth.h
new file mode 100644
index 000000000..75f337163
--- /dev/null
+++ b/kernel/drivers/net/ethernet/freescale/ucc_geth.h
@@ -0,0 +1,1238 @@
+/*
+ * Copyright (C) Freescale Semiconductor, Inc. 2006-2009. All rights reserved.
+ *
+ * Author: Shlomi Gridish <gridish@freescale.com>
+ *
+ * Description:
+ * Internal header file for UCC Gigabit Ethernet unit routines.
+ *
+ * Changelog:
+ * Jun 28, 2006 Li Yang <LeoLi@freescale.com>
+ * - Rearrange code and style fixes
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#ifndef __UCC_GETH_H__
+#define __UCC_GETH_H__
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/if_ether.h>
+
+#include <asm/immap_qe.h>
+#include <asm/qe.h>
+
+#include <asm/ucc.h>
+#include <asm/ucc_fast.h>
+
+#define DRV_DESC "QE UCC Gigabit Ethernet Controller"
+#define DRV_NAME "ucc_geth"
+#define DRV_VERSION "1.1"
+
+#define NUM_TX_QUEUES 8
+#define NUM_RX_QUEUES 8
+#define NUM_BDS_IN_PREFETCHED_BDS 4
+#define TX_IP_OFFSET_ENTRY_MAX 8
+#define NUM_OF_PADDRS 4
+#define ENET_INIT_PARAM_MAX_ENTRIES_RX 9
+#define ENET_INIT_PARAM_MAX_ENTRIES_TX 8
+
+struct ucc_geth {
+ struct ucc_fast uccf;
+ u8 res0[0x100 - sizeof(struct ucc_fast)];
+
+ u32 maccfg1; /* mac configuration reg. 1 */
+ u32 maccfg2; /* mac configuration reg. 2 */
+ u32 ipgifg; /* interframe gap reg. */
+ u32 hafdup; /* half-duplex reg. */
+ u8 res1[0x10];
+ u8 miimng[0x18]; /* MII management structure moved to _mii.h */
+ u32 ifctl; /* interface control reg */
+	u32 ifstat;		/* interface status reg */
+ u32 macstnaddr1; /* mac station address part 1 reg */
+ u32 macstnaddr2; /* mac station address part 2 reg */
+ u8 res2[0x8];
+ u32 uempr; /* UCC Ethernet Mac parameter reg */
+ u32 utbipar; /* UCC tbi address reg */
+ u16 uescr; /* UCC Ethernet statistics control reg */
+ u8 res3[0x180 - 0x15A];
+ u32 tx64; /* Total number of frames (including bad
+ frames) transmitted that were exactly of the
+				   minimal length (64 for untagged, 68 for
+				   tagged, or with length exactly equal to the
+				   parameter MINLength) */
+ u32 tx127; /* Total number of frames (including bad
+ frames) transmitted that were between
+ MINLength (Including FCS length==4) and 127
+ octets */
+ u32 tx255; /* Total number of frames (including bad
+ frames) transmitted that were between 128
+ (Including FCS length==4) and 255 octets */
+ u32 rx64; /* Total number of frames received including
+				   bad frames that were exactly of the minimal
+ length (64 bytes) */
+ u32 rx127; /* Total number of frames (including bad
+ frames) received that were between MINLength
+ (Including FCS length==4) and 127 octets */
+ u32 rx255; /* Total number of frames (including bad
+ frames) received that were between 128
+ (Including FCS length==4) and 255 octets */
+ u32 txok; /* Total number of octets residing in frames
+				   that were involved in successful
+ transmission */
+ u16 txcf; /* Total number of PAUSE control frames
+ transmitted by this MAC */
+ u8 res4[0x2];
+ u32 tmca; /* Total number of frames that were transmitted
+ successfully with the group address bit set
+ that are not broadcast frames */
+ u32 tbca; /* Total number of frames transmitted
+ successfully that had destination address
+ field equal to the broadcast address */
+ u32 rxfok; /* Total number of frames received OK */
+ u32 rxbok; /* Total number of octets received OK */
+ u32 rbyt; /* Total number of octets received including
+ octets in bad frames. Must be implemented in
+ HW because it includes octets in frames that
+ never even reach the UCC */
+ u32 rmca; /* Total number of frames that were received
+ successfully with the group address bit set
+ that are not broadcast frames */
+ u32 rbca; /* Total number of frames received successfully
+ that had destination address equal to the
+ broadcast address */
+ u32 scar; /* Statistics carry register */
+	u32 scam;		/* Statistics carry mask register */
+ u8 res5[0x200 - 0x1c4];
+} __packed;
+
+/* UCC GETH TEMODER Register */
+#define TEMODER_TX_RMON_STATISTICS_ENABLE 0x0100 /* enable Tx statistics
+ */
+#define TEMODER_SCHEDULER_ENABLE 0x2000 /* enable scheduler */
+#define TEMODER_IP_CHECKSUM_GENERATE 0x0400 /* generate IPv4
+ checksums */
+#define TEMODER_PERFORMANCE_OPTIMIZATION_MODE1 0x0200 /* enable performance
+ optimization
+ enhancement (mode1) */
+#define TEMODER_RMON_STATISTICS 0x0100 /* enable tx statistics
+ */
+#define TEMODER_NUM_OF_QUEUES_SHIFT (15-15) /* Number of queues <<
+ shift */
+
+/* UCC GETH REMODER Register */
+#define REMODER_RX_RMON_STATISTICS_ENABLE 0x00001000 /* enable Rx
+ statistics */
+#define REMODER_RX_EXTENDED_FEATURES 0x80000000 /* enable
+ extended
+ features */
+#define REMODER_VLAN_OPERATION_TAGGED_SHIFT (31-9 ) /* vlan operation
+ tagged << shift */
+#define REMODER_VLAN_OPERATION_NON_TAGGED_SHIFT (31-10) /* vlan operation non
+ tagged << shift */
+#define REMODER_RX_QOS_MODE_SHIFT (31-15) /* rx QoS mode << shift
+ */
+#define REMODER_RMON_STATISTICS 0x00001000 /* enable rx
+ statistics */
+#define REMODER_RX_EXTENDED_FILTERING 0x00000800 /* extended
+ filtering
+ vs.
+ mpc82xx-like
+ filtering */
+#define REMODER_NUM_OF_QUEUES_SHIFT (31-23) /* Number of queues <<
+ shift */
+#define REMODER_DYNAMIC_MAX_FRAME_LENGTH 0x00000008 /* enable
+ dynamic max
+ frame length
+ */
+#define REMODER_DYNAMIC_MIN_FRAME_LENGTH 0x00000004 /* enable
+ dynamic min
+ frame length
+ */
+#define REMODER_IP_CHECKSUM_CHECK 0x00000002 /* check IPv4
+ checksums */
+#define REMODER_IP_ADDRESS_ALIGNMENT 0x00000001 /* align ip
+ address to
+ 4-byte
+ boundary */
+
+/* UCC GETH Event Register */
+#define UCCE_TXB (UCC_GETH_UCCE_TXB7 | UCC_GETH_UCCE_TXB6 | \
+ UCC_GETH_UCCE_TXB5 | UCC_GETH_UCCE_TXB4 | \
+ UCC_GETH_UCCE_TXB3 | UCC_GETH_UCCE_TXB2 | \
+ UCC_GETH_UCCE_TXB1 | UCC_GETH_UCCE_TXB0)
+
+#define UCCE_RXB (UCC_GETH_UCCE_RXB7 | UCC_GETH_UCCE_RXB6 | \
+ UCC_GETH_UCCE_RXB5 | UCC_GETH_UCCE_RXB4 | \
+ UCC_GETH_UCCE_RXB3 | UCC_GETH_UCCE_RXB2 | \
+ UCC_GETH_UCCE_RXB1 | UCC_GETH_UCCE_RXB0)
+
+#define UCCE_RXF (UCC_GETH_UCCE_RXF7 | UCC_GETH_UCCE_RXF6 | \
+ UCC_GETH_UCCE_RXF5 | UCC_GETH_UCCE_RXF4 | \
+ UCC_GETH_UCCE_RXF3 | UCC_GETH_UCCE_RXF2 | \
+ UCC_GETH_UCCE_RXF1 | UCC_GETH_UCCE_RXF0)
+
+#define UCCE_OTHER (UCC_GETH_UCCE_SCAR | UCC_GETH_UCCE_GRA | \
+ UCC_GETH_UCCE_CBPR | UCC_GETH_UCCE_BSY | \
+ UCC_GETH_UCCE_RXC | UCC_GETH_UCCE_TXC | UCC_GETH_UCCE_TXE)
+
+#define UCCE_RX_EVENTS (UCCE_RXF | UCC_GETH_UCCE_BSY)
+#define UCCE_TX_EVENTS (UCCE_TXB | UCC_GETH_UCCE_TXE)
+
+/* TBI defines */
+#define ENET_TBI_MII_CR 0x00 /* Control */
+#define ENET_TBI_MII_SR 0x01 /* Status */
+#define ENET_TBI_MII_ANA 0x04 /* AN advertisement */
+#define ENET_TBI_MII_ANLPBPA 0x05 /* AN link partner base page ability */
+#define ENET_TBI_MII_ANEX 0x06 /* AN expansion */
+#define ENET_TBI_MII_ANNPT 0x07 /* AN next page transmit */
+#define ENET_TBI_MII_ANLPANP 0x08 /* AN link partner ability next page */
+#define ENET_TBI_MII_EXST 0x0F /* Extended status */
+#define ENET_TBI_MII_JD 0x10 /* Jitter diagnostics */
+#define ENET_TBI_MII_TBICON 0x11 /* TBI control */
+
+/* TBI MDIO register bit fields*/
+#define TBISR_LSTATUS 0x0004
+#define TBICON_CLK_SELECT 0x0020
+#define TBIANA_ASYMMETRIC_PAUSE 0x0100
+#define TBIANA_SYMMETRIC_PAUSE 0x0080
+#define TBIANA_HALF_DUPLEX 0x0040
+#define TBIANA_FULL_DUPLEX 0x0020
+#define TBICR_PHY_RESET 0x8000
+#define TBICR_ANEG_ENABLE 0x1000
+#define TBICR_RESTART_ANEG 0x0200
+#define TBICR_FULL_DUPLEX 0x0100
+#define TBICR_SPEED1_SET 0x0040
+
+#define TBIANA_SETTINGS ( \
+ TBIANA_ASYMMETRIC_PAUSE \
+ | TBIANA_SYMMETRIC_PAUSE \
+ | TBIANA_FULL_DUPLEX \
+ )
+#define TBICR_SETTINGS ( \
+ TBICR_PHY_RESET \
+ | TBICR_ANEG_ENABLE \
+ | TBICR_FULL_DUPLEX \
+ | TBICR_SPEED1_SET \
+ )
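As a point of reference (not part of this header), the composed TBI settings above are typically written to the on-chip TBI PHY's advertisement and control registers; a minimal sketch, assuming tbiphy is a struct phy_device * for the TBI device:

	phy_write(tbiphy, ENET_TBI_MII_ANA, TBIANA_SETTINGS);	/* advertise pause + full duplex */
	phy_write(tbiphy, ENET_TBI_MII_CR, TBICR_SETTINGS);	/* reset, enable AN, 1000/full */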
+
+/* UCC GETH MACCFG1 (MAC Configuration 1 Register) */
+#define MACCFG1_FLOW_RX 0x00000020 /* Flow Control
+ Rx */
+#define MACCFG1_FLOW_TX 0x00000010 /* Flow Control
+ Tx */
+#define MACCFG1_ENABLE_SYNCHED_RX 0x00000008 /* Rx Enable
+ synchronized
+ to Rx stream
+ */
+#define MACCFG1_ENABLE_RX 0x00000004 /* Enable Rx */
+#define MACCFG1_ENABLE_SYNCHED_TX 0x00000002 /* Tx Enable
+ synchronized
+ to Tx stream
+ */
+#define MACCFG1_ENABLE_TX 0x00000001 /* Enable Tx */
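For illustration only (not part of this header), drivers usually bring the MAC up by or'ing the enable bits into maccfg1; a sketch assuming ug_regs is a struct ucc_geth __iomem * and the powerpc setbits32() helper:

	setbits32(&ug_regs->maccfg1, MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX);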
+
+/* UCC GETH MACCFG2 (MAC Configuration 2 Register) */
+#define MACCFG2_PREL_SHIFT (31 - 19) /* Preamble
+ Length <<
+ shift */
+#define MACCFG2_PREL_MASK 0x0000f000 /* Preamble
+ Length mask */
+#define MACCFG2_SRP 0x00000080 /* Soft Receive
+ Preamble */
+#define MACCFG2_STP 0x00000040 /* Soft
+ Transmit
+ Preamble */
+#define MACCFG2_RESERVED_1 0x00000020 /* Reserved -
+ must be set
+ to 1 */
+#define MACCFG2_LC 0x00000010 /* Length Check
+ */
+#define MACCFG2_MPE 0x00000008 /* Magic packet
+ detect */
+#define MACCFG2_FDX 0x00000001 /* Full Duplex */
+#define MACCFG2_FDX_MASK 0x00000001 /* Full Duplex
+ mask */
+#define MACCFG2_PAD_CRC 0x00000004
+#define MACCFG2_CRC_EN 0x00000002
+#define MACCFG2_PAD_AND_CRC_MODE_NONE 0x00000000 /* Neither
+ Padding
+ short frames
+ nor CRC */
+#define MACCFG2_PAD_AND_CRC_MODE_CRC_ONLY 0x00000002 /* Append CRC
+ only */
+#define MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC 0x00000004
+#define MACCFG2_INTERFACE_MODE_NIBBLE 0x00000100 /* nibble mode
+ (MII/RMII/RGMII
+ 10/100 Mbps) */
+#define MACCFG2_INTERFACE_MODE_BYTE 0x00000200 /* byte mode
+ (GMII/TBI/RTBI/RGMII
+ 1000 Mbps) */
+#define MACCFG2_INTERFACE_MODE_MASK 0x00000300 /* mask
+ covering all
+ relevant
+ bits */
+
+/* UCC GETH IPGIFG (Inter-frame Gap / Inter-Frame Gap Register) */
+#define IPGIFG_NON_BACK_TO_BACK_IFG_PART1_SHIFT (31 - 7) /* Non
+ back-to-back
+ inter frame
+ gap part 1.
+ << shift */
+#define IPGIFG_NON_BACK_TO_BACK_IFG_PART2_SHIFT (31 - 15) /* Non
+ back-to-back
+ inter frame
+ gap part 2.
+ << shift */
+#define IPGIFG_MINIMUM_IFG_ENFORCEMENT_SHIFT (31 - 23) /* Minimum IFG
+ Enforcement
+ << shift */
+#define IPGIFG_BACK_TO_BACK_IFG_SHIFT (31 - 31) /* back-to-back
+ inter frame
+ gap << shift
+ */
+#define IPGIFG_NON_BACK_TO_BACK_IFG_PART1_MAX 127 /* Non back-to-back
+ inter frame gap part
+ 1. max val */
+#define IPGIFG_NON_BACK_TO_BACK_IFG_PART2_MAX 127 /* Non back-to-back
+ inter frame gap part
+ 2. max val */
+#define IPGIFG_MINIMUM_IFG_ENFORCEMENT_MAX 255 /* Minimum IFG
+ Enforcement max val */
+#define IPGIFG_BACK_TO_BACK_IFG_MAX 127 /* back-to-back inter
+ frame gap max val */
+#define IPGIFG_NBTB_CS_IPG_MASK 0x7F000000
+#define IPGIFG_NBTB_IPG_MASK 0x007F0000
+#define IPGIFG_MIN_IFG_MASK 0x0000FF00
+#define IPGIFG_BTB_IPG_MASK 0x0000007F
+
+/* UCC GETH HAFDUP (Half Duplex Register) */
+#define HALFDUP_ALT_BEB_TRUNCATION_SHIFT (31 - 11) /* Alternate
+ Binary
+ Exponential
+ Backoff
+ Truncation
+ << shift */
+#define HALFDUP_ALT_BEB_TRUNCATION_MAX 0xf /* Alternate Binary
+ Exponential Backoff
+ Truncation max val */
+#define HALFDUP_ALT_BEB 0x00080000 /* Alternate
+ Binary
+ Exponential
+ Backoff */
+#define HALFDUP_BACK_PRESSURE_NO_BACKOFF 0x00040000 /* Back
+ pressure no
+ backoff */
+#define HALFDUP_NO_BACKOFF 0x00020000 /* No Backoff */
+#define HALFDUP_EXCESSIVE_DEFER 0x00010000 /* Excessive
+ Defer */
+#define HALFDUP_MAX_RETRANSMISSION_SHIFT (31 - 19) /* Maximum
+ Retransmission
+ << shift */
+#define HALFDUP_MAX_RETRANSMISSION_MAX 0xf /* Maximum
+ Retransmission max
+ val */
+#define HALFDUP_COLLISION_WINDOW_SHIFT (31 - 31) /* Collision
+ Window <<
+ shift */
+#define HALFDUP_COLLISION_WINDOW_MAX 0x3f /* Collision Window max
+ val */
+#define HALFDUP_ALT_BEB_TR_MASK 0x00F00000
+#define HALFDUP_RETRANS_MASK 0x0000F000
+#define HALFDUP_COL_WINDOW_MASK 0x0000003F
+
+/* UCC GETH UCCS (Ethernet Status Register) */
+#define UCCS_BPR 0x02 /* Back pressure (in
+ half duplex mode) */
+#define UCCS_PAU 0x02 /* Pause state (in full
+ duplex mode) */
+#define UCCS_MPD 0x01 /* Magic Packet
+ Detected */
+
+/* UCC GETH IFSTAT (Interface Status Register) */
+#define IFSTAT_EXCESS_DEFER 0x00000200 /* Excessive
+ transmission
+ defer */
+
+/* UCC GETH MACSTNADDR1 (Station Address Part 1 Register) */
+#define MACSTNADDR1_OCTET_6_SHIFT (31 - 7) /* Station
+ address 6th
+ octet <<
+ shift */
+#define MACSTNADDR1_OCTET_5_SHIFT (31 - 15) /* Station
+ address 5th
+ octet <<
+ shift */
+#define MACSTNADDR1_OCTET_4_SHIFT (31 - 23) /* Station
+ address 4th
+ octet <<
+ shift */
+#define MACSTNADDR1_OCTET_3_SHIFT (31 - 31) /* Station
+ address 3rd
+ octet <<
+ shift */
+
+/* UCC GETH MACSTNADDR2 (Station Address Part 2 Register) */
+#define MACSTNADDR2_OCTET_2_SHIFT (31 - 7) /* Station
+ address 2nd
+ octet <<
+ shift */
+#define MACSTNADDR2_OCTET_1_SHIFT (31 - 15) /* Station
+ address 1st
+ octet <<
+ shift */
+
+/* UCC GETH UEMPR (Ethernet Mac Parameter Register) */
+#define UEMPR_PAUSE_TIME_VALUE_SHIFT (31 - 15) /* Pause time
+ value <<
+ shift */
+#define UEMPR_EXTENDED_PAUSE_TIME_VALUE_SHIFT (31 - 31) /* Extended
+ pause time
+ value <<
+ shift */
+
+/* UCC GETH UTBIPAR (Ten Bit Interface Physical Address Register) */
+#define UTBIPAR_PHY_ADDRESS_SHIFT (31 - 31) /* Phy address
+ << shift */
+#define UTBIPAR_PHY_ADDRESS_MASK 0x0000001f /* Phy address
+ mask */
+
+/* UCC GETH UESCR (Ethernet Statistics Control Register) */
+#define UESCR_AUTOZ 0x8000 /* Automatically zero
+ addressed
+ statistical counter
+ values */
+#define UESCR_CLRCNT 0x4000 /* Clear all statistics
+ counters */
+#define UESCR_MAXCOV_SHIFT (15 - 7) /* Max
+ Coalescing
+ Value <<
+ shift */
+#define UESCR_SCOV_SHIFT (15 - 15) /* Status
+ Coalescing
+ Value <<
+ shift */
+
+/* UCC GETH UDSR (Data Synchronization Register) */
+#define UDSR_MAGIC 0x067E
+
+struct ucc_geth_thread_data_tx {
+ u8 res0[104];
+} __packed;
+
+struct ucc_geth_thread_data_rx {
+ u8 res0[40];
+} __packed;
+
+/* Send Queue Queue-Descriptor */
+struct ucc_geth_send_queue_qd {
+ u32 bd_ring_base; /* pointer to BD ring base address */
+ u8 res0[0x8];
+ u32 last_bd_completed_address;/* initialize to last entry in BD ring */
+ u8 res1[0x30];
+} __packed;
+
+struct ucc_geth_send_queue_mem_region {
+ struct ucc_geth_send_queue_qd sqqd[NUM_TX_QUEUES];
+} __packed;
+
+struct ucc_geth_thread_tx_pram {
+ u8 res0[64];
+} __packed;
+
+struct ucc_geth_thread_rx_pram {
+ u8 res0[128];
+} __packed;
+
+#define THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING 64
+#define THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8 64
+#define THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16 96
+
+struct ucc_geth_scheduler {
+ u16 cpucount0; /* CPU packet counter */
+ u16 cpucount1; /* CPU packet counter */
+ u16 cecount0; /* QE packet counter */
+ u16 cecount1; /* QE packet counter */
+ u16 cpucount2; /* CPU packet counter */
+ u16 cpucount3; /* CPU packet counter */
+ u16 cecount2; /* QE packet counter */
+ u16 cecount3; /* QE packet counter */
+ u16 cpucount4; /* CPU packet counter */
+ u16 cpucount5; /* CPU packet counter */
+ u16 cecount4; /* QE packet counter */
+ u16 cecount5; /* QE packet counter */
+ u16 cpucount6; /* CPU packet counter */
+ u16 cpucount7; /* CPU packet counter */
+ u16 cecount6; /* QE packet counter */
+ u16 cecount7; /* QE packet counter */
+ u32 weightstatus[NUM_TX_QUEUES]; /* accumulated weight factor */
+ u32 rtsrshadow; /* temporary variable handled by QE */
+ u32 time; /* temporary variable handled by QE */
+ u32 ttl; /* temporary variable handled by QE */
+ u32 mblinterval; /* max burst length interval */
+ u16 nortsrbytetime; /* normalized value of byte time in tsr units */
+ u8 fracsiz; /* radix 2 log value of denom. of
+ NorTSRByteTime */
+ u8 res0[1];
+ u8 strictpriorityq; /* Strict Priority Mask register */
+ u8 txasap; /* Transmit ASAP register */
+ u8 extrabw; /* Extra BandWidth register */
+ u8 oldwfqmask; /* temporary variable handled by QE */
+ u8 weightfactor[NUM_TX_QUEUES];
+ /**< weight factor for queues */
+ u32 minw; /* temporary variable handled by QE */
+ u8 res1[0x70 - 0x64];
+} __packed;
+
+struct ucc_geth_tx_firmware_statistics_pram {
+ u32 sicoltx; /* single collision */
+ u32 mulcoltx; /* multiple collision */
+ u32 latecoltxfr; /* late collision */
+ u32 frabortduecol; /* frames aborted due to transmit collision */
+ u32 frlostinmactxer; /* frames lost due to internal MAC error
+ transmission that are not counted on any
+ other counter */
+ u32 carriersenseertx; /* carrier sense error */
+ u32 frtxok; /* frames transmitted OK */
+ u32 txfrexcessivedefer; /* frames with deferral time greater than
+ specified threshold */
+ u32 txpkts256; /* total packets (including bad) between 256
+ and 511 octets */
+ u32 txpkts512; /* total packets (including bad) between 512
+ and 1023 octets */
+ u32 txpkts1024; /* total packets (including bad) between 1024
+ and 1518 octets */
+ u32 txpktsjumbo; /* total packets (including bad) between 1024
+ and MAXLength octets */
+} __packed;
+
+struct ucc_geth_rx_firmware_statistics_pram {
+ u32 frrxfcser; /* frames with crc error */
+ u32 fraligner; /* frames with alignment error */
+ u32 inrangelenrxer; /* in range length error */
+ u32 outrangelenrxer; /* out of range length error */
+ u32 frtoolong; /* frame too long */
+ u32 runt; /* runt */
+ u32 verylongevent; /* very long event */
+ u32 symbolerror; /* symbol error */
+ u32 dropbsy; /* drop because of BD not ready */
+ u8 res0[0x8];
+ u32 mismatchdrop; /* drop because of MAC filtering (e.g. address
+ or type mismatch) */
+ u32 underpkts; /* total frames less than 64 octets */
+ u32 pkts256; /* total frames (including bad) between 256 and
+ 511 octets */
+ u32 pkts512; /* total frames (including bad) between 512 and
+ 1023 octets */
+ u32 pkts1024; /* total frames (including bad) between 1024
+ and 1518 octets */
+ u32 pktsjumbo; /* total frames (including bad) between 1024
+ and MAXLength octets */
+ u32 frlossinmacer; /* frames lost because of internal MAC error
+ that is not counted in any other counter */
+ u32 pausefr; /* pause frames */
+ u8 res1[0x4];
+ u32 removevlan; /* total frames that had their VLAN tag removed
+ */
+ u32 replacevlan; /* total frames that had their VLAN tag
+ replaced */
+ u32 insertvlan; /* total frames that had their VLAN tag
+ inserted */
+} __packed;
+
+struct ucc_geth_rx_interrupt_coalescing_entry {
+ u32 interruptcoalescingmaxvalue; /* interrupt coalescing max
+ value */
+ u32 interruptcoalescingcounter; /* interrupt coalescing counter,
+ initialize to
+ interruptcoalescingmaxvalue */
+} __packed;
+
+struct ucc_geth_rx_interrupt_coalescing_table {
+ struct ucc_geth_rx_interrupt_coalescing_entry coalescingentry[NUM_RX_QUEUES];
+ /**< interrupt coalescing entry */
+} __packed;
+
+struct ucc_geth_rx_prefetched_bds {
+ struct qe_bd bd[NUM_BDS_IN_PREFETCHED_BDS]; /* prefetched bd */
+} __packed;
+
+struct ucc_geth_rx_bd_queues_entry {
+ u32 bdbaseptr; /* BD base pointer */
+ u32 bdptr; /* BD pointer */
+ u32 externalbdbaseptr; /* external BD base pointer */
+ u32 externalbdptr; /* external BD pointer */
+} __packed;
+
+struct ucc_geth_tx_global_pram {
+ u16 temoder;
+ u8 res0[0x38 - 0x02];
+ u32 sqptr; /* a base pointer to send queue memory region */
+ u32 schedulerbasepointer; /* a base pointer to scheduler memory
+ region */
+ u32 txrmonbaseptr; /* base pointer to Tx RMON statistics counter */
+ u32 tstate; /* tx internal state. High byte contains
+ function code */
+ u8 iphoffset[TX_IP_OFFSET_ENTRY_MAX];
+ u32 vtagtable[0x8]; /* 8 4-byte VLAN tags */
+ u32 tqptr; /* a base pointer to the Tx Queues Memory
+ Region */
+ u8 res2[0x80 - 0x74];
+} __packed;
+
+/* structure representing Extended Filtering Global Parameters in PRAM */
+struct ucc_geth_exf_global_pram {
+ u32 l2pcdptr; /* individual address filter, high */
+ u8 res0[0x10 - 0x04];
+} __packed;
+
+struct ucc_geth_rx_global_pram {
+ u32 remoder; /* ethernet mode reg. */
+ u32 rqptr; /* base pointer to the Rx Queues Memory Region*/
+ u32 res0[0x1];
+ u8 res1[0x20 - 0xC];
+ u16 typeorlen; /* cutoff value below which the type/len field
+ is treated as a length */
+ u8 res2[0x1];
+ u8 rxgstpack; /* acknowledgement on GRACEFUL STOP RX command*/
+ u32 rxrmonbaseptr; /* base pointer to Rx RMON statistics counter */
+ u8 res3[0x30 - 0x28];
+ u32 intcoalescingptr; /* Interrupt coalescing table pointer */
+ u8 res4[0x36 - 0x34];
+ u8 rstate; /* rx internal state. High byte contains
+ function code */
+ u8 res5[0x46 - 0x37];
+ u16 mrblr; /* max receive buffer length reg. */
+ u32 rbdqptr; /* base pointer to RxBD parameter table
+ description */
+ u16 mflr; /* max frame length reg. */
+ u16 minflr; /* min frame length reg. */
+ u16 maxd1; /* max dma1 length reg. */
+ u16 maxd2; /* max dma2 length reg. */
+ u32 ecamptr; /* external CAM address */
+ u32 l2qt; /* VLAN priority mapping table. */
+ u32 l3qt[0x8]; /* IP priority mapping table. */
+ u16 vlantype; /* vlan type */
+ u16 vlantci; /* default vlan tci */
+ u8 addressfiltering[64]; /* address filtering data structure */
+ u32 exfGlobalParam; /* base address for extended filtering global
+ parameters */
+ u8 res6[0x100 - 0xC4]; /* Initialize to zero */
+} __packed;
+
+#define GRACEFUL_STOP_ACKNOWLEDGE_RX 0x01
+
+/* structure representing InitEnet command */
+struct ucc_geth_init_pram {
+ u8 resinit1;
+ u8 resinit2;
+ u8 resinit3;
+ u8 resinit4;
+ u16 resinit5;
+ u8 res1[0x1];
+ u8 largestexternallookupkeysize;
+ u32 rgftgfrxglobal;
+ u32 rxthread[ENET_INIT_PARAM_MAX_ENTRIES_RX]; /* rx threads */
+ u8 res2[0x38 - 0x30];
+ u32 txglobal; /* tx global */
+ u32 txthread[ENET_INIT_PARAM_MAX_ENTRIES_TX]; /* tx threads */
+ u8 res3[0x1];
+} __packed;
+
+#define ENET_INIT_PARAM_RGF_SHIFT (32 - 4)
+#define ENET_INIT_PARAM_TGF_SHIFT (32 - 8)
+
+#define ENET_INIT_PARAM_RISC_MASK 0x0000003f
+#define ENET_INIT_PARAM_PTR_MASK 0x00ffffc0
+#define ENET_INIT_PARAM_SNUM_MASK 0xff000000
+#define ENET_INIT_PARAM_SNUM_SHIFT 24
+
+#define ENET_INIT_PARAM_MAGIC_RES_INIT1 0x06
+#define ENET_INIT_PARAM_MAGIC_RES_INIT2 0x30
+#define ENET_INIT_PARAM_MAGIC_RES_INIT3 0xff
+#define ENET_INIT_PARAM_MAGIC_RES_INIT4 0x00
+#define ENET_INIT_PARAM_MAGIC_RES_INIT5 0x0400
+
+/* structure representing 82xx Address Filtering Enet Address in PRAM */
+struct ucc_geth_82xx_enet_address {
+ u8 res1[0x2];
+ u16 h; /* address (MSB) */
+ u16 m; /* address */
+ u16 l; /* address (LSB) */
+} __packed;
+
+/* structure representing 82xx Address Filtering PRAM */
+struct ucc_geth_82xx_address_filtering_pram {
+ u32 iaddr_h; /* individual address filter, high */
+ u32 iaddr_l; /* individual address filter, low */
+ u32 gaddr_h; /* group address filter, high */
+ u32 gaddr_l; /* group address filter, low */
+ struct ucc_geth_82xx_enet_address __iomem taddr;
+ struct ucc_geth_82xx_enet_address __iomem paddr[NUM_OF_PADDRS];
+ u8 res0[0x40 - 0x38];
+} __packed;
+
+/* GETH Tx firmware statistics structure, used when calling
+ UCC_GETH_GetStatistics. */
+struct ucc_geth_tx_firmware_statistics {
+ u32 sicoltx; /* single collision */
+ u32 mulcoltx; /* multiple collision */
+ u32 latecoltxfr; /* late collision */
+ u32 frabortduecol; /* frames aborted due to transmit collision */
+ u32 frlostinmactxer; /* frames lost due to internal MAC error
+ transmission that are not counted on any
+ other counter */
+ u32 carriersenseertx; /* carrier sense error */
+ u32 frtxok; /* frames transmitted OK */
+ u32 txfrexcessivedefer; /* frames with deferral time greater than
+ specified threshold */
+ u32 txpkts256; /* total packets (including bad) between 256
+ and 511 octets */
+ u32 txpkts512; /* total packets (including bad) between 512
+ and 1023 octets */
+ u32 txpkts1024; /* total packets (including bad) between 1024
+ and 1518 octets */
+ u32 txpktsjumbo; /* total packets (including bad) between 1024
+ and MAXLength octets */
+} __packed;
+
+/* GETH Rx firmware statistics structure, used when calling
+ UCC_GETH_GetStatistics. */
+struct ucc_geth_rx_firmware_statistics {
+ u32 frrxfcser; /* frames with crc error */
+ u32 fraligner; /* frames with alignment error */
+ u32 inrangelenrxer; /* in range length error */
+ u32 outrangelenrxer; /* out of range length error */
+ u32 frtoolong; /* frame too long */
+ u32 runt; /* runt */
+ u32 verylongevent; /* very long event */
+ u32 symbolerror; /* symbol error */
+ u32 dropbsy; /* drop because of BD not ready */
+ u8 res0[0x8];
+ u32 mismatchdrop; /* drop because of MAC filtering (e.g. address
+ or type mismatch) */
+ u32 underpkts; /* total frames less than 64 octets */
+ u32 pkts256; /* total frames (including bad) between 256 and
+ 511 octets */
+ u32 pkts512; /* total frames (including bad) between 512 and
+ 1023 octets */
+ u32 pkts1024; /* total frames (including bad) between 1024
+ and 1518 octets */
+ u32 pktsjumbo; /* total frames (including bad) between 1024
+ and MAXLength octets */
+ u32 frlossinmacer; /* frames lost because of internal MAC error
+ that is not counted in any other counter */
+ u32 pausefr; /* pause frames */
+ u8 res1[0x4];
+ u32 removevlan; /* total frames that had their VLAN tag removed
+ */
+ u32 replacevlan; /* total frames that had their VLAN tag
+ replaced */
+ u32 insertvlan; /* total frames that had their VLAN tag
+ inserted */
+} __packed;
+
+/* GETH hardware statistics structure, used when calling
+ UCC_GETH_GetStatistics. */
+struct ucc_geth_hardware_statistics {
+ u32 tx64; /* Total number of frames (including bad
+ frames) transmitted that were exactly of the
+ minimal length (64 for untagged, 68 for
+ tagged, or with length exactly equal to the
+ parameter MINLength) */
+ u32 tx127; /* Total number of frames (including bad
+ frames) transmitted that were between
+ MINLength (Including FCS length==4) and 127
+ octets */
+ u32 tx255; /* Total number of frames (including bad
+ frames) transmitted that were between 128
+ (Including FCS length==4) and 255 octets */
+ u32 rx64; /* Total number of frames received including
+ bad frames that were exactly of the minimal
+ length (64 bytes) */
+ u32 rx127; /* Total number of frames (including bad
+ frames) received that were between MINLength
+ (Including FCS length==4) and 127 octets */
+ u32 rx255; /* Total number of frames (including bad
+ frames) received that were between 128
+ (Including FCS length==4) and 255 octets */
+ u32 txok; /* Total number of octets residing in frames
+ that were involved in successful
+ transmission */
+ u16 txcf; /* Total number of PAUSE control frames
+ transmitted by this MAC */
+ u32 tmca; /* Total number of frames that were transmitted
+ successfully with the group address bit set
+ that are not broadcast frames */
+ u32 tbca; /* Total number of frames transmitted
+ successfully that had destination address
+ field equal to the broadcast address */
+ u32 rxfok; /* Total number of frames received OK */
+ u32 rxbok; /* Total number of octets received OK */
+ u32 rbyt; /* Total number of octets received including
+ octets in bad frames. Must be implemented in
+ HW because it includes octets in frames that
+ never even reach the UCC */
+ u32 rmca; /* Total number of frames that were received
+ successfully with the group address bit set
+ that are not broadcast frames */
+ u32 rbca; /* Total number of frames received successfully
+ that had destination address equal to the
+ broadcast address */
+} __packed;
+
+/* UCC GETH Tx errors returned via TxConf callback */
+#define TX_ERRORS_DEF 0x0200
+#define TX_ERRORS_EXDEF 0x0100
+#define TX_ERRORS_LC 0x0080
+#define TX_ERRORS_RL 0x0040
+#define TX_ERRORS_RC_MASK 0x003C
+#define TX_ERRORS_RC_SHIFT 2
+#define TX_ERRORS_UN 0x0002
+#define TX_ERRORS_CSL 0x0001
+
+/* UCC GETH Rx errors returned via RxStore callback */
+#define RX_ERRORS_CMR 0x0200
+#define RX_ERRORS_M 0x0100
+#define RX_ERRORS_BC 0x0080
+#define RX_ERRORS_MC 0x0040
+
+/* Transmit BD. These are in addition to values defined in uccf. */
+#define T_VID 0x003c0000 /* insert VLAN id index mask. */
+#define T_DEF (((u32) TX_ERRORS_DEF ) << 16)
+#define T_EXDEF (((u32) TX_ERRORS_EXDEF ) << 16)
+#define T_LC (((u32) TX_ERRORS_LC ) << 16)
+#define T_RL (((u32) TX_ERRORS_RL ) << 16)
+#define T_RC_MASK (((u32) TX_ERRORS_RC_MASK ) << 16)
+#define T_UN (((u32) TX_ERRORS_UN ) << 16)
+#define T_CSL (((u32) TX_ERRORS_CSL ) << 16)
+#define T_ERRORS_REPORT (T_DEF | T_EXDEF | T_LC | T_RL | T_RC_MASK \
+ | T_UN | T_CSL) /* transmit errors to report */
+
+/* Receive BD. These are in addition to values defined in uccf. */
+#define R_LG 0x00200000 /* Frame length violation. */
+#define R_NO 0x00100000 /* Non-octet aligned frame. */
+#define R_SH 0x00080000 /* Short frame. */
+#define R_CR 0x00040000 /* CRC error. */
+#define R_OV 0x00020000 /* Overrun. */
+#define R_IPCH 0x00010000 /* IP checksum check failed. */
+#define R_CMR (((u32) RX_ERRORS_CMR ) << 16)
+#define R_M (((u32) RX_ERRORS_M ) << 16)
+#define R_BC (((u32) RX_ERRORS_BC ) << 16)
+#define R_MC (((u32) RX_ERRORS_MC ) << 16)
+#define R_ERRORS_REPORT (R_CMR | R_M | R_BC | R_MC) /* receive errors to
+ report */
+#define R_ERRORS_FATAL (R_LG | R_NO | R_SH | R_CR | \
+ R_OV | R_IPCH) /* receive errors to discard */
+
+/* Alignments */
+#define UCC_GETH_RX_GLOBAL_PRAM_ALIGNMENT 256
+#define UCC_GETH_TX_GLOBAL_PRAM_ALIGNMENT 128
+#define UCC_GETH_THREAD_RX_PRAM_ALIGNMENT 128
+#define UCC_GETH_THREAD_TX_PRAM_ALIGNMENT 64
+#define UCC_GETH_THREAD_DATA_ALIGNMENT 256 /* spec gives values
+ based on num of
+ threads, but always
+ using the maximum is
+ easier */
+#define UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT 32
+#define UCC_GETH_SCHEDULER_ALIGNMENT 8 /* This is a guess */
+#define UCC_GETH_TX_STATISTICS_ALIGNMENT 4 /* This is a guess */
+#define UCC_GETH_RX_STATISTICS_ALIGNMENT 4 /* This is a guess */
+#define UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT 64
+#define UCC_GETH_RX_BD_QUEUES_ALIGNMENT 8 /* This is a guess */
+#define UCC_GETH_RX_PREFETCHED_BDS_ALIGNMENT 128 /* This is a guess */
+#define UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT 8 /* This
+ is a
+ guess
+ */
+#define UCC_GETH_RX_BD_RING_ALIGNMENT 32
+#define UCC_GETH_TX_BD_RING_ALIGNMENT 32
+#define UCC_GETH_MRBLR_ALIGNMENT 128
+#define UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT 4
+#define UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT 32
+#define UCC_GETH_RX_DATA_BUF_ALIGNMENT 64
+
+#define UCC_GETH_TAD_EF 0x80
+#define UCC_GETH_TAD_V 0x40
+#define UCC_GETH_TAD_REJ 0x20
+#define UCC_GETH_TAD_VTAG_OP_RIGHT_SHIFT 2
+#define UCC_GETH_TAD_VTAG_OP_SHIFT 6
+#define UCC_GETH_TAD_V_NON_VTAG_OP 0x20
+#define UCC_GETH_TAD_RQOS_SHIFT 0
+#define UCC_GETH_TAD_V_PRIORITY_SHIFT 5
+#define UCC_GETH_TAD_CFI 0x10
+
+#define UCC_GETH_VLAN_PRIORITY_MAX 8
+#define UCC_GETH_IP_PRIORITY_MAX 64
+#define UCC_GETH_TX_VTAG_TABLE_ENTRY_MAX 8
+#define UCC_GETH_RX_BD_RING_SIZE_MIN 8
+#define UCC_GETH_TX_BD_RING_SIZE_MIN 2
+#define UCC_GETH_BD_RING_SIZE_MAX 0xffff
+
+#define UCC_GETH_SIZE_OF_BD QE_SIZEOF_BD
+
+/* Driver definitions */
+#define TX_BD_RING_LEN 0x10
+#define RX_BD_RING_LEN 0x20
+
+#define TX_RING_MOD_MASK(size) (size-1)
+#define RX_RING_MOD_MASK(size) (size-1)
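The MOD_MASK macros assume the ring length is a power of two (as TX_BD_RING_LEN and RX_BD_RING_LEN above are), so advancing a BD index is a single AND rather than a modulo; an illustrative sketch with hypothetical names:

	static inline u16 next_bd_index(u16 cur, u16 ring_len)
	{
		return (cur + 1) & TX_RING_MOD_MASK(ring_len);	/* wraps to 0 at ring_len */
	}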
+
+#define ENET_GROUP_ADDR 0x01 /* Group address mask
+ for ethernet
+ addresses */
+
+#define TX_TIMEOUT (1*HZ)
+#define SKB_ALLOC_TIMEOUT 100000
+#define PHY_INIT_TIMEOUT 100000
+#define PHY_CHANGE_TIME 2
+
+/* Fast Ethernet (10/100 Mbps) */
+#define UCC_GETH_URFS_INIT 512 /* Rx virtual FIFO size
+ */
+#define UCC_GETH_URFET_INIT 256 /* 1/2 urfs */
+#define UCC_GETH_URFSET_INIT 384 /* 3/4 urfs */
+#define UCC_GETH_UTFS_INIT 512 /* Tx virtual FIFO size
+ */
+#define UCC_GETH_UTFET_INIT 256 /* 1/2 utfs */
+#define UCC_GETH_UTFTT_INIT 256 /* 1/2 utfs
+ due to errata */
+/* Gigabit Ethernet (1000 Mbps) */
+#define UCC_GETH_URFS_GIGA_INIT 4096/*2048*/ /* Rx virtual
+ FIFO size */
+#define UCC_GETH_URFET_GIGA_INIT 2048/*1024*/ /* 1/2 urfs */
+#define UCC_GETH_URFSET_GIGA_INIT 3072/*1536*/ /* 3/4 urfs */
+#define UCC_GETH_UTFS_GIGA_INIT 4096/*2048*/ /* Tx virtual
+ FIFO size */
+#define UCC_GETH_UTFET_GIGA_INIT 2048/*1024*/ /* 1/2 utfs */
+#define UCC_GETH_UTFTT_GIGA_INIT 4096/*0x40*/ /* Tx virtual
+ FIFO size */
+
+#define UCC_GETH_REMODER_INIT 0 /* bits that must be
+ set */
+#define UCC_GETH_TEMODER_INIT 0xC000 /* bits that must be
+ set */
+
+/* Initial value for UPSMR */
+#define UCC_GETH_UPSMR_INIT UCC_GETH_UPSMR_RES1
+
+#define UCC_GETH_MACCFG1_INIT 0
+#define UCC_GETH_MACCFG2_INIT (MACCFG2_RESERVED_1)
+
+/* Ethernet Address Type. */
+enum enet_addr_type {
+ ENET_ADDR_TYPE_INDIVIDUAL,
+ ENET_ADDR_TYPE_GROUP,
+ ENET_ADDR_TYPE_BROADCAST
+};
+
+/* UCC GETH 82xx Ethernet Address Recognition Location */
+enum ucc_geth_enet_address_recognition_location {
+ UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_STATION_ADDRESS,/* station
+ address */
+ UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_PADDR_FIRST, /* additional
+ station
+ address
+ paddr1 */
+ UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_PADDR2, /* additional
+ station
+ address
+ paddr2 */
+ UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_PADDR3, /* additional
+ station
+ address
+ paddr3 */
+ UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_PADDR_LAST, /* additional
+ station
+ address
+ paddr4 */
+ UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_GROUP_HASH, /* group hash */
+ UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_INDIVIDUAL_HASH /* individual
+ hash */
+};
+
+/* UCC GETH vlan operation tagged */
+enum ucc_geth_vlan_operation_tagged {
+ UCC_GETH_VLAN_OPERATION_TAGGED_NOP = 0x0, /* Tagged - nop */
+ UCC_GETH_VLAN_OPERATION_TAGGED_REPLACE_VID_PORTION_OF_Q_TAG
+ = 0x1, /* Tagged - replace vid portion of q tag */
+ UCC_GETH_VLAN_OPERATION_TAGGED_IF_VID0_REPLACE_VID_WITH_DEFAULT_VALUE
+ = 0x2, /* Tagged - if vid0 replace vid with default value */
+ UCC_GETH_VLAN_OPERATION_TAGGED_EXTRACT_Q_TAG_FROM_FRAME
+ = 0x3 /* Tagged - extract q tag from frame */
+};
+
+/* UCC GETH vlan operation non-tagged */
+enum ucc_geth_vlan_operation_non_tagged {
+ UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP = 0x0, /* Non tagged - nop */
+ UCC_GETH_VLAN_OPERATION_NON_TAGGED_Q_TAG_INSERT = 0x1 /* Non tagged -
+ q tag insert
+ */
+};
+
+/* UCC GETH Rx Quality of Service Mode */
+enum ucc_geth_qos_mode {
+ UCC_GETH_QOS_MODE_DEFAULT = 0x0, /* default queue */
+ UCC_GETH_QOS_MODE_QUEUE_NUM_FROM_L2_CRITERIA = 0x1, /* queue
+ determined
+ by L2
+ criteria */
+ UCC_GETH_QOS_MODE_QUEUE_NUM_FROM_L3_CRITERIA = 0x2 /* queue
+ determined
+ by L3
+ criteria */
+};
+
+/* UCC GETH Statistics Gathering Mode - These are bit flags, 'or' them together
+ for combined functionality */
+enum ucc_geth_statistics_gathering_mode {
+ UCC_GETH_STATISTICS_GATHERING_MODE_NONE = 0x00000000, /* No
+ statistics
+ gathering */
+ UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE = 0x00000001,/* Enable
+ hardware
+ statistics
+ gathering
+ */
+ UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX = 0x00000004,/*Enable
+ firmware
+ tx
+ statistics
+ gathering
+ */
+ UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX = 0x00000008/* Enable
+ firmware
+ rx
+ statistics
+ gathering
+ */
+};
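For example (illustrative only), combined gathering is requested by or'ing the flags together when filling in the statisticsMode field of ucc_geth_info:

	ug_info->statisticsMode = UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE |
				  UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX |
				  UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX;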
+
+/* UCC GETH Pad and CRC Mode - Note, Padding without CRC is not possible */
+enum ucc_geth_maccfg2_pad_and_crc_mode {
+ UCC_GETH_PAD_AND_CRC_MODE_NONE
+ = MACCFG2_PAD_AND_CRC_MODE_NONE, /* Neither Padding
+ short frames
+ nor CRC */
+ UCC_GETH_PAD_AND_CRC_MODE_CRC_ONLY
+ = MACCFG2_PAD_AND_CRC_MODE_CRC_ONLY, /* Append
+ CRC only */
+ UCC_GETH_PAD_AND_CRC_MODE_PAD_AND_CRC =
+ MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC
+};
+
+/* UCC GETH upsmr Flow Control Mode */
+enum ucc_geth_flow_control_mode {
+ UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_NONE = 0x00000000, /* No automatic
+ flow control
+ */
+ UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_PAUSE_WHEN_EMERGENCY
+ = 0x00004000 /* Send pause frame when RxFIFO reaches its
+ emergency threshold */
+};
+
+/* UCC GETH number of threads */
+enum ucc_geth_num_of_threads {
+ UCC_GETH_NUM_OF_THREADS_1 = 0x1, /* 1 */
+ UCC_GETH_NUM_OF_THREADS_2 = 0x2, /* 2 */
+ UCC_GETH_NUM_OF_THREADS_4 = 0x0, /* 4 */
+ UCC_GETH_NUM_OF_THREADS_6 = 0x3, /* 6 */
+ UCC_GETH_NUM_OF_THREADS_8 = 0x4 /* 8 */
+};
+
+/* UCC GETH number of station addresses */
+enum ucc_geth_num_of_station_addresses {
+ UCC_GETH_NUM_OF_STATION_ADDRESSES_1, /* 1 */
+ UCC_GETH_NUM_OF_STATION_ADDRESSES_5 /* 5 */
+};
+
+/* UCC GETH 82xx Ethernet Address Container */
+struct enet_addr_container {
+ u8 address[ETH_ALEN]; /* ethernet address */
+ enum ucc_geth_enet_address_recognition_location location; /* location in
+ 82xx address
+ recognition
+ hardware */
+ struct list_head node;
+};
+
+#define ENET_ADDR_CONT_ENTRY(ptr) list_entry(ptr, struct enet_addr_container, node)
+
+/* UCC GETH Termination Action Descriptor (TAD) structure. */
+struct ucc_geth_tad_params {
+ int rx_non_dynamic_extended_features_mode;
+ int reject_frame;
+ enum ucc_geth_vlan_operation_tagged vtag_op;
+ enum ucc_geth_vlan_operation_non_tagged vnontag_op;
+ enum ucc_geth_qos_mode rqos;
+ u8 vpri;
+ u16 vid;
+};
+
+/* GETH protocol initialization structure */
+struct ucc_geth_info {
+ struct ucc_fast_info uf_info;
+ u8 numQueuesTx;
+ u8 numQueuesRx;
+ int ipCheckSumCheck;
+ int ipCheckSumGenerate;
+ int rxExtendedFiltering;
+ u32 extendedFilteringChainPointer;
+ u16 typeorlen;
+ int dynamicMaxFrameLength;
+ int dynamicMinFrameLength;
+ u8 nonBackToBackIfgPart1;
+ u8 nonBackToBackIfgPart2;
+ u8 miminumInterFrameGapEnforcement;
+ u8 backToBackInterFrameGap;
+ int ipAddressAlignment;
+ int lengthCheckRx;
+ u32 mblinterval;
+ u16 nortsrbytetime;
+ u8 fracsiz;
+ u8 strictpriorityq;
+ u8 txasap;
+ u8 extrabw;
+ int miiPreambleSupress;
+ u8 altBebTruncation;
+ int altBeb;
+ int backPressureNoBackoff;
+ int noBackoff;
+ int excessDefer;
+ u8 maxRetransmission;
+ u8 collisionWindow;
+ int pro;
+ int cap;
+ int rsh;
+ int rlpb;
+ int cam;
+ int bro;
+ int ecm;
+ int receiveFlowControl;
+ int transmitFlowControl;
+ u8 maxGroupAddrInHash;
+ u8 maxIndAddrInHash;
+ u8 prel;
+ u16 maxFrameLength;
+ u16 minFrameLength;
+ u16 maxD1Length;
+ u16 maxD2Length;
+ u16 vlantype;
+ u16 vlantci;
+ u32 ecamptr;
+ u32 eventRegMask;
+ u16 pausePeriod;
+ u16 extensionField;
+ struct device_node *phy_node;
+ struct device_node *tbi_node;
+ u8 weightfactor[NUM_TX_QUEUES];
+ u8 interruptcoalescingmaxvalue[NUM_RX_QUEUES];
+ u8 l2qt[UCC_GETH_VLAN_PRIORITY_MAX];
+ u8 l3qt[UCC_GETH_IP_PRIORITY_MAX];
+ u32 vtagtable[UCC_GETH_TX_VTAG_TABLE_ENTRY_MAX];
+ u8 iphoffset[TX_IP_OFFSET_ENTRY_MAX];
+ u16 bdRingLenTx[NUM_TX_QUEUES];
+ u16 bdRingLenRx[NUM_RX_QUEUES];
+ enum ucc_geth_num_of_station_addresses numStationAddresses;
+ enum qe_fltr_largest_external_tbl_lookup_key_size
+ largestexternallookupkeysize;
+ enum ucc_geth_statistics_gathering_mode statisticsMode;
+ enum ucc_geth_vlan_operation_tagged vlanOperationTagged;
+ enum ucc_geth_vlan_operation_non_tagged vlanOperationNonTagged;
+ enum ucc_geth_qos_mode rxQoSMode;
+ enum ucc_geth_flow_control_mode aufc;
+ enum ucc_geth_maccfg2_pad_and_crc_mode padAndCrc;
+ enum ucc_geth_num_of_threads numThreadsTx;
+ enum ucc_geth_num_of_threads numThreadsRx;
+ unsigned int riscTx;
+ unsigned int riscRx;
+};
+
+/* structure representing UCC GETH */
+struct ucc_geth_private {
+ struct ucc_geth_info *ug_info;
+ struct ucc_fast_private *uccf;
+ struct device *dev;
+ struct net_device *ndev;
+ struct napi_struct napi;
+ struct work_struct timeout_work;
+ struct ucc_geth __iomem *ug_regs;
+ struct ucc_geth_init_pram *p_init_enet_param_shadow;
+ struct ucc_geth_exf_global_pram __iomem *p_exf_glbl_param;
+ u32 exf_glbl_param_offset;
+ struct ucc_geth_rx_global_pram __iomem *p_rx_glbl_pram;
+ u32 rx_glbl_pram_offset;
+ struct ucc_geth_tx_global_pram __iomem *p_tx_glbl_pram;
+ u32 tx_glbl_pram_offset;
+ struct ucc_geth_send_queue_mem_region __iomem *p_send_q_mem_reg;
+ u32 send_q_mem_reg_offset;
+ struct ucc_geth_thread_data_tx __iomem *p_thread_data_tx;
+ u32 thread_dat_tx_offset;
+ struct ucc_geth_thread_data_rx __iomem *p_thread_data_rx;
+ u32 thread_dat_rx_offset;
+ struct ucc_geth_scheduler __iomem *p_scheduler;
+ u32 scheduler_offset;
+ struct ucc_geth_tx_firmware_statistics_pram __iomem *p_tx_fw_statistics_pram;
+ u32 tx_fw_statistics_pram_offset;
+ struct ucc_geth_rx_firmware_statistics_pram __iomem *p_rx_fw_statistics_pram;
+ u32 rx_fw_statistics_pram_offset;
+ struct ucc_geth_rx_interrupt_coalescing_table __iomem *p_rx_irq_coalescing_tbl;
+ u32 rx_irq_coalescing_tbl_offset;
+ struct ucc_geth_rx_bd_queues_entry __iomem *p_rx_bd_qs_tbl;
+ u32 rx_bd_qs_tbl_offset;
+ u8 __iomem *p_tx_bd_ring[NUM_TX_QUEUES];
+ u32 tx_bd_ring_offset[NUM_TX_QUEUES];
+ u8 __iomem *p_rx_bd_ring[NUM_RX_QUEUES];
+ u32 rx_bd_ring_offset[NUM_RX_QUEUES];
+ u8 __iomem *confBd[NUM_TX_QUEUES];
+ u8 __iomem *txBd[NUM_TX_QUEUES];
+ u8 __iomem *rxBd[NUM_RX_QUEUES];
+ int badFrame[NUM_RX_QUEUES];
+ u16 cpucount[NUM_TX_QUEUES];
+ u16 __iomem *p_cpucount[NUM_TX_QUEUES];
+ int indAddrRegUsed[NUM_OF_PADDRS];
+ u8 paddr[NUM_OF_PADDRS][ETH_ALEN]; /* ethernet address */
+ u8 numGroupAddrInHash;
+ u8 numIndAddrInHash;
+ u8 numIndAddrInReg;
+ int rx_extended_features;
+ int rx_non_dynamic_extended_features;
+ struct list_head conf_skbs;
+ struct list_head group_hash_q;
+ struct list_head ind_hash_q;
+ u32 saved_uccm;
+ spinlock_t lock;
+ /* pointers to arrays of skbuffs for tx and rx */
+ struct sk_buff **tx_skbuff[NUM_TX_QUEUES];
+ struct sk_buff **rx_skbuff[NUM_RX_QUEUES];
+ /* indices pointing to the next free skb in skb arrays */
+ u16 skb_curtx[NUM_TX_QUEUES];
+ u16 skb_currx[NUM_RX_QUEUES];
+ /* index of the first skb which hasn't been transmitted yet. */
+ u16 skb_dirtytx[NUM_TX_QUEUES];
+
+ struct ugeth_mii_info *mii_info;
+ struct phy_device *phydev;
+ phy_interface_t phy_interface;
+ int max_speed;
+ uint32_t msg_enable;
+ int oldspeed;
+ int oldduplex;
+ int oldlink;
+ int wol_en;
+
+ struct device_node *node;
+};
+
+void uec_set_ethtool_ops(struct net_device *netdev);
+int init_flow_control_params(u32 automatic_flow_control_mode,
+ int rx_flow_control_enable, int tx_flow_control_enable,
+ u16 pause_period, u16 extension_field,
+ u32 __iomem *upsmr_register, u32 __iomem *uempr_register,
+ u32 __iomem *maccfg1_register);
+
+
+#endif /* __UCC_GETH_H__ */
diff --git a/kernel/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/kernel/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
new file mode 100644
index 000000000..cc83350d5
--- /dev/null
+++ b/kernel/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
@@ -0,0 +1,421 @@
+/*
+ * Copyright (c) 2007 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Description: QE UCC Gigabit Ethernet Ethtool API Set
+ *
+ * Author: Li Yang <leoli@freescale.com>
+ *
+ * Limitation:
+ * Can only get/set settings of the first queue.
+ * Need to re-open the interface manually after changing some parameters.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/stddef.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+#include <asm/types.h>
+
+#include "ucc_geth.h"
+
+static const char hw_stat_gstrings[][ETH_GSTRING_LEN] = {
+ "tx-64-frames",
+ "tx-65-127-frames",
+ "tx-128-255-frames",
+ "rx-64-frames",
+ "rx-65-127-frames",
+ "rx-128-255-frames",
+ "tx-bytes-ok",
+ "tx-pause-frames",
+ "tx-multicast-frames",
+ "tx-broadcast-frames",
+ "rx-frames",
+ "rx-bytes-ok",
+ "rx-bytes-all",
+ "rx-multicast-frames",
+ "rx-broadcast-frames",
+ "stats-counter-carry",
+ "stats-counter-mask",
+ "rx-dropped-frames",
+};
+
+static const char tx_fw_stat_gstrings[][ETH_GSTRING_LEN] = {
+ "tx-single-collision",
+ "tx-multiple-collision",
+ "tx-late-collsion",
+ "tx-aborted-frames",
+ "tx-lost-frames",
+ "tx-carrier-sense-errors",
+ "tx-frames-ok",
+ "tx-excessive-differ-frames",
+ "tx-256-511-frames",
+ "tx-512-1023-frames",
+ "tx-1024-1518-frames",
+ "tx-jumbo-frames",
+};
+
+static const char rx_fw_stat_gstrings[][ETH_GSTRING_LEN] = {
+ "rx-crc-errors",
+ "rx-alignment-errors",
+ "rx-in-range-length-errors",
+ "rx-out-of-range-length-errors",
+ "rx-too-long-frames",
+ "rx-runt",
+ "rx-very-long-event",
+ "rx-symbol-errors",
+ "rx-busy-drop-frames",
+ "reserved",
+ "reserved",
+ "rx-mismatch-drop-frames",
+ "rx-small-than-64",
+ "rx-256-511-frames",
+ "rx-512-1023-frames",
+ "rx-1024-1518-frames",
+ "rx-jumbo-frames",
+ "rx-mac-error-loss",
+ "rx-pause-frames",
+ "reserved",
+ "rx-vlan-removed",
+ "rx-vlan-replaced",
+ "rx-vlan-inserted",
+ "rx-ip-checksum-errors",
+};
+
+#define UEC_HW_STATS_LEN ARRAY_SIZE(hw_stat_gstrings)
+#define UEC_TX_FW_STATS_LEN ARRAY_SIZE(tx_fw_stat_gstrings)
+#define UEC_RX_FW_STATS_LEN ARRAY_SIZE(rx_fw_stat_gstrings)
+
+static int
+uec_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(netdev);
+ struct phy_device *phydev = ugeth->phydev;
+ struct ucc_geth_info *ug_info = ugeth->ug_info;
+
+ if (!phydev)
+ return -ENODEV;
+
+ ecmd->maxtxpkt = 1;
+ ecmd->maxrxpkt = ug_info->interruptcoalescingmaxvalue[0];
+
+ return phy_ethtool_gset(phydev, ecmd);
+}
+
+static int
+uec_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(netdev);
+ struct phy_device *phydev = ugeth->phydev;
+
+ if (!phydev)
+ return -ENODEV;
+
+ return phy_ethtool_sset(phydev, ecmd);
+}
+
+static void
+uec_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(netdev);
+
+ pause->autoneg = ugeth->phydev->autoneg;
+
+ if (ugeth->ug_info->receiveFlowControl)
+ pause->rx_pause = 1;
+ if (ugeth->ug_info->transmitFlowControl)
+ pause->tx_pause = 1;
+}
+
+static int
+uec_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(netdev);
+ int ret = 0;
+
+ ugeth->ug_info->receiveFlowControl = pause->rx_pause;
+ ugeth->ug_info->transmitFlowControl = pause->tx_pause;
+
+ if (ugeth->phydev->autoneg) {
+ if (netif_running(netdev)) {
+ /* FIXME: automatically restart */
+ netdev_info(netdev, "Please re-open the interface\n");
+ }
+ } else {
+ struct ucc_geth_info *ug_info = ugeth->ug_info;
+
+ ret = init_flow_control_params(ug_info->aufc,
+ ug_info->receiveFlowControl,
+ ug_info->transmitFlowControl,
+ ug_info->pausePeriod,
+ ug_info->extensionField,
+ &ugeth->uccf->uf_regs->upsmr,
+ &ugeth->ug_regs->uempr,
+ &ugeth->ug_regs->maccfg1);
+ }
+
+ return ret;
+}
+
+static uint32_t
+uec_get_msglevel(struct net_device *netdev)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(netdev);
+ return ugeth->msg_enable;
+}
+
+static void
+uec_set_msglevel(struct net_device *netdev, uint32_t data)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(netdev);
+ ugeth->msg_enable = data;
+}
+
+static int
+uec_get_regs_len(struct net_device *netdev)
+{
+ return sizeof(struct ucc_geth);
+}
+
+static void
+uec_get_regs(struct net_device *netdev,
+ struct ethtool_regs *regs, void *p)
+{
+ int i;
+ struct ucc_geth_private *ugeth = netdev_priv(netdev);
+ u32 __iomem *ug_regs = (u32 __iomem *)ugeth->ug_regs;
+ u32 *buff = p;
+
+ for (i = 0; i < sizeof(struct ucc_geth) / sizeof(u32); i++)
+ buff[i] = in_be32(&ug_regs[i]);
+}
+
+static void
+uec_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(netdev);
+ struct ucc_geth_info *ug_info = ugeth->ug_info;
+ int queue = 0;
+
+ ring->rx_max_pending = UCC_GETH_BD_RING_SIZE_MAX;
+ ring->rx_mini_max_pending = UCC_GETH_BD_RING_SIZE_MAX;
+ ring->rx_jumbo_max_pending = UCC_GETH_BD_RING_SIZE_MAX;
+ ring->tx_max_pending = UCC_GETH_BD_RING_SIZE_MAX;
+
+ ring->rx_pending = ug_info->bdRingLenRx[queue];
+ ring->rx_mini_pending = ug_info->bdRingLenRx[queue];
+ ring->rx_jumbo_pending = ug_info->bdRingLenRx[queue];
+ ring->tx_pending = ug_info->bdRingLenTx[queue];
+}
+
+static int
+uec_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(netdev);
+ struct ucc_geth_info *ug_info = ugeth->ug_info;
+ int queue = 0, ret = 0;
+
+ if (ring->rx_pending < UCC_GETH_RX_BD_RING_SIZE_MIN) {
+ netdev_info(netdev, "RxBD ring size must be no smaller than %d\n",
+ UCC_GETH_RX_BD_RING_SIZE_MIN);
+ return -EINVAL;
+ }
+ if (ring->rx_pending % UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT) {
+ netdev_info(netdev, "RxBD ring size must be multiple of %d\n",
+ UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT);
+ return -EINVAL;
+ }
+ if (ring->tx_pending < UCC_GETH_TX_BD_RING_SIZE_MIN) {
+ netdev_info(netdev, "TxBD ring size must be no smaller than %d\n",
+ UCC_GETH_TX_BD_RING_SIZE_MIN);
+ return -EINVAL;
+ }
+
+ ug_info->bdRingLenRx[queue] = ring->rx_pending;
+ ug_info->bdRingLenTx[queue] = ring->tx_pending;
+
+ if (netif_running(netdev)) {
+ /* FIXME: restart automatically */
+ netdev_info(netdev, "Please re-open the interface\n");
+ }
+
+ return ret;
+}
+
+static int uec_get_sset_count(struct net_device *netdev, int sset)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(netdev);
+ u32 stats_mode = ugeth->ug_info->statisticsMode;
+ int len = 0;
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE)
+ len += UEC_HW_STATS_LEN;
+ if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX)
+ len += UEC_TX_FW_STATS_LEN;
+ if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX)
+ len += UEC_RX_FW_STATS_LEN;
+
+ return len;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void uec_get_strings(struct net_device *netdev, u32 stringset, u8 *buf)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(netdev);
+ u32 stats_mode = ugeth->ug_info->statisticsMode;
+
+ if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE) {
+ memcpy(buf, hw_stat_gstrings, UEC_HW_STATS_LEN *
+ ETH_GSTRING_LEN);
+ buf += UEC_HW_STATS_LEN * ETH_GSTRING_LEN;
+ }
+ if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) {
+ memcpy(buf, tx_fw_stat_gstrings, UEC_TX_FW_STATS_LEN *
+ ETH_GSTRING_LEN);
+ buf += UEC_TX_FW_STATS_LEN * ETH_GSTRING_LEN;
+ }
+ if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX)
+ memcpy(buf, rx_fw_stat_gstrings, UEC_RX_FW_STATS_LEN *
+ ETH_GSTRING_LEN);
+}
+
+static void uec_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, uint64_t *data)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(netdev);
+ u32 stats_mode = ugeth->ug_info->statisticsMode;
+ u32 __iomem *base;
+ int i, j = 0;
+
+ if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE) {
+ if (ugeth->ug_regs)
+ base = (u32 __iomem *)&ugeth->ug_regs->tx64;
+ else
+ base = NULL;
+
+ for (i = 0; i < UEC_HW_STATS_LEN; i++)
+ data[j++] = base ? in_be32(&base[i]) : 0;
+ }
+ if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) {
+ base = (u32 __iomem *)ugeth->p_tx_fw_statistics_pram;
+ for (i = 0; i < UEC_TX_FW_STATS_LEN; i++)
+ data[j++] = base ? in_be32(&base[i]) : 0;
+ }
+ if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) {
+ base = (u32 __iomem *)ugeth->p_rx_fw_statistics_pram;
+ for (i = 0; i < UEC_RX_FW_STATS_LEN; i++)
+ data[j++] = base ? in_be32(&base[i]) : 0;
+ }
+}
+
+static int uec_nway_reset(struct net_device *netdev)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(netdev);
+
+ return phy_start_aneg(ugeth->phydev);
+}
+
+/* Report driver information */
+static void
+uec_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
+ strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
+ strlcpy(drvinfo->bus_info, "QUICC ENGINE", sizeof(drvinfo->bus_info));
+ drvinfo->eedump_len = 0;
+ drvinfo->regdump_len = uec_get_regs_len(netdev);
+}
+
+#ifdef CONFIG_PM
+
+static void uec_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(netdev);
+ struct phy_device *phydev = ugeth->phydev;
+
+ if (phydev && phydev->irq)
+ wol->supported |= WAKE_PHY;
+ if (qe_alive_during_sleep())
+ wol->supported |= WAKE_MAGIC;
+
+ wol->wolopts = ugeth->wol_en;
+}
+
+static int uec_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(netdev);
+ struct phy_device *phydev = ugeth->phydev;
+
+ if (wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
+ return -EINVAL;
+ else if (wol->wolopts & WAKE_PHY && (!phydev || !phydev->irq))
+ return -EINVAL;
+ else if (wol->wolopts & WAKE_MAGIC && !qe_alive_during_sleep())
+ return -EINVAL;
+
+ ugeth->wol_en = wol->wolopts;
+ device_set_wakeup_enable(&netdev->dev, ugeth->wol_en);
+
+ return 0;
+}
+
+#else
+#define uec_get_wol NULL
+#define uec_set_wol NULL
+#endif /* CONFIG_PM */
+
+static const struct ethtool_ops uec_ethtool_ops = {
+ .get_settings = uec_get_settings,
+ .set_settings = uec_set_settings,
+ .get_drvinfo = uec_get_drvinfo,
+ .get_regs_len = uec_get_regs_len,
+ .get_regs = uec_get_regs,
+ .get_msglevel = uec_get_msglevel,
+ .set_msglevel = uec_set_msglevel,
+ .nway_reset = uec_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_ringparam = uec_get_ringparam,
+ .set_ringparam = uec_set_ringparam,
+ .get_pauseparam = uec_get_pauseparam,
+ .set_pauseparam = uec_set_pauseparam,
+ .get_sset_count = uec_get_sset_count,
+ .get_strings = uec_get_strings,
+ .get_ethtool_stats = uec_get_ethtool_stats,
+ .get_wol = uec_get_wol,
+ .set_wol = uec_set_wol,
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
+void uec_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &uec_ethtool_ops;
+}
diff --git a/kernel/drivers/net/ethernet/freescale/xgmac_mdio.c b/kernel/drivers/net/ethernet/freescale/xgmac_mdio.c
new file mode 100644
index 000000000..7b8fe866f
--- /dev/null
+++ b/kernel/drivers/net/ethernet/freescale/xgmac_mdio.c
@@ -0,0 +1,333 @@
+/*
+ * QorIQ 10G MDIO Controller
+ *
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * Authors: Andy Fleming <afleming@freescale.com>
+ * Timur Tabi <timur@freescale.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/phy.h>
+#include <linux/mdio.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/of_mdio.h>
+
+/* Number of microseconds to wait for a register to respond */
+#define TIMEOUT 1000
+
+struct tgec_mdio_controller {
+ __be32 reserved[12];
+ __be32 mdio_stat; /* MDIO configuration and status */
+ __be32 mdio_ctl; /* MDIO control */
+ __be32 mdio_data; /* MDIO data */
+ __be32 mdio_addr; /* MDIO address */
+} __packed;
+
+#define MDIO_STAT_ENC BIT(6)
+#define MDIO_STAT_CLKDIV(x) (((x>>1) & 0xff) << 8)
+#define MDIO_STAT_BSY BIT(0)
+#define MDIO_STAT_RD_ER BIT(1)
+#define MDIO_CTL_DEV_ADDR(x) (x & 0x1f)
+#define MDIO_CTL_PORT_ADDR(x) ((x & 0x1f) << 5)
+#define MDIO_CTL_PRE_DIS BIT(10)
+#define MDIO_CTL_SCAN_EN BIT(11)
+#define MDIO_CTL_POST_INC BIT(14)
+#define MDIO_CTL_READ BIT(15)
+
+#define MDIO_DATA(x) (x & 0xffff)
+#define MDIO_DATA_BSY BIT(31)
+
+struct mdio_fsl_priv {
+ struct tgec_mdio_controller __iomem *mdio_base;
+ bool is_little_endian;
+};
+
+static u32 xgmac_read32(void __iomem *regs,
+ bool is_little_endian)
+{
+ if (is_little_endian)
+ return ioread32(regs);
+ else
+ return ioread32be(regs);
+}
+
+static void xgmac_write32(u32 value,
+ void __iomem *regs,
+ bool is_little_endian)
+{
+ if (is_little_endian)
+ iowrite32(value, regs);
+ else
+ iowrite32be(value, regs);
+}
+
+/*
+ * Wait until the MDIO bus is free
+ */
+static int xgmac_wait_until_free(struct device *dev,
+ struct tgec_mdio_controller __iomem *regs,
+ bool is_little_endian)
+{
+ unsigned int timeout;
+
+ /* Wait till the bus is free */
+ timeout = TIMEOUT;
+ while ((xgmac_read32(&regs->mdio_stat, is_little_endian) &
+ MDIO_STAT_BSY) && timeout) {
+ cpu_relax();
+ timeout--;
+ }
+
+ if (!timeout) {
+ dev_err(dev, "timeout waiting for bus to be free\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/*
+ * Wait till the MDIO read or write operation is complete
+ */
+static int xgmac_wait_until_done(struct device *dev,
+ struct tgec_mdio_controller __iomem *regs,
+ bool is_little_endian)
+{
+ unsigned int timeout;
+
+ /* Wait till the MDIO write is complete */
+ timeout = TIMEOUT;
+ while ((xgmac_read32(&regs->mdio_stat, is_little_endian) &
+ MDIO_STAT_BSY) && timeout) {
+ cpu_relax();
+ timeout--;
+ }
+
+ if (!timeout) {
+ dev_err(dev, "timeout waiting for operation to complete\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/*
+ * Write value to the PHY for this device to the register at regnum, waiting
+ * until the write is done before it returns. All PHY configuration has to be
+ * done through this MDIO controller's register block.
+ */
+static int xgmac_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
+{
+ struct mdio_fsl_priv *priv = (struct mdio_fsl_priv *)bus->priv;
+ struct tgec_mdio_controller __iomem *regs = priv->mdio_base;
+ uint16_t dev_addr;
+ u32 mdio_ctl, mdio_stat;
+ int ret;
+ bool endian = priv->is_little_endian;
+
+ mdio_stat = xgmac_read32(&regs->mdio_stat, endian);
+ if (regnum & MII_ADDR_C45) {
+ /* Clause 45 (ie 10G) */
+ dev_addr = (regnum >> 16) & 0x1f;
+ mdio_stat |= MDIO_STAT_ENC;
+ } else {
+ /* Clause 22 (ie 1G) */
+ dev_addr = regnum & 0x1f;
+ mdio_stat &= ~MDIO_STAT_ENC;
+ }
+
+ xgmac_write32(mdio_stat, &regs->mdio_stat, endian);
+
+ ret = xgmac_wait_until_free(&bus->dev, regs, endian);
+ if (ret)
+ return ret;
+
+ /* Set the port and dev addr */
+ mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
+ xgmac_write32(mdio_ctl, &regs->mdio_ctl, endian);
+
+ /* Set the register address */
+ if (regnum & MII_ADDR_C45) {
+ xgmac_write32(regnum & 0xffff, &regs->mdio_addr, endian);
+
+ ret = xgmac_wait_until_free(&bus->dev, regs, endian);
+ if (ret)
+ return ret;
+ }
+
+ /* Write the value to the register */
+ xgmac_write32(MDIO_DATA(value), &regs->mdio_data, endian);
+
+ ret = xgmac_wait_until_done(&bus->dev, regs, endian);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/*
+ * Reads from register regnum in the PHY for device dev, returning the value.
+ * All PHY configuration has to be done through this MDIO controller's
+ * register block.
+ */
+static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
+{
+ struct mdio_fsl_priv *priv = (struct mdio_fsl_priv *)bus->priv;
+ struct tgec_mdio_controller __iomem *regs = priv->mdio_base;
+ uint16_t dev_addr;
+ uint32_t mdio_stat;
+ uint32_t mdio_ctl;
+ uint16_t value;
+ int ret;
+ bool endian = priv->is_little_endian;
+
+ mdio_stat = xgmac_read32(&regs->mdio_stat, endian);
+ if (regnum & MII_ADDR_C45) {
+ dev_addr = (regnum >> 16) & 0x1f;
+ mdio_stat |= MDIO_STAT_ENC;
+ } else {
+ dev_addr = regnum & 0x1f;
+ mdio_stat &= ~MDIO_STAT_ENC;
+ }
+
+ xgmac_write32(mdio_stat, &regs->mdio_stat, endian);
+
+ ret = xgmac_wait_until_free(&bus->dev, regs, endian);
+ if (ret)
+ return ret;
+
+ /* Set the Port and Device Addrs */
+ mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
+ xgmac_write32(mdio_ctl, &regs->mdio_ctl, endian);
+
+ /* Set the register address */
+ if (regnum & MII_ADDR_C45) {
+ xgmac_write32(regnum & 0xffff, &regs->mdio_addr, endian);
+
+ ret = xgmac_wait_until_free(&bus->dev, regs, endian);
+ if (ret)
+ return ret;
+ }
+
+ /* Initiate the read */
+ xgmac_write32(mdio_ctl | MDIO_CTL_READ, &regs->mdio_ctl, endian);
+
+ ret = xgmac_wait_until_done(&bus->dev, regs, endian);
+ if (ret)
+ return ret;
+
+ /* Return all Fs if nothing was there */
+ if (xgmac_read32(&regs->mdio_stat, endian) & MDIO_STAT_RD_ER) {
+ dev_err(&bus->dev,
+ "Error while reading PHY%d reg at %d.%hhu\n",
+ phy_id, dev_addr, regnum);
+ return 0xffff;
+ }
+
+ value = xgmac_read32(&regs->mdio_data, endian) & 0xffff;
+ dev_dbg(&bus->dev, "read %04x\n", value);
+
+ return value;
+}
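A hedged usage sketch (the device and register values are arbitrary examples, not taken from this driver): for a Clause 45 access the caller sets MII_ADDR_C45 and places the MMD device address in bits 16-20 of regnum, which the read and write paths above unpack into mdio_ctl and mdio_addr:

	int regnum = MII_ADDR_C45 | (1 << 16) | 0x0007;		/* MMD 1, register 7 */
	int val = xgmac_mdio_read(bus, phy_addr, regnum);	/* bus and phy_addr assumed */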
+
+static int xgmac_mdio_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct mii_bus *bus;
+ struct resource res;
+ struct mdio_fsl_priv *priv;
+ int ret;
+
+ ret = of_address_to_resource(np, 0, &res);
+ if (ret) {
+ dev_err(&pdev->dev, "could not obtain address\n");
+ return ret;
+ }
+
+ bus = mdiobus_alloc_size(sizeof(struct mdio_fsl_priv));
+ if (!bus)
+ return -ENOMEM;
+
+ bus->name = "Freescale XGMAC MDIO Bus";
+ bus->read = xgmac_mdio_read;
+ bus->write = xgmac_mdio_write;
+ bus->parent = &pdev->dev;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%llx", (unsigned long long)res.start);
+
+ /* Set the PHY base address */
+ priv = bus->priv;
+ priv->mdio_base = of_iomap(np, 0);
+ if (!priv->mdio_base) {
+ ret = -ENOMEM;
+ goto err_ioremap;
+ }
+
+ if (of_get_property(pdev->dev.of_node,
+ "little-endian", NULL))
+ priv->is_little_endian = true;
+ else
+ priv->is_little_endian = false;
+
+ ret = of_mdiobus_register(bus, np);
+ if (ret) {
+ dev_err(&pdev->dev, "cannot register MDIO bus\n");
+ goto err_registration;
+ }
+
+ platform_set_drvdata(pdev, bus);
+
+ return 0;
+
+err_registration:
+ iounmap(priv->mdio_base);
+
+err_ioremap:
+ mdiobus_free(bus);
+
+ return ret;
+}
+
+static int xgmac_mdio_remove(struct platform_device *pdev)
+{
+ struct mii_bus *bus = platform_get_drvdata(pdev);
+
+ mdiobus_unregister(bus);
+ iounmap(bus->priv);
+ mdiobus_free(bus);
+
+ return 0;
+}
+
+static const struct of_device_id xgmac_mdio_match[] = {
+ {
+ .compatible = "fsl,fman-xmdio",
+ },
+ {
+ .compatible = "fsl,fman-memac-mdio",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, xgmac_mdio_match);
+
+static struct platform_driver xgmac_mdio_driver = {
+ .driver = {
+ .name = "fsl-fman_xmdio",
+ .of_match_table = xgmac_mdio_match,
+ },
+ .probe = xgmac_mdio_probe,
+ .remove = xgmac_mdio_remove,
+};
+
+module_platform_driver(xgmac_mdio_driver);
+
+MODULE_DESCRIPTION("Freescale QorIQ 10G MDIO Controller");
+MODULE_LICENSE("GPL v2");