Diffstat (limited to 'kernel/drivers/net/ethernet/renesas')
-rw-r--r--  kernel/drivers/net/ethernet/renesas/Kconfig     |   29
-rw-r--r--  kernel/drivers/net/ethernet/renesas/Makefile    |    4
-rw-r--r--  kernel/drivers/net/ethernet/renesas/ravb.h      |  842
-rw-r--r--  kernel/drivers/net/ethernet/renesas/ravb_main.c | 1894
-rw-r--r--  kernel/drivers/net/ethernet/renesas/ravb_ptp.c  |  359
-rw-r--r--  kernel/drivers/net/ethernet/renesas/sh_eth.c    |  198
-rw-r--r--  kernel/drivers/net/ethernet/renesas/sh_eth.h    |   60
7 files changed, 3246 insertions(+), 140 deletions(-)
diff --git a/kernel/drivers/net/ethernet/renesas/Kconfig b/kernel/drivers/net/ethernet/renesas/Kconfig
index 196e98a2d..270c4c9ca 100644
--- a/kernel/drivers/net/ethernet/renesas/Kconfig
+++ b/kernel/drivers/net/ethernet/renesas/Kconfig
@@ -2,6 +2,19 @@
# Renesas device configuration
#
+config NET_VENDOR_RENESAS
+ bool "Renesas devices"
+ default y
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Renesas devices. If you say Y, you will be asked
+ for your specific device in the following questions.
+
+if NET_VENDOR_RENESAS
+
config SH_ETH
tristate "Renesas SuperH Ethernet support"
depends on HAS_DMA
@@ -15,3 +28,19 @@ config SH_ETH
	  The CPUs supported by this driver are:
- SH7619, SH7710, SH7712, SH7724, SH7734, SH7763, SH7757,
R8A7740, R8A777x and R8A779x.
+
+config RAVB
+ tristate "Renesas Ethernet AVB support"
+ depends on HAS_DMA
+ depends on ARCH_SHMOBILE || COMPILE_TEST
+ select CRC32
+ select MII
+ select MDIO_BITBANG
+ select PHYLIB
+ select PTP_1588_CLOCK
+ help
+ Renesas Ethernet AVB device driver.
+ This driver supports the following SoCs:
+ - R8A779x.
+
+endif # NET_VENDOR_RENESAS
diff --git a/kernel/drivers/net/ethernet/renesas/Makefile b/kernel/drivers/net/ethernet/renesas/Makefile
index 1c278a8e0..a05102a7d 100644
--- a/kernel/drivers/net/ethernet/renesas/Makefile
+++ b/kernel/drivers/net/ethernet/renesas/Makefile
@@ -3,3 +3,7 @@
#
obj-$(CONFIG_SH_ETH) += sh_eth.o
+
+ravb-objs := ravb_main.o ravb_ptp.o
+
+obj-$(CONFIG_RAVB) += ravb.o
diff --git a/kernel/drivers/net/ethernet/renesas/ravb.h b/kernel/drivers/net/ethernet/renesas/ravb.h
new file mode 100644
index 000000000..0623fff93
--- /dev/null
+++ b/kernel/drivers/net/ethernet/renesas/ravb.h
@@ -0,0 +1,842 @@
+/* Renesas Ethernet AVB device driver
+ *
+ * Copyright (C) 2014-2015 Renesas Electronics Corporation
+ * Copyright (C) 2015 Renesas Solutions Corp.
+ * Copyright (C) 2015 Cogent Embedded, Inc. <source@cogentembedded.com>
+ *
+ * Based on the SuperH Ethernet driver
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License version 2,
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef __RAVB_H__
+#define __RAVB_H__
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mdio-bitbang.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/ptp_clock_kernel.h>
+
+#define BE_TX_RING_SIZE 64 /* TX ring size for Best Effort */
+#define BE_RX_RING_SIZE 1024 /* RX ring size for Best Effort */
+#define NC_TX_RING_SIZE 64 /* TX ring size for Network Control */
+#define NC_RX_RING_SIZE 64 /* RX ring size for Network Control */
+#define BE_TX_RING_MIN 64
+#define BE_RX_RING_MIN 64
+#define BE_TX_RING_MAX 1024
+#define BE_RX_RING_MAX 2048
+
+#define PKT_BUF_SZ 1538
+
+/* Driver's parameters */
+#define RAVB_ALIGN 128
+
+/* Hardware time stamp */
+#define RAVB_TXTSTAMP_VALID 0x00000001 /* TX timestamp valid */
+#define RAVB_TXTSTAMP_ENABLED 0x00000010 /* Enable TX timestamping */
+
+#define RAVB_RXTSTAMP_VALID 0x00000001 /* RX timestamp valid */
+#define RAVB_RXTSTAMP_TYPE 0x00000006 /* RX type mask */
+#define RAVB_RXTSTAMP_TYPE_V2_L2_EVENT 0x00000002
+#define RAVB_RXTSTAMP_TYPE_ALL 0x00000006
+#define RAVB_RXTSTAMP_ENABLED 0x00000010 /* Enable RX timestamping */
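+
+/* For illustration: the tstamp_rx_ctrl field of struct ravb_private below
+ * combines these flags, e.g. RAVB_RXTSTAMP_ENABLED | RAVB_RXTSTAMP_TYPE_ALL
+ * requests a timestamp for every received frame (see the RX type check in
+ * ravb_rx()).
+ */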
+
+enum ravb_reg {
+ /* AVB-DMAC registers */
+ CCC = 0x0000,
+ DBAT = 0x0004,
+ DLR = 0x0008,
+ CSR = 0x000C,
+ CDAR0 = 0x0010,
+ CDAR1 = 0x0014,
+ CDAR2 = 0x0018,
+ CDAR3 = 0x001C,
+ CDAR4 = 0x0020,
+ CDAR5 = 0x0024,
+ CDAR6 = 0x0028,
+ CDAR7 = 0x002C,
+ CDAR8 = 0x0030,
+ CDAR9 = 0x0034,
+ CDAR10 = 0x0038,
+ CDAR11 = 0x003C,
+ CDAR12 = 0x0040,
+ CDAR13 = 0x0044,
+ CDAR14 = 0x0048,
+ CDAR15 = 0x004C,
+ CDAR16 = 0x0050,
+ CDAR17 = 0x0054,
+ CDAR18 = 0x0058,
+ CDAR19 = 0x005C,
+ CDAR20 = 0x0060,
+ CDAR21 = 0x0064,
+ ESR = 0x0088,
+ RCR = 0x0090,
+ RQC0 = 0x0094,
+ RQC1 = 0x0098,
+ RQC2 = 0x009C,
+ RQC3 = 0x00A0,
+ RQC4 = 0x00A4,
+ RPC = 0x00B0,
+ UFCW = 0x00BC,
+ UFCS = 0x00C0,
+ UFCV0 = 0x00C4,
+ UFCV1 = 0x00C8,
+ UFCV2 = 0x00CC,
+ UFCV3 = 0x00D0,
+ UFCV4 = 0x00D4,
+ UFCD0 = 0x00E0,
+ UFCD1 = 0x00E4,
+ UFCD2 = 0x00E8,
+ UFCD3 = 0x00EC,
+ UFCD4 = 0x00F0,
+ SFO = 0x00FC,
+ SFP0 = 0x0100,
+ SFP1 = 0x0104,
+ SFP2 = 0x0108,
+ SFP3 = 0x010C,
+ SFP4 = 0x0110,
+ SFP5 = 0x0114,
+ SFP6 = 0x0118,
+ SFP7 = 0x011C,
+ SFP8 = 0x0120,
+ SFP9 = 0x0124,
+ SFP10 = 0x0128,
+ SFP11 = 0x012C,
+ SFP12 = 0x0130,
+ SFP13 = 0x0134,
+ SFP14 = 0x0138,
+ SFP15 = 0x013C,
+ SFP16 = 0x0140,
+ SFP17 = 0x0144,
+ SFP18 = 0x0148,
+ SFP19 = 0x014C,
+ SFP20 = 0x0150,
+ SFP21 = 0x0154,
+ SFP22 = 0x0158,
+ SFP23 = 0x015C,
+ SFP24 = 0x0160,
+ SFP25 = 0x0164,
+ SFP26 = 0x0168,
+ SFP27 = 0x016C,
+ SFP28 = 0x0170,
+ SFP29 = 0x0174,
+ SFP30 = 0x0178,
+ SFP31 = 0x017C,
+ SFM0 = 0x01C0,
+ SFM1 = 0x01C4,
+ TGC = 0x0300,
+ TCCR = 0x0304,
+ TSR = 0x0308,
+ TFA0 = 0x0310,
+ TFA1 = 0x0314,
+ TFA2 = 0x0318,
+ CIVR0 = 0x0320,
+ CIVR1 = 0x0324,
+ CDVR0 = 0x0328,
+ CDVR1 = 0x032C,
+ CUL0 = 0x0330,
+ CUL1 = 0x0334,
+ CLL0 = 0x0338,
+ CLL1 = 0x033C,
+ DIC = 0x0350,
+ DIS = 0x0354,
+ EIC = 0x0358,
+ EIS = 0x035C,
+ RIC0 = 0x0360,
+ RIS0 = 0x0364,
+ RIC1 = 0x0368,
+ RIS1 = 0x036C,
+ RIC2 = 0x0370,
+ RIS2 = 0x0374,
+ TIC = 0x0378,
+ TIS = 0x037C,
+ ISS = 0x0380,
+ GCCR = 0x0390,
+ GMTT = 0x0394,
+ GPTC = 0x0398,
+ GTI = 0x039C,
+ GTO0 = 0x03A0,
+ GTO1 = 0x03A4,
+ GTO2 = 0x03A8,
+ GIC = 0x03AC,
+ GIS = 0x03B0,
+ GCPT = 0x03B4, /* Undocumented? */
+ GCT0 = 0x03B8,
+ GCT1 = 0x03BC,
+ GCT2 = 0x03C0,
+
+ /* E-MAC registers */
+ ECMR = 0x0500,
+ RFLR = 0x0508,
+ ECSR = 0x0510,
+ ECSIPR = 0x0518,
+ PIR = 0x0520,
+ PSR = 0x0528,
+ PIPR = 0x052c,
+ MPR = 0x0558,
+ PFTCR = 0x055c,
+ PFRCR = 0x0560,
+ GECMR = 0x05b0,
+ MAHR = 0x05c0,
+ MALR = 0x05c8,
+ TROCR = 0x0700, /* Undocumented? */
+ CDCR = 0x0708, /* Undocumented? */
+ LCCR = 0x0710, /* Undocumented? */
+ CEFCR = 0x0740,
+ FRECR = 0x0748,
+ TSFRCR = 0x0750,
+ TLFRCR = 0x0758,
+ RFCR = 0x0760,
+ CERCR = 0x0768, /* Undocumented? */
+ CEECR = 0x0770, /* Undocumented? */
+ MAFCR = 0x0778,
+};
+
+/* Register bits of the Ethernet AVB */
+/* CCC */
+enum CCC_BIT {
+ CCC_OPC = 0x00000003,
+ CCC_OPC_RESET = 0x00000000,
+ CCC_OPC_CONFIG = 0x00000001,
+ CCC_OPC_OPERATION = 0x00000002,
+ CCC_DTSR = 0x00000100,
+ CCC_CSEL = 0x00030000,
+ CCC_CSEL_HPB = 0x00010000,
+ CCC_CSEL_ETH_TX = 0x00020000,
+ CCC_CSEL_GMII_REF = 0x00030000,
+ CCC_BOC = 0x00100000, /* Undocumented? */
+ CCC_LBME = 0x01000000,
+};
+
+/* CSR */
+enum CSR_BIT {
+ CSR_OPS = 0x0000000F,
+ CSR_OPS_RESET = 0x00000001,
+ CSR_OPS_CONFIG = 0x00000002,
+ CSR_OPS_OPERATION = 0x00000004,
+ CSR_OPS_STANDBY = 0x00000008, /* Undocumented? */
+ CSR_DTS = 0x00000100,
+ CSR_TPO0 = 0x00010000,
+ CSR_TPO1 = 0x00020000,
+ CSR_TPO2 = 0x00040000,
+ CSR_TPO3 = 0x00080000,
+ CSR_RPO = 0x00100000,
+};
+
+/* ESR */
+enum ESR_BIT {
+ ESR_EQN = 0x0000001F,
+ ESR_ET = 0x00000F00,
+ ESR_EIL = 0x00001000,
+};
+
+/* RCR */
+enum RCR_BIT {
+ RCR_EFFS = 0x00000001,
+ RCR_ENCF = 0x00000002,
+ RCR_ESF = 0x0000000C,
+ RCR_ETS0 = 0x00000010,
+ RCR_ETS2 = 0x00000020,
+ RCR_RFCL = 0x1FFF0000,
+};
+
+/* RQC0/1/2/3/4 */
+enum RQC_BIT {
+ RQC_RSM0 = 0x00000003,
+ RQC_UFCC0 = 0x00000030,
+ RQC_RSM1 = 0x00000300,
+ RQC_UFCC1 = 0x00003000,
+ RQC_RSM2 = 0x00030000,
+ RQC_UFCC2 = 0x00300000,
+ RQC_RSM3 = 0x03000000,
+ RQC_UFCC3 = 0x30000000,
+};
+
+/* RPC */
+enum RPC_BIT {
+ RPC_PCNT = 0x00000700,
+ RPC_DCNT = 0x00FF0000,
+};
+
+/* UFCW */
+enum UFCW_BIT {
+ UFCW_WL0 = 0x0000003F,
+ UFCW_WL1 = 0x00003F00,
+ UFCW_WL2 = 0x003F0000,
+ UFCW_WL3 = 0x3F000000,
+};
+
+/* UFCS */
+enum UFCS_BIT {
+ UFCS_SL0 = 0x0000003F,
+ UFCS_SL1 = 0x00003F00,
+ UFCS_SL2 = 0x003F0000,
+ UFCS_SL3 = 0x3F000000,
+};
+
+/* UFCV0/1/2/3/4 */
+enum UFCV_BIT {
+ UFCV_CV0 = 0x0000003F,
+ UFCV_CV1 = 0x00003F00,
+ UFCV_CV2 = 0x003F0000,
+ UFCV_CV3 = 0x3F000000,
+};
+
+/* UFCD0/1/2/3/4 */
+enum UFCD_BIT {
+ UFCD_DV0 = 0x0000003F,
+ UFCD_DV1 = 0x00003F00,
+ UFCD_DV2 = 0x003F0000,
+ UFCD_DV3 = 0x3F000000,
+};
+
+/* SFO */
+enum SFO_BIT {
+ SFO_FPB = 0x0000003F,
+};
+
+/* RTC */
+enum RTC_BIT {
+ RTC_MFL0 = 0x00000FFF,
+ RTC_MFL1 = 0x0FFF0000,
+};
+
+/* TGC */
+enum TGC_BIT {
+ TGC_TSM0 = 0x00000001,
+ TGC_TSM1 = 0x00000002,
+ TGC_TSM2 = 0x00000004,
+ TGC_TSM3 = 0x00000008,
+ TGC_TQP = 0x00000030,
+ TGC_TQP_NONAVB = 0x00000000,
+ TGC_TQP_AVBMODE1 = 0x00000010,
+ TGC_TQP_AVBMODE2 = 0x00000030,
+ TGC_TBD0 = 0x00000300,
+ TGC_TBD1 = 0x00003000,
+ TGC_TBD2 = 0x00030000,
+ TGC_TBD3 = 0x00300000,
+};
+
+/* TCCR */
+enum TCCR_BIT {
+ TCCR_TSRQ0 = 0x00000001,
+ TCCR_TSRQ1 = 0x00000002,
+ TCCR_TSRQ2 = 0x00000004,
+ TCCR_TSRQ3 = 0x00000008,
+ TCCR_TFEN = 0x00000100,
+ TCCR_TFR = 0x00000200,
+};
+
+/* TSR */
+enum TSR_BIT {
+ TSR_CCS0 = 0x00000003,
+ TSR_CCS1 = 0x0000000C,
+ TSR_TFFL = 0x00000700,
+};
+
+/* TFA2 */
+enum TFA2_BIT {
+ TFA2_TSV = 0x0000FFFF,
+ TFA2_TST = 0x03FF0000,
+};
+
+/* DIC */
+enum DIC_BIT {
+ DIC_DPE1 = 0x00000002,
+ DIC_DPE2 = 0x00000004,
+ DIC_DPE3 = 0x00000008,
+ DIC_DPE4 = 0x00000010,
+ DIC_DPE5 = 0x00000020,
+ DIC_DPE6 = 0x00000040,
+ DIC_DPE7 = 0x00000080,
+ DIC_DPE8 = 0x00000100,
+ DIC_DPE9 = 0x00000200,
+ DIC_DPE10 = 0x00000400,
+ DIC_DPE11 = 0x00000800,
+ DIC_DPE12 = 0x00001000,
+ DIC_DPE13 = 0x00002000,
+ DIC_DPE14 = 0x00004000,
+ DIC_DPE15 = 0x00008000,
+};
+
+/* DIS */
+enum DIS_BIT {
+ DIS_DPF1 = 0x00000002,
+ DIS_DPF2 = 0x00000004,
+ DIS_DPF3 = 0x00000008,
+ DIS_DPF4 = 0x00000010,
+ DIS_DPF5 = 0x00000020,
+ DIS_DPF6 = 0x00000040,
+ DIS_DPF7 = 0x00000080,
+ DIS_DPF8 = 0x00000100,
+ DIS_DPF9 = 0x00000200,
+ DIS_DPF10 = 0x00000400,
+ DIS_DPF11 = 0x00000800,
+ DIS_DPF12 = 0x00001000,
+ DIS_DPF13 = 0x00002000,
+ DIS_DPF14 = 0x00004000,
+ DIS_DPF15 = 0x00008000,
+};
+
+/* EIC */
+enum EIC_BIT {
+ EIC_MREE = 0x00000001,
+ EIC_MTEE = 0x00000002,
+ EIC_QEE = 0x00000004,
+ EIC_SEE = 0x00000008,
+ EIC_CLLE0 = 0x00000010,
+ EIC_CLLE1 = 0x00000020,
+ EIC_CULE0 = 0x00000040,
+ EIC_CULE1 = 0x00000080,
+ EIC_TFFE = 0x00000100,
+};
+
+/* EIS */
+enum EIS_BIT {
+ EIS_MREF = 0x00000001,
+ EIS_MTEF = 0x00000002,
+ EIS_QEF = 0x00000004,
+ EIS_SEF = 0x00000008,
+ EIS_CLLF0 = 0x00000010,
+ EIS_CLLF1 = 0x00000020,
+ EIS_CULF0 = 0x00000040,
+ EIS_CULF1 = 0x00000080,
+ EIS_TFFF = 0x00000100,
+ EIS_QFS = 0x00010000,
+};
+
+/* RIC0 */
+enum RIC0_BIT {
+ RIC0_FRE0 = 0x00000001,
+ RIC0_FRE1 = 0x00000002,
+ RIC0_FRE2 = 0x00000004,
+ RIC0_FRE3 = 0x00000008,
+ RIC0_FRE4 = 0x00000010,
+ RIC0_FRE5 = 0x00000020,
+ RIC0_FRE6 = 0x00000040,
+ RIC0_FRE7 = 0x00000080,
+ RIC0_FRE8 = 0x00000100,
+ RIC0_FRE9 = 0x00000200,
+ RIC0_FRE10 = 0x00000400,
+ RIC0_FRE11 = 0x00000800,
+ RIC0_FRE12 = 0x00001000,
+ RIC0_FRE13 = 0x00002000,
+ RIC0_FRE14 = 0x00004000,
+ RIC0_FRE15 = 0x00008000,
+ RIC0_FRE16 = 0x00010000,
+ RIC0_FRE17 = 0x00020000,
+};
+
+/* RIS0 */
+enum RIS0_BIT {
+ RIS0_FRF0 = 0x00000001,
+ RIS0_FRF1 = 0x00000002,
+ RIS0_FRF2 = 0x00000004,
+ RIS0_FRF3 = 0x00000008,
+ RIS0_FRF4 = 0x00000010,
+ RIS0_FRF5 = 0x00000020,
+ RIS0_FRF6 = 0x00000040,
+ RIS0_FRF7 = 0x00000080,
+ RIS0_FRF8 = 0x00000100,
+ RIS0_FRF9 = 0x00000200,
+ RIS0_FRF10 = 0x00000400,
+ RIS0_FRF11 = 0x00000800,
+ RIS0_FRF12 = 0x00001000,
+ RIS0_FRF13 = 0x00002000,
+ RIS0_FRF14 = 0x00004000,
+ RIS0_FRF15 = 0x00008000,
+ RIS0_FRF16 = 0x00010000,
+ RIS0_FRF17 = 0x00020000,
+};
+
+/* RIC1 */
+enum RIC1_BIT {
+ RIC1_RFWE = 0x80000000,
+};
+
+/* RIS1 */
+enum RIS1_BIT {
+ RIS1_RFWF = 0x80000000,
+};
+
+/* RIC2 */
+enum RIC2_BIT {
+ RIC2_QFE0 = 0x00000001,
+ RIC2_QFE1 = 0x00000002,
+ RIC2_QFE2 = 0x00000004,
+ RIC2_QFE3 = 0x00000008,
+ RIC2_QFE4 = 0x00000010,
+ RIC2_QFE5 = 0x00000020,
+ RIC2_QFE6 = 0x00000040,
+ RIC2_QFE7 = 0x00000080,
+ RIC2_QFE8 = 0x00000100,
+ RIC2_QFE9 = 0x00000200,
+ RIC2_QFE10 = 0x00000400,
+ RIC2_QFE11 = 0x00000800,
+ RIC2_QFE12 = 0x00001000,
+ RIC2_QFE13 = 0x00002000,
+ RIC2_QFE14 = 0x00004000,
+ RIC2_QFE15 = 0x00008000,
+ RIC2_QFE16 = 0x00010000,
+ RIC2_QFE17 = 0x00020000,
+ RIC2_RFFE = 0x80000000,
+};
+
+/* RIS2 */
+enum RIS2_BIT {
+ RIS2_QFF0 = 0x00000001,
+ RIS2_QFF1 = 0x00000002,
+ RIS2_QFF2 = 0x00000004,
+ RIS2_QFF3 = 0x00000008,
+ RIS2_QFF4 = 0x00000010,
+ RIS2_QFF5 = 0x00000020,
+ RIS2_QFF6 = 0x00000040,
+ RIS2_QFF7 = 0x00000080,
+ RIS2_QFF8 = 0x00000100,
+ RIS2_QFF9 = 0x00000200,
+ RIS2_QFF10 = 0x00000400,
+ RIS2_QFF11 = 0x00000800,
+ RIS2_QFF12 = 0x00001000,
+ RIS2_QFF13 = 0x00002000,
+ RIS2_QFF14 = 0x00004000,
+ RIS2_QFF15 = 0x00008000,
+ RIS2_QFF16 = 0x00010000,
+ RIS2_QFF17 = 0x00020000,
+ RIS2_RFFF = 0x80000000,
+};
+
+/* TIC */
+enum TIC_BIT {
+ TIC_FTE0 = 0x00000001, /* Undocumented? */
+ TIC_FTE1 = 0x00000002, /* Undocumented? */
+ TIC_TFUE = 0x00000100,
+ TIC_TFWE = 0x00000200,
+};
+
+/* TIS */
+enum TIS_BIT {
+ TIS_FTF0 = 0x00000001, /* Undocumented? */
+ TIS_FTF1 = 0x00000002, /* Undocumented? */
+ TIS_TFUF = 0x00000100,
+ TIS_TFWF = 0x00000200,
+};
+
+/* ISS */
+enum ISS_BIT {
+ ISS_FRS = 0x00000001, /* Undocumented? */
+ ISS_FTS = 0x00000004, /* Undocumented? */
+ ISS_ES = 0x00000040,
+ ISS_MS = 0x00000080,
+ ISS_TFUS = 0x00000100,
+ ISS_TFWS = 0x00000200,
+ ISS_RFWS = 0x00001000,
+ ISS_CGIS = 0x00002000,
+ ISS_DPS1 = 0x00020000,
+ ISS_DPS2 = 0x00040000,
+ ISS_DPS3 = 0x00080000,
+ ISS_DPS4 = 0x00100000,
+ ISS_DPS5 = 0x00200000,
+ ISS_DPS6 = 0x00400000,
+ ISS_DPS7 = 0x00800000,
+ ISS_DPS8 = 0x01000000,
+ ISS_DPS9 = 0x02000000,
+ ISS_DPS10 = 0x04000000,
+ ISS_DPS11 = 0x08000000,
+ ISS_DPS12 = 0x10000000,
+ ISS_DPS13 = 0x20000000,
+ ISS_DPS14 = 0x40000000,
+ ISS_DPS15 = 0x80000000,
+};
+
+/* GCCR */
+enum GCCR_BIT {
+ GCCR_TCR = 0x00000003,
+ GCCR_TCR_NOREQ = 0x00000000, /* No request */
+ GCCR_TCR_RESET = 0x00000001, /* gPTP/AVTP presentation timer reset */
+ GCCR_TCR_CAPTURE = 0x00000003, /* Capture value set in GCCR.TCSS */
+ GCCR_LTO = 0x00000004,
+ GCCR_LTI = 0x00000008,
+ GCCR_LPTC = 0x00000010,
+ GCCR_LMTT = 0x00000020,
+ GCCR_TCSS = 0x00000300,
+ GCCR_TCSS_GPTP = 0x00000000, /* gPTP timer value */
+ GCCR_TCSS_ADJGPTP = 0x00000100, /* Adjusted gPTP timer value */
+ GCCR_TCSS_AVTP = 0x00000200, /* AVTP presentation time value */
+};
+
+/* GTI */
+enum GTI_BIT {
+ GTI_TIV = 0x0FFFFFFF,
+};
+
+/* GIC */
+enum GIC_BIT {
+ GIC_PTCE = 0x00000001, /* Undocumented? */
+ GIC_PTME = 0x00000004,
+};
+
+/* GIS */
+enum GIS_BIT {
+ GIS_PTCF = 0x00000001, /* Undocumented? */
+ GIS_PTMF = 0x00000004,
+};
+
+/* ECMR */
+enum ECMR_BIT {
+ ECMR_PRM = 0x00000001,
+ ECMR_DM = 0x00000002,
+ ECMR_TE = 0x00000020,
+ ECMR_RE = 0x00000040,
+ ECMR_MPDE = 0x00000200,
+ ECMR_TXF = 0x00010000, /* Undocumented? */
+ ECMR_RXF = 0x00020000,
+ ECMR_PFR = 0x00040000,
+ ECMR_ZPF = 0x00080000, /* Undocumented? */
+ ECMR_RZPF = 0x00100000,
+ ECMR_DPAD = 0x00200000,
+ ECMR_RCSC = 0x00800000,
+ ECMR_TRCCM = 0x04000000,
+};
+
+/* ECSR */
+enum ECSR_BIT {
+ ECSR_ICD = 0x00000001,
+ ECSR_MPD = 0x00000002,
+ ECSR_LCHNG = 0x00000004,
+ ECSR_PHYI = 0x00000008,
+};
+
+/* ECSIPR */
+enum ECSIPR_BIT {
+ ECSIPR_ICDIP = 0x00000001,
+ ECSIPR_MPDIP = 0x00000002,
+ ECSIPR_LCHNGIP = 0x00000004, /* Undocumented? */
+};
+
+/* PIR */
+enum PIR_BIT {
+ PIR_MDC = 0x00000001,
+ PIR_MMD = 0x00000002,
+ PIR_MDO = 0x00000004,
+ PIR_MDI = 0x00000008,
+};
+
+/* PSR */
+enum PSR_BIT {
+ PSR_LMON = 0x00000001,
+};
+
+/* PIPR */
+enum PIPR_BIT {
+ PIPR_PHYIP = 0x00000001,
+};
+
+/* MPR */
+enum MPR_BIT {
+ MPR_MP = 0x0000ffff,
+};
+
+/* GECMR */
+enum GECMR_BIT {
+ GECMR_SPEED = 0x00000001,
+ GECMR_SPEED_100 = 0x00000000,
+ GECMR_SPEED_1000 = 0x00000001,
+};
+
+/* The Ethernet AVB descriptor definitions. */
+struct ravb_desc {
+ __le16 ds; /* Descriptor size */
+ u8 cc; /* Content control MSBs (reserved) */
+ u8 die_dt; /* Descriptor interrupt enable and type */
+ __le32 dptr; /* Descriptor pointer */
+};
+
+#define DPTR_ALIGN 4 /* Required descriptor pointer alignment */
+
+enum DIE_DT {
+ /* Frame data */
+ DT_FMID = 0x40,
+ DT_FSTART = 0x50,
+ DT_FEND = 0x60,
+ DT_FSINGLE = 0x70,
+ /* Chain control */
+ DT_LINK = 0x80,
+ DT_LINKFIX = 0x90,
+ DT_EOS = 0xa0,
+ /* HW/SW arbitration */
+ DT_FEMPTY = 0xc0,
+ DT_FEMPTY_IS = 0xd0,
+ DT_FEMPTY_IC = 0xe0,
+ DT_FEMPTY_ND = 0xf0,
+ DT_LEMPTY = 0x20,
+ DT_EEMPTY = 0x30,
+};
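+
+/* A frame is evidently carried either in a single DT_FSINGLE descriptor or
+ * in a DT_FSTART ... DT_FMID ... DT_FEND chain (ravb_start_xmit() in
+ * ravb_main.c builds an FSTART/FEND pair); the empty types mark descriptors
+ * as available for the HW/SW arbitration noted above.
+ */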
+
+struct ravb_rx_desc {
+ __le16 ds_cc; /* Descriptor size and content control LSBs */
+ u8 msc; /* MAC status code */
+ u8 die_dt; /* Descriptor interrupt enable and type */
+	__le32 dptr;	/* Descriptor pointer */
+};
+
+struct ravb_ex_rx_desc {
+	__le16 ds_cc;	/* Descriptor size and content control LSBs */
+ u8 msc; /* MAC status code */
+ u8 die_dt; /* Descriptor interrupt enable and type */
+	__le32 dptr;	/* Descriptor pointer */
+	__le32 ts_n;	/* Timestamp nsec */
+ __le32 ts_sl; /* Timestamp low */
+ __le16 ts_sh; /* Timestamp high */
+ __le16 res; /* Reserved bits */
+};
+
+enum RX_DS_CC_BIT {
+ RX_DS = 0x0fff, /* Data size */
+ RX_TR = 0x1000, /* Truncation indication */
+ RX_EI = 0x2000, /* Error indication */
+ RX_PS = 0xc000, /* Padding selection */
+};
+
+/* E-MAC status code */
+enum MSC_BIT {
+ MSC_CRC = 0x01, /* Frame CRC error */
+ MSC_RFE = 0x02, /* Frame reception error (flagged by PHY) */
+ MSC_RTSF = 0x04, /* Frame length error (frame too short) */
+ MSC_RTLF = 0x08, /* Frame length error (frame too long) */
+ MSC_FRE = 0x10, /* Fraction error (not a multiple of 8 bits) */
+ MSC_CRL = 0x20, /* Carrier lost */
+ MSC_CEEF = 0x40, /* Carrier extension error */
+ MSC_MC = 0x80, /* Multicast frame reception */
+};
+
+struct ravb_tx_desc {
+ __le16 ds_tagl; /* Descriptor size and frame tag LSBs */
+ u8 tagh_tsr; /* Frame tag MSBs and timestamp storage request bit */
+ u8 die_dt; /* Descriptor interrupt enable and type */
+	__le32 dptr;	/* Descriptor pointer */
+};
+
+enum TX_DS_TAGL_BIT {
+ TX_DS = 0x0fff, /* Data size */
+ TX_TAGL = 0xf000, /* Frame tag LSBs */
+};
+
+enum TX_TAGH_TSR_BIT {
+ TX_TAGH = 0x3f, /* Frame tag MSBs */
+ TX_TSR = 0x40, /* Timestamp storage request */
+};
+
+enum RAVB_QUEUE {
+ RAVB_BE = 0, /* Best Effort Queue */
+ RAVB_NC, /* Network Control Queue */
+};
+
+#define DBAT_ENTRY_NUM 22
+#define RX_QUEUE_OFFSET 4
+#define NUM_RX_QUEUE 2
+#define NUM_TX_QUEUE 2
+#define NUM_TX_DESC 2 /* TX descriptors per packet */
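+
+/* The descriptor base address table (DBAT) has 22 entries; TX queues occupy
+ * the low indices and RX queues start at RX_QUEUE_OFFSET (see how
+ * ravb_ring_format() in ravb_main.c indexes desc_bat).
+ */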
+
+struct ravb_tstamp_skb {
+ struct list_head list;
+ struct sk_buff *skb;
+ u16 tag;
+};
+
+struct ravb_ptp_perout {
+ u32 target;
+ u32 period;
+};
+
+#define N_EXT_TS 1
+#define N_PER_OUT 1
+
+struct ravb_ptp {
+ struct ptp_clock *clock;
+ struct ptp_clock_info info;
+ u32 default_addend;
+ u32 current_addend;
+ int extts[N_EXT_TS];
+ struct ravb_ptp_perout perout[N_PER_OUT];
+};
+
+enum ravb_chip_id {
+ RCAR_GEN2,
+ RCAR_GEN3,
+};
+
+struct ravb_private {
+ struct net_device *ndev;
+ struct platform_device *pdev;
+ void __iomem *addr;
+ struct mdiobb_ctrl mdiobb;
+ u32 num_rx_ring[NUM_RX_QUEUE];
+ u32 num_tx_ring[NUM_TX_QUEUE];
+ u32 desc_bat_size;
+ dma_addr_t desc_bat_dma;
+ struct ravb_desc *desc_bat;
+ dma_addr_t rx_desc_dma[NUM_RX_QUEUE];
+ dma_addr_t tx_desc_dma[NUM_TX_QUEUE];
+ struct ravb_ex_rx_desc *rx_ring[NUM_RX_QUEUE];
+ struct ravb_tx_desc *tx_ring[NUM_TX_QUEUE];
+ void *tx_align[NUM_TX_QUEUE];
+ struct sk_buff **rx_skb[NUM_RX_QUEUE];
+ struct sk_buff **tx_skb[NUM_TX_QUEUE];
+ u32 rx_over_errors;
+ u32 rx_fifo_errors;
+ struct net_device_stats stats[NUM_RX_QUEUE];
+ u32 tstamp_tx_ctrl;
+ u32 tstamp_rx_ctrl;
+ struct list_head ts_skb_list;
+ u32 ts_skb_tag;
+ struct ravb_ptp ptp;
+ spinlock_t lock; /* Register access lock */
+ u32 cur_rx[NUM_RX_QUEUE]; /* Consumer ring indices */
+ u32 dirty_rx[NUM_RX_QUEUE]; /* Producer ring indices */
+ u32 cur_tx[NUM_TX_QUEUE];
+ u32 dirty_tx[NUM_TX_QUEUE];
+ struct napi_struct napi[NUM_RX_QUEUE];
+ struct work_struct work;
+ /* MII transceiver section. */
+ struct mii_bus *mii_bus; /* MDIO bus control */
+ struct phy_device *phydev; /* PHY device control */
+ int link;
+ phy_interface_t phy_interface;
+ int msg_enable;
+ int speed;
+ int duplex;
+ int emac_irq;
+ enum ravb_chip_id chip_id;
+
+ unsigned no_avb_link:1;
+ unsigned avb_link_active_low:1;
+};
+
+static inline u32 ravb_read(struct net_device *ndev, enum ravb_reg reg)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ return ioread32(priv->addr + reg);
+}
+
+static inline void ravb_write(struct net_device *ndev, u32 data,
+ enum ravb_reg reg)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ iowrite32(data, priv->addr + reg);
+}
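+
+/* Registers are typically updated read-modify-write; e.g., as in
+ * ravb_rcv_snd_enable() in ravb_main.c:
+ *
+ *	ravb_write(ndev, ravb_read(ndev, ECMR) | ECMR_RE | ECMR_TE, ECMR);
+ */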
+
+int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value);
+
+irqreturn_t ravb_ptp_interrupt(struct net_device *ndev);
+void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev);
+void ravb_ptp_stop(struct net_device *ndev);
+
+#endif /* #ifndef __RAVB_H__ */
diff --git a/kernel/drivers/net/ethernet/renesas/ravb_main.c b/kernel/drivers/net/ethernet/renesas/ravb_main.c
new file mode 100644
index 000000000..467d41698
--- /dev/null
+++ b/kernel/drivers/net/ethernet/renesas/ravb_main.c
@@ -0,0 +1,1894 @@
+/* Renesas Ethernet AVB device driver
+ *
+ * Copyright (C) 2014-2015 Renesas Electronics Corporation
+ * Copyright (C) 2015 Renesas Solutions Corp.
+ * Copyright (C) 2015 Cogent Embedded, Inc. <source@cogentembedded.com>
+ *
+ * Based on the SuperH Ethernet driver
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License version 2,
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/cache.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/net_tstamp.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "ravb.h"
+
+#define RAVB_DEF_MSG_ENABLE \
+ (NETIF_MSG_LINK | \
+ NETIF_MSG_TIMER | \
+ NETIF_MSG_RX_ERR | \
+ NETIF_MSG_TX_ERR)
+
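+/* Busy-wait (at most 10000 polls of 10 us each, i.e. ~100 ms) until the
+ * masked bits of a register read back as the expected value.
+ */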
+int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value)
+{
+ int i;
+
+ for (i = 0; i < 10000; i++) {
+ if ((ravb_read(ndev, reg) & mask) == value)
+ return 0;
+ udelay(10);
+ }
+ return -ETIMEDOUT;
+}
+
+static int ravb_config(struct net_device *ndev)
+{
+ int error;
+
+ /* Set config mode */
+ ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) | CCC_OPC_CONFIG,
+ CCC);
+ /* Check if the operating mode is changed to the config mode */
+ error = ravb_wait(ndev, CSR, CSR_OPS, CSR_OPS_CONFIG);
+ if (error)
+ netdev_err(ndev, "failed to switch device to config mode\n");
+
+ return error;
+}
+
+static void ravb_set_duplex(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ u32 ecmr = ravb_read(ndev, ECMR);
+
+ if (priv->duplex) /* Full */
+ ecmr |= ECMR_DM;
+ else /* Half */
+ ecmr &= ~ECMR_DM;
+ ravb_write(ndev, ecmr, ECMR);
+}
+
+static void ravb_set_rate(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ switch (priv->speed) {
+ case 100: /* 100BASE */
+ ravb_write(ndev, GECMR_SPEED_100, GECMR);
+ break;
+ case 1000: /* 1000BASE */
+ ravb_write(ndev, GECMR_SPEED_1000, GECMR);
+ break;
+ default:
+ break;
+ }
+}
+
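+/* Reserve headroom so that skb->data lies on a RAVB_ALIGN (128-byte)
+ * boundary; e.g. data at offset 0x48 past a boundary reserves 0x38 bytes.
+ */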
+static void ravb_set_buffer_align(struct sk_buff *skb)
+{
+ u32 reserve = (unsigned long)skb->data & (RAVB_ALIGN - 1);
+
+ if (reserve)
+ skb_reserve(skb, RAVB_ALIGN - reserve);
+}
+
+/* Get MAC address from the MAC address registers
+ *
+ * The Ethernet AVB device doesn't have a ROM for the MAC address.
+ * This function gets the MAC address that was set by the bootloader.
+ */
+static void ravb_read_mac_address(struct net_device *ndev, const u8 *mac)
+{
+ if (mac) {
+ ether_addr_copy(ndev->dev_addr, mac);
+ } else {
+ ndev->dev_addr[0] = (ravb_read(ndev, MAHR) >> 24);
+ ndev->dev_addr[1] = (ravb_read(ndev, MAHR) >> 16) & 0xFF;
+ ndev->dev_addr[2] = (ravb_read(ndev, MAHR) >> 8) & 0xFF;
+ ndev->dev_addr[3] = (ravb_read(ndev, MAHR) >> 0) & 0xFF;
+ ndev->dev_addr[4] = (ravb_read(ndev, MALR) >> 8) & 0xFF;
+ ndev->dev_addr[5] = (ravb_read(ndev, MALR) >> 0) & 0xFF;
+ }
+}
+
+static void ravb_mdio_ctrl(struct mdiobb_ctrl *ctrl, u32 mask, int set)
+{
+ struct ravb_private *priv = container_of(ctrl, struct ravb_private,
+ mdiobb);
+ u32 pir = ravb_read(priv->ndev, PIR);
+
+ if (set)
+ pir |= mask;
+ else
+ pir &= ~mask;
+ ravb_write(priv->ndev, pir, PIR);
+}
+
+/* MDC pin control */
+static void ravb_set_mdc(struct mdiobb_ctrl *ctrl, int level)
+{
+ ravb_mdio_ctrl(ctrl, PIR_MDC, level);
+}
+
+/* Data I/O pin control */
+static void ravb_set_mdio_dir(struct mdiobb_ctrl *ctrl, int output)
+{
+ ravb_mdio_ctrl(ctrl, PIR_MMD, output);
+}
+
+/* Set data bit */
+static void ravb_set_mdio_data(struct mdiobb_ctrl *ctrl, int value)
+{
+ ravb_mdio_ctrl(ctrl, PIR_MDO, value);
+}
+
+/* Get data bit */
+static int ravb_get_mdio_data(struct mdiobb_ctrl *ctrl)
+{
+ struct ravb_private *priv = container_of(ctrl, struct ravb_private,
+ mdiobb);
+
+ return (ravb_read(priv->ndev, PIR) & PIR_MDI) != 0;
+}
+
+/* MDIO bus control struct */
+static struct mdiobb_ops bb_ops = {
+ .owner = THIS_MODULE,
+ .set_mdc = ravb_set_mdc,
+ .set_mdio_dir = ravb_set_mdio_dir,
+ .set_mdio_data = ravb_set_mdio_data,
+ .get_mdio_data = ravb_get_mdio_data,
+};
+
+/* Free skb's and DMA buffers for Ethernet AVB */
+static void ravb_ring_free(struct net_device *ndev, int q)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ int ring_size;
+ int i;
+
+ /* Free RX skb ringbuffer */
+ if (priv->rx_skb[q]) {
+ for (i = 0; i < priv->num_rx_ring[q]; i++)
+ dev_kfree_skb(priv->rx_skb[q][i]);
+ }
+ kfree(priv->rx_skb[q]);
+ priv->rx_skb[q] = NULL;
+
+ /* Free TX skb ringbuffer */
+ if (priv->tx_skb[q]) {
+ for (i = 0; i < priv->num_tx_ring[q]; i++)
+ dev_kfree_skb(priv->tx_skb[q][i]);
+ }
+ kfree(priv->tx_skb[q]);
+ priv->tx_skb[q] = NULL;
+
+ /* Free aligned TX buffers */
+ kfree(priv->tx_align[q]);
+ priv->tx_align[q] = NULL;
+
+ if (priv->rx_ring[q]) {
+ ring_size = sizeof(struct ravb_ex_rx_desc) *
+ (priv->num_rx_ring[q] + 1);
+ dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q],
+ priv->rx_desc_dma[q]);
+ priv->rx_ring[q] = NULL;
+ }
+
+ if (priv->tx_ring[q]) {
+ ring_size = sizeof(struct ravb_tx_desc) *
+ (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
+ dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q],
+ priv->tx_desc_dma[q]);
+ priv->tx_ring[q] = NULL;
+ }
+}
+
+/* Format skb and descriptor buffer for Ethernet AVB */
+static void ravb_ring_format(struct net_device *ndev, int q)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ struct ravb_ex_rx_desc *rx_desc;
+ struct ravb_tx_desc *tx_desc;
+ struct ravb_desc *desc;
+ int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
+ int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q] *
+ NUM_TX_DESC;
+ dma_addr_t dma_addr;
+ int i;
+
+ priv->cur_rx[q] = 0;
+ priv->cur_tx[q] = 0;
+ priv->dirty_rx[q] = 0;
+ priv->dirty_tx[q] = 0;
+
+ memset(priv->rx_ring[q], 0, rx_ring_size);
+ /* Build RX ring buffer */
+ for (i = 0; i < priv->num_rx_ring[q]; i++) {
+ /* RX descriptor */
+ rx_desc = &priv->rx_ring[q][i];
+		/* The size of the buffer should be on a 16-byte boundary. */
+ rx_desc->ds_cc = cpu_to_le16(ALIGN(PKT_BUF_SZ, 16));
+ dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
+ ALIGN(PKT_BUF_SZ, 16),
+ DMA_FROM_DEVICE);
+ /* We just set the data size to 0 for a failed mapping which
+ * should prevent DMA from happening...
+ */
+ if (dma_mapping_error(ndev->dev.parent, dma_addr))
+ rx_desc->ds_cc = cpu_to_le16(0);
+ rx_desc->dptr = cpu_to_le32(dma_addr);
+ rx_desc->die_dt = DT_FEMPTY;
+ }
+ rx_desc = &priv->rx_ring[q][i];
+ rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
+ rx_desc->die_dt = DT_LINKFIX; /* type */
+
+ memset(priv->tx_ring[q], 0, tx_ring_size);
+ /* Build TX ring buffer */
+ for (i = 0, tx_desc = priv->tx_ring[q]; i < priv->num_tx_ring[q];
+ i++, tx_desc++) {
+ tx_desc->die_dt = DT_EEMPTY;
+ tx_desc++;
+ tx_desc->die_dt = DT_EEMPTY;
+ }
+ tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
+ tx_desc->die_dt = DT_LINKFIX; /* type */
+
+ /* RX descriptor base address for best effort */
+ desc = &priv->desc_bat[RX_QUEUE_OFFSET + q];
+ desc->die_dt = DT_LINKFIX; /* type */
+ desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
+
+ /* TX descriptor base address for best effort */
+ desc = &priv->desc_bat[q];
+ desc->die_dt = DT_LINKFIX; /* type */
+ desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
+}
+
+/* Init skb and descriptor buffer for Ethernet AVB */
+static int ravb_ring_init(struct net_device *ndev, int q)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ struct sk_buff *skb;
+ int ring_size;
+ int i;
+
+ /* Allocate RX and TX skb rings */
+ priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
+ sizeof(*priv->rx_skb[q]), GFP_KERNEL);
+ priv->tx_skb[q] = kcalloc(priv->num_tx_ring[q],
+ sizeof(*priv->tx_skb[q]), GFP_KERNEL);
+ if (!priv->rx_skb[q] || !priv->tx_skb[q])
+ goto error;
+
+ for (i = 0; i < priv->num_rx_ring[q]; i++) {
+ skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RAVB_ALIGN - 1);
+ if (!skb)
+ goto error;
+ ravb_set_buffer_align(skb);
+ priv->rx_skb[q][i] = skb;
+ }
+
+ /* Allocate rings for the aligned buffers */
+ priv->tx_align[q] = kmalloc(DPTR_ALIGN * priv->num_tx_ring[q] +
+ DPTR_ALIGN - 1, GFP_KERNEL);
+ if (!priv->tx_align[q])
+ goto error;
+
+ /* Allocate all RX descriptors. */
+ ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);
+ priv->rx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
+ &priv->rx_desc_dma[q],
+ GFP_KERNEL);
+ if (!priv->rx_ring[q])
+ goto error;
+
+ priv->dirty_rx[q] = 0;
+
+ /* Allocate all TX descriptors. */
+ ring_size = sizeof(struct ravb_tx_desc) *
+ (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
+ priv->tx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
+ &priv->tx_desc_dma[q],
+ GFP_KERNEL);
+ if (!priv->tx_ring[q])
+ goto error;
+
+ return 0;
+
+error:
+ ravb_ring_free(ndev, q);
+
+ return -ENOMEM;
+}
+
+/* E-MAC init function */
+static void ravb_emac_init(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ u32 ecmr;
+
+ /* Receive frame limit set register */
+ ravb_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, RFLR);
+
+ /* PAUSE prohibition */
+ ecmr = ravb_read(ndev, ECMR);
+ ecmr &= ECMR_DM;
+ ecmr |= ECMR_ZPF | (priv->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;
+ ravb_write(ndev, ecmr, ECMR);
+
+ ravb_set_rate(ndev);
+
+ /* Set MAC address */
+ ravb_write(ndev,
+ (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
+ (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
+ ravb_write(ndev,
+ (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
+
+ ravb_write(ndev, 1, MPR);
+
+ /* E-MAC status register clear */
+ ravb_write(ndev, ECSR_ICD | ECSR_MPD, ECSR);
+
+ /* E-MAC interrupt enable register */
+ ravb_write(ndev, ECSIPR_ICDIP | ECSIPR_MPDIP | ECSIPR_LCHNGIP, ECSIPR);
+}
+
+/* Device init function for Ethernet AVB */
+static int ravb_dmac_init(struct net_device *ndev)
+{
+ int error;
+
+ /* Set CONFIG mode */
+ error = ravb_config(ndev);
+ if (error)
+ return error;
+
+ error = ravb_ring_init(ndev, RAVB_BE);
+ if (error)
+ return error;
+ error = ravb_ring_init(ndev, RAVB_NC);
+ if (error) {
+ ravb_ring_free(ndev, RAVB_BE);
+ return error;
+ }
+
+ /* Descriptor format */
+ ravb_ring_format(ndev, RAVB_BE);
+ ravb_ring_format(ndev, RAVB_NC);
+
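+	/* Select the descriptor byte order; CCC_BOC (marked "Undocumented?"
+	 * in ravb.h) presumably controls it: cleared for little-endian
+	 * hosts, set for big-endian ones.
+	 */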
+#if defined(__LITTLE_ENDIAN)
+ ravb_write(ndev, ravb_read(ndev, CCC) & ~CCC_BOC, CCC);
+#else
+ ravb_write(ndev, ravb_read(ndev, CCC) | CCC_BOC, CCC);
+#endif
+
+ /* Set AVB RX */
+ ravb_write(ndev, RCR_EFFS | RCR_ENCF | RCR_ETS0 | 0x18000000, RCR);
+
+ /* Set FIFO size */
+ ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00222200, TGC);
+
+ /* Timestamp enable */
+ ravb_write(ndev, TCCR_TFEN, TCCR);
+
+ /* Interrupt enable: */
+ /* Frame receive */
+ ravb_write(ndev, RIC0_FRE0 | RIC0_FRE1, RIC0);
+ /* Receive FIFO full error, descriptor empty */
+ ravb_write(ndev, RIC2_QFE0 | RIC2_QFE1 | RIC2_RFFE, RIC2);
+ /* Frame transmitted, timestamp FIFO updated */
+ ravb_write(ndev, TIC_FTE0 | TIC_FTE1 | TIC_TFUE, TIC);
+
+ /* Setting the control will start the AVB-DMAC process. */
+ ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) | CCC_OPC_OPERATION,
+ CCC);
+
+ return 0;
+}
+
+/* Free TX skb function for AVB-IP */
+static int ravb_tx_free(struct net_device *ndev, int q)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &priv->stats[q];
+ struct ravb_tx_desc *desc;
+ int free_num = 0;
+ int entry;
+ u32 size;
+
+ for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
+ entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
+ NUM_TX_DESC);
+ desc = &priv->tx_ring[q][entry];
+ if (desc->die_dt != DT_FEMPTY)
+ break;
+ /* Descriptor type must be checked before all other reads */
+ dma_rmb();
+ size = le16_to_cpu(desc->ds_tagl) & TX_DS;
+ /* Free the original skb. */
+ if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
+ dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
+ size, DMA_TO_DEVICE);
+ /* Last packet descriptor? */
+ if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
+ entry /= NUM_TX_DESC;
+ dev_kfree_skb_any(priv->tx_skb[q][entry]);
+ priv->tx_skb[q][entry] = NULL;
+ stats->tx_packets++;
+ }
+ free_num++;
+ }
+ stats->tx_bytes += size;
+ desc->die_dt = DT_EEMPTY;
+ }
+ return free_num;
+}
+
+static void ravb_get_tx_tstamp(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ struct ravb_tstamp_skb *ts_skb, *ts_skb2;
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct sk_buff *skb;
+ struct timespec64 ts;
+ u16 tag, tfa_tag;
+ int count;
+ u32 tfa2;
+
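+	/* TSR.TFFL gives the timestamp FIFO fill level; each entry is read
+	 * out via TFA0 (nanoseconds), TFA1 (seconds LSBs) and TFA2 (seconds
+	 * MSBs plus the frame tag), then popped by setting TCCR.TFR.
+	 */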
+ count = (ravb_read(ndev, TSR) & TSR_TFFL) >> 8;
+ while (count--) {
+ tfa2 = ravb_read(ndev, TFA2);
+ tfa_tag = (tfa2 & TFA2_TST) >> 16;
+ ts.tv_nsec = (u64)ravb_read(ndev, TFA0);
+ ts.tv_sec = ((u64)(tfa2 & TFA2_TSV) << 32) |
+ ravb_read(ndev, TFA1);
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
+ list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list,
+ list) {
+ skb = ts_skb->skb;
+ tag = ts_skb->tag;
+ list_del(&ts_skb->list);
+ kfree(ts_skb);
+ if (tag == tfa_tag) {
+ skb_tstamp_tx(skb, &shhwtstamps);
+ break;
+ }
+ }
+ ravb_write(ndev, ravb_read(ndev, TCCR) | TCCR_TFR, TCCR);
+ }
+}
+
+/* Packet receive function for Ethernet AVB */
+static bool ravb_rx(struct net_device *ndev, int *quota, int q)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ int entry = priv->cur_rx[q] % priv->num_rx_ring[q];
+ int boguscnt = (priv->dirty_rx[q] + priv->num_rx_ring[q]) -
+ priv->cur_rx[q];
+ struct net_device_stats *stats = &priv->stats[q];
+ struct ravb_ex_rx_desc *desc;
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+ struct timespec64 ts;
+ u8 desc_status;
+ u16 pkt_len;
+ int limit;
+
+ boguscnt = min(boguscnt, *quota);
+ limit = boguscnt;
+ desc = &priv->rx_ring[q][entry];
+ while (desc->die_dt != DT_FEMPTY) {
+ /* Descriptor type must be checked before all other reads */
+ dma_rmb();
+ desc_status = desc->msc;
+ pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS;
+
+ if (--boguscnt < 0)
+ break;
+
+ /* We use 0-byte descriptors to mark the DMA mapping errors */
+ if (!pkt_len)
+ continue;
+
+ if (desc_status & MSC_MC)
+ stats->multicast++;
+
+ if (desc_status & (MSC_CRC | MSC_RFE | MSC_RTSF | MSC_RTLF |
+ MSC_CEEF)) {
+ stats->rx_errors++;
+ if (desc_status & MSC_CRC)
+ stats->rx_crc_errors++;
+ if (desc_status & MSC_RFE)
+ stats->rx_frame_errors++;
+ if (desc_status & (MSC_RTLF | MSC_RTSF))
+ stats->rx_length_errors++;
+ if (desc_status & MSC_CEEF)
+ stats->rx_missed_errors++;
+ } else {
+ u32 get_ts = priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE;
+
+ skb = priv->rx_skb[q][entry];
+ priv->rx_skb[q][entry] = NULL;
+ dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
+ ALIGN(PKT_BUF_SZ, 16),
+ DMA_FROM_DEVICE);
+ get_ts &= (q == RAVB_NC) ?
+ RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
+ ~RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
+ if (get_ts) {
+ struct skb_shared_hwtstamps *shhwtstamps;
+
+ shhwtstamps = skb_hwtstamps(skb);
+ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+ ts.tv_sec = ((u64) le16_to_cpu(desc->ts_sh) <<
+ 32) | le32_to_cpu(desc->ts_sl);
+ ts.tv_nsec = le32_to_cpu(desc->ts_n);
+ shhwtstamps->hwtstamp = timespec64_to_ktime(ts);
+ }
+ skb_put(skb, pkt_len);
+ skb->protocol = eth_type_trans(skb, ndev);
+ napi_gro_receive(&priv->napi[q], skb);
+ stats->rx_packets++;
+ stats->rx_bytes += pkt_len;
+ }
+
+ entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
+ desc = &priv->rx_ring[q][entry];
+ }
+
+ /* Refill the RX ring buffers. */
+ for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
+ entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
+ desc = &priv->rx_ring[q][entry];
+		/* The size of the buffer should be on a 16-byte boundary. */
+ desc->ds_cc = cpu_to_le16(ALIGN(PKT_BUF_SZ, 16));
+
+ if (!priv->rx_skb[q][entry]) {
+ skb = netdev_alloc_skb(ndev,
+ PKT_BUF_SZ + RAVB_ALIGN - 1);
+ if (!skb)
+ break; /* Better luck next round. */
+ ravb_set_buffer_align(skb);
+ dma_addr = dma_map_single(ndev->dev.parent, skb->data,
+ le16_to_cpu(desc->ds_cc),
+ DMA_FROM_DEVICE);
+ skb_checksum_none_assert(skb);
+ /* We just set the data size to 0 for a failed mapping
+ * which should prevent DMA from happening...
+ */
+ if (dma_mapping_error(ndev->dev.parent, dma_addr))
+ desc->ds_cc = cpu_to_le16(0);
+ desc->dptr = cpu_to_le32(dma_addr);
+ priv->rx_skb[q][entry] = skb;
+ }
+ /* Descriptor type must be set after all the above writes */
+ dma_wmb();
+ desc->die_dt = DT_FEMPTY;
+ }
+
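+	/* Charge the descriptors handled in this pass against the NAPI
+	 * quota.
+	 */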
+ *quota -= limit - (++boguscnt);
+
+ return boguscnt <= 0;
+}
+
+static void ravb_rcv_snd_disable(struct net_device *ndev)
+{
+ /* Disable TX and RX */
+ ravb_write(ndev, ravb_read(ndev, ECMR) & ~(ECMR_RE | ECMR_TE), ECMR);
+}
+
+static void ravb_rcv_snd_enable(struct net_device *ndev)
+{
+ /* Enable TX and RX */
+ ravb_write(ndev, ravb_read(ndev, ECMR) | ECMR_RE | ECMR_TE, ECMR);
+}
+
+/* Wait until the DMA processes have finished */
+static int ravb_stop_dma(struct net_device *ndev)
+{
+ int error;
+
+ /* Wait for stopping the hardware TX process */
+ error = ravb_wait(ndev, TCCR,
+ TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3, 0);
+ if (error)
+ return error;
+
+ error = ravb_wait(ndev, CSR, CSR_TPO0 | CSR_TPO1 | CSR_TPO2 | CSR_TPO3,
+ 0);
+ if (error)
+ return error;
+
+ /* Stop the E-MAC's RX/TX processes. */
+ ravb_rcv_snd_disable(ndev);
+
+ /* Wait for stopping the RX DMA process */
+ error = ravb_wait(ndev, CSR, CSR_RPO, 0);
+ if (error)
+ return error;
+
+ /* Stop AVB-DMAC process */
+ return ravb_config(ndev);
+}
+
+/* E-MAC interrupt handler */
+static void ravb_emac_interrupt(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ u32 ecsr, psr;
+
+ ecsr = ravb_read(ndev, ECSR);
+ ravb_write(ndev, ecsr, ECSR); /* clear interrupt */
+ if (ecsr & ECSR_ICD)
+ ndev->stats.tx_carrier_errors++;
+ if (ecsr & ECSR_LCHNG) {
+ /* Link changed */
+ if (priv->no_avb_link)
+ return;
+ psr = ravb_read(ndev, PSR);
+ if (priv->avb_link_active_low)
+ psr ^= PSR_LMON;
+ if (!(psr & PSR_LMON)) {
+			/* Disable RX and TX */
+ ravb_rcv_snd_disable(ndev);
+ } else {
+ /* Enable RX and TX */
+ ravb_rcv_snd_enable(ndev);
+ }
+ }
+}
+
+/* Error interrupt handler */
+static void ravb_error_interrupt(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ u32 eis, ris2;
+
+ eis = ravb_read(ndev, EIS);
+ ravb_write(ndev, ~EIS_QFS, EIS);
+ if (eis & EIS_QFS) {
+ ris2 = ravb_read(ndev, RIS2);
+ ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF), RIS2);
+
+		/* Receive Descriptor Empty interrupt (best effort queue) */
+ if (ris2 & RIS2_QFF0)
+ priv->stats[RAVB_BE].rx_over_errors++;
+
+		/* Receive Descriptor Empty interrupt (network control queue) */
+ if (ris2 & RIS2_QFF1)
+ priv->stats[RAVB_NC].rx_over_errors++;
+
+ /* Receive FIFO Overflow int */
+ if (ris2 & RIS2_RFFF)
+ priv->rx_fifo_errors++;
+ }
+}
+
+static irqreturn_t ravb_interrupt(int irq, void *dev_id)
+{
+ struct net_device *ndev = dev_id;
+ struct ravb_private *priv = netdev_priv(ndev);
+ irqreturn_t result = IRQ_NONE;
+ u32 iss;
+
+ spin_lock(&priv->lock);
+ /* Get interrupt status */
+ iss = ravb_read(ndev, ISS);
+
+ /* Received and transmitted interrupts */
+ if (iss & (ISS_FRS | ISS_FTS | ISS_TFUS)) {
+ u32 ris0 = ravb_read(ndev, RIS0);
+ u32 ric0 = ravb_read(ndev, RIC0);
+ u32 tis = ravb_read(ndev, TIS);
+ u32 tic = ravb_read(ndev, TIC);
+ int q;
+
+ /* Timestamp updated */
+ if (tis & TIS_TFUF) {
+ ravb_write(ndev, ~TIS_TFUF, TIS);
+ ravb_get_tx_tstamp(ndev);
+ result = IRQ_HANDLED;
+ }
+
+ /* Network control and best effort queue RX/TX */
+ for (q = RAVB_NC; q >= RAVB_BE; q--) {
+ if (((ris0 & ric0) & BIT(q)) ||
+ ((tis & tic) & BIT(q))) {
+ if (napi_schedule_prep(&priv->napi[q])) {
+ /* Mask RX and TX interrupts */
+ ric0 &= ~BIT(q);
+ tic &= ~BIT(q);
+ ravb_write(ndev, ric0, RIC0);
+ ravb_write(ndev, tic, TIC);
+ __napi_schedule(&priv->napi[q]);
+ } else {
+ netdev_warn(ndev,
+ "ignoring interrupt, rx status 0x%08x, rx mask 0x%08x,\n",
+ ris0, ric0);
+ netdev_warn(ndev,
+ " tx status 0x%08x, tx mask 0x%08x.\n",
+ tis, tic);
+ }
+ result = IRQ_HANDLED;
+ }
+ }
+ }
+
+ /* E-MAC status summary */
+ if (iss & ISS_MS) {
+ ravb_emac_interrupt(ndev);
+ result = IRQ_HANDLED;
+ }
+
+ /* Error status summary */
+ if (iss & ISS_ES) {
+ ravb_error_interrupt(ndev);
+ result = IRQ_HANDLED;
+ }
+
+ if (iss & ISS_CGIS)
+ result = ravb_ptp_interrupt(ndev);
+
+ mmiowb();
+ spin_unlock(&priv->lock);
+ return result;
+}
+
+static int ravb_poll(struct napi_struct *napi, int budget)
+{
+ struct net_device *ndev = napi->dev;
+ struct ravb_private *priv = netdev_priv(ndev);
+ unsigned long flags;
+ int q = napi - priv->napi;
+ int mask = BIT(q);
+ int quota = budget;
+ u32 ris0, tis;
+
+ for (;;) {
+ tis = ravb_read(ndev, TIS);
+ ris0 = ravb_read(ndev, RIS0);
+ if (!((ris0 & mask) || (tis & mask)))
+ break;
+
+ /* Processing RX Descriptor Ring */
+ if (ris0 & mask) {
+ /* Clear RX interrupt */
+ ravb_write(ndev, ~mask, RIS0);
+ if (ravb_rx(ndev, &quota, q))
+ goto out;
+ }
+ /* Processing TX Descriptor Ring */
+ if (tis & mask) {
+ spin_lock_irqsave(&priv->lock, flags);
+ /* Clear TX interrupt */
+ ravb_write(ndev, ~mask, TIS);
+ ravb_tx_free(ndev, q);
+ netif_wake_subqueue(ndev, q);
+ mmiowb();
+ spin_unlock_irqrestore(&priv->lock, flags);
+ }
+ }
+
+ napi_complete(napi);
+
+ /* Re-enable RX/TX interrupts */
+ spin_lock_irqsave(&priv->lock, flags);
+ ravb_write(ndev, ravb_read(ndev, RIC0) | mask, RIC0);
+ ravb_write(ndev, ravb_read(ndev, TIC) | mask, TIC);
+ mmiowb();
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ /* Receive error message handling */
+ priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors;
+ priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors;
+ if (priv->rx_over_errors != ndev->stats.rx_over_errors) {
+ ndev->stats.rx_over_errors = priv->rx_over_errors;
+ netif_err(priv, rx_err, ndev, "Receive Descriptor Empty\n");
+ }
+ if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors) {
+ ndev->stats.rx_fifo_errors = priv->rx_fifo_errors;
+ netif_err(priv, rx_err, ndev, "Receive FIFO Overflow\n");
+ }
+out:
+ return budget - quota;
+}
+
+/* PHY state control function */
+static void ravb_adjust_link(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ struct phy_device *phydev = priv->phydev;
+ bool new_state = false;
+
+ if (phydev->link) {
+ if (phydev->duplex != priv->duplex) {
+ new_state = true;
+ priv->duplex = phydev->duplex;
+ ravb_set_duplex(ndev);
+ }
+
+ if (phydev->speed != priv->speed) {
+ new_state = true;
+ priv->speed = phydev->speed;
+ ravb_set_rate(ndev);
+ }
+ if (!priv->link) {
+ ravb_write(ndev, ravb_read(ndev, ECMR) & ~ECMR_TXF,
+ ECMR);
+ new_state = true;
+ priv->link = phydev->link;
+ if (priv->no_avb_link)
+ ravb_rcv_snd_enable(ndev);
+ }
+ } else if (priv->link) {
+ new_state = true;
+ priv->link = 0;
+ priv->speed = 0;
+ priv->duplex = -1;
+ if (priv->no_avb_link)
+ ravb_rcv_snd_disable(ndev);
+ }
+
+ if (new_state && netif_msg_link(priv))
+ phy_print_status(phydev);
+}
+
+/* PHY init function */
+static int ravb_phy_init(struct net_device *ndev)
+{
+ struct device_node *np = ndev->dev.parent->of_node;
+ struct ravb_private *priv = netdev_priv(ndev);
+ struct phy_device *phydev;
+ struct device_node *pn;
+
+ priv->link = 0;
+ priv->speed = 0;
+ priv->duplex = -1;
+
+ /* Try connecting to PHY */
+ pn = of_parse_phandle(np, "phy-handle", 0);
+ phydev = of_phy_connect(ndev, pn, ravb_adjust_link, 0,
+ priv->phy_interface);
+ if (!phydev) {
+ netdev_err(ndev, "failed to connect PHY\n");
+ return -ENOENT;
+ }
+
+	/* This driver only supports 10/100Mbit speeds on Gen3
+ * at this time.
+ */
+ if (priv->chip_id == RCAR_GEN3) {
+ int err;
+
+ err = phy_set_max_speed(phydev, SPEED_100);
+ if (err) {
+ netdev_err(ndev, "failed to limit PHY to 100Mbit/s\n");
+ phy_disconnect(phydev);
+ return err;
+ }
+
+ netdev_info(ndev, "limited PHY to 100Mbit/s\n");
+ }
+
+ /* 10BASE is not supported */
+ phydev->supported &= ~PHY_10BT_FEATURES;
+
+ netdev_info(ndev, "attached PHY %d (IRQ %d) to driver %s\n",
+ phydev->addr, phydev->irq, phydev->drv->name);
+
+ priv->phydev = phydev;
+
+ return 0;
+}
+
+/* PHY control start function */
+static int ravb_phy_start(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ int error;
+
+ error = ravb_phy_init(ndev);
+ if (error)
+ return error;
+
+ phy_start(priv->phydev);
+
+ return 0;
+}
+
+static int ravb_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ int error = -ENODEV;
+ unsigned long flags;
+
+ if (priv->phydev) {
+ spin_lock_irqsave(&priv->lock, flags);
+ error = phy_ethtool_gset(priv->phydev, ecmd);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ }
+
+ return error;
+}
+
+static int ravb_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ unsigned long flags;
+ int error;
+
+ if (!priv->phydev)
+ return -ENODEV;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /* Disable TX and RX */
+ ravb_rcv_snd_disable(ndev);
+
+ error = phy_ethtool_sset(priv->phydev, ecmd);
+ if (error)
+ goto error_exit;
+
+ if (ecmd->duplex == DUPLEX_FULL)
+ priv->duplex = 1;
+ else
+ priv->duplex = 0;
+
+ ravb_set_duplex(ndev);
+
+error_exit:
+ mdelay(1);
+
+ /* Enable TX and RX */
+ ravb_rcv_snd_enable(ndev);
+
+ mmiowb();
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return error;
+}
+
+static int ravb_nway_reset(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ int error = -ENODEV;
+ unsigned long flags;
+
+ if (priv->phydev) {
+ spin_lock_irqsave(&priv->lock, flags);
+ error = phy_start_aneg(priv->phydev);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ }
+
+ return error;
+}
+
+static u32 ravb_get_msglevel(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ return priv->msg_enable;
+}
+
+static void ravb_set_msglevel(struct net_device *ndev, u32 value)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ priv->msg_enable = value;
+}
+
+static const char ravb_gstrings_stats[][ETH_GSTRING_LEN] = {
+ "rx_queue_0_current",
+ "tx_queue_0_current",
+ "rx_queue_0_dirty",
+ "tx_queue_0_dirty",
+ "rx_queue_0_packets",
+ "tx_queue_0_packets",
+ "rx_queue_0_bytes",
+ "tx_queue_0_bytes",
+ "rx_queue_0_mcast_packets",
+ "rx_queue_0_errors",
+ "rx_queue_0_crc_errors",
+ "rx_queue_0_frame_errors",
+ "rx_queue_0_length_errors",
+ "rx_queue_0_missed_errors",
+ "rx_queue_0_over_errors",
+
+ "rx_queue_1_current",
+ "tx_queue_1_current",
+ "rx_queue_1_dirty",
+ "tx_queue_1_dirty",
+ "rx_queue_1_packets",
+ "tx_queue_1_packets",
+ "rx_queue_1_bytes",
+ "tx_queue_1_bytes",
+ "rx_queue_1_mcast_packets",
+ "rx_queue_1_errors",
+ "rx_queue_1_crc_errors",
+ "rx_queue_1_frame_errors",
+ "rx_queue_1_length_errors",
+ "rx_queue_1_missed_errors",
+ "rx_queue_1_over_errors",
+};
+
+#define RAVB_STATS_LEN ARRAY_SIZE(ravb_gstrings_stats)
+
+static int ravb_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return RAVB_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void ravb_get_ethtool_stats(struct net_device *ndev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ int i = 0;
+ int q;
+
+ /* Device-specific stats */
+ for (q = RAVB_BE; q < NUM_RX_QUEUE; q++) {
+ struct net_device_stats *stats = &priv->stats[q];
+
+ data[i++] = priv->cur_rx[q];
+ data[i++] = priv->cur_tx[q];
+ data[i++] = priv->dirty_rx[q];
+ data[i++] = priv->dirty_tx[q];
+ data[i++] = stats->rx_packets;
+ data[i++] = stats->tx_packets;
+ data[i++] = stats->rx_bytes;
+ data[i++] = stats->tx_bytes;
+ data[i++] = stats->multicast;
+ data[i++] = stats->rx_errors;
+ data[i++] = stats->rx_crc_errors;
+ data[i++] = stats->rx_frame_errors;
+ data[i++] = stats->rx_length_errors;
+ data[i++] = stats->rx_missed_errors;
+ data[i++] = stats->rx_over_errors;
+ }
+}
+
+static void ravb_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
+{
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(data, *ravb_gstrings_stats, sizeof(ravb_gstrings_stats));
+ break;
+ }
+}
+
+static void ravb_get_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ring)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ ring->rx_max_pending = BE_RX_RING_MAX;
+ ring->tx_max_pending = BE_TX_RING_MAX;
+ ring->rx_pending = priv->num_rx_ring[RAVB_BE];
+ ring->tx_pending = priv->num_tx_ring[RAVB_BE];
+}
+
+static int ravb_set_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ring)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ int error;
+
+ if (ring->tx_pending > BE_TX_RING_MAX ||
+ ring->rx_pending > BE_RX_RING_MAX ||
+ ring->tx_pending < BE_TX_RING_MIN ||
+ ring->rx_pending < BE_RX_RING_MIN)
+ return -EINVAL;
+ if (ring->rx_mini_pending || ring->rx_jumbo_pending)
+ return -EINVAL;
+
+ if (netif_running(ndev)) {
+ netif_device_detach(ndev);
+ /* Stop PTP Clock driver */
+ ravb_ptp_stop(ndev);
+ /* Wait for DMA stopping */
+ error = ravb_stop_dma(ndev);
+ if (error) {
+ netdev_err(ndev,
+ "cannot set ringparam! Any AVB processes are still running?\n");
+ return error;
+ }
+ synchronize_irq(ndev->irq);
+
+ /* Free all the skb's in the RX queue and the DMA buffers. */
+ ravb_ring_free(ndev, RAVB_BE);
+ ravb_ring_free(ndev, RAVB_NC);
+ }
+
+ /* Set new parameters */
+ priv->num_rx_ring[RAVB_BE] = ring->rx_pending;
+ priv->num_tx_ring[RAVB_BE] = ring->tx_pending;
+
+ if (netif_running(ndev)) {
+ error = ravb_dmac_init(ndev);
+ if (error) {
+ netdev_err(ndev,
+ "%s: ravb_dmac_init() failed, error %d\n",
+ __func__, error);
+ return error;
+ }
+
+ ravb_emac_init(ndev);
+
+ /* Initialise PTP Clock driver */
+ ravb_ptp_init(ndev, priv->pdev);
+
+ netif_device_attach(ndev);
+ }
+
+ return 0;
+}
+
+static int ravb_get_ts_info(struct net_device *ndev,
+ struct ethtool_ts_info *info)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+ info->rx_filters =
+ (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ (1 << HWTSTAMP_FILTER_ALL);
+ info->phc_index = ptp_clock_index(priv->ptp.clock);
+
+ return 0;
+}
+
+static const struct ethtool_ops ravb_ethtool_ops = {
+ .get_settings = ravb_get_settings,
+ .set_settings = ravb_set_settings,
+ .nway_reset = ravb_nway_reset,
+ .get_msglevel = ravb_get_msglevel,
+ .set_msglevel = ravb_set_msglevel,
+ .get_link = ethtool_op_get_link,
+ .get_strings = ravb_get_strings,
+ .get_ethtool_stats = ravb_get_ethtool_stats,
+ .get_sset_count = ravb_get_sset_count,
+ .get_ringparam = ravb_get_ringparam,
+ .set_ringparam = ravb_set_ringparam,
+ .get_ts_info = ravb_get_ts_info,
+};
+
+/* Network device open function for Ethernet AVB */
+static int ravb_open(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ int error;
+
+ napi_enable(&priv->napi[RAVB_BE]);
+ napi_enable(&priv->napi[RAVB_NC]);
+
+ error = request_irq(ndev->irq, ravb_interrupt, IRQF_SHARED, ndev->name,
+ ndev);
+ if (error) {
+ netdev_err(ndev, "cannot request IRQ\n");
+ goto out_napi_off;
+ }
+
+ if (priv->chip_id == RCAR_GEN3) {
+ error = request_irq(priv->emac_irq, ravb_interrupt,
+ IRQF_SHARED, ndev->name, ndev);
+ if (error) {
+ netdev_err(ndev, "cannot request IRQ\n");
+ goto out_free_irq;
+ }
+ }
+
+ /* Device init */
+ error = ravb_dmac_init(ndev);
+ if (error)
+ goto out_free_irq2;
+ ravb_emac_init(ndev);
+
+ /* Initialise PTP Clock driver */
+ ravb_ptp_init(ndev, priv->pdev);
+
+ netif_tx_start_all_queues(ndev);
+
+ /* PHY control start */
+ error = ravb_phy_start(ndev);
+ if (error)
+ goto out_ptp_stop;
+
+ return 0;
+
+out_ptp_stop:
+ /* Stop PTP Clock driver */
+ ravb_ptp_stop(ndev);
+out_free_irq2:
+ if (priv->chip_id == RCAR_GEN3)
+ free_irq(priv->emac_irq, ndev);
+out_free_irq:
+ free_irq(ndev->irq, ndev);
+out_napi_off:
+ napi_disable(&priv->napi[RAVB_NC]);
+ napi_disable(&priv->napi[RAVB_BE]);
+ return error;
+}
+
+/* Timeout function for Ethernet AVB */
+static void ravb_tx_timeout(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ netif_err(priv, tx_err, ndev,
+ "transmit timed out, status %08x, resetting...\n",
+ ravb_read(ndev, ISS));
+
+ /* tx_errors count up */
+ ndev->stats.tx_errors++;
+
+ schedule_work(&priv->work);
+}
+
+static void ravb_tx_timeout_work(struct work_struct *work)
+{
+ struct ravb_private *priv = container_of(work, struct ravb_private,
+ work);
+ struct net_device *ndev = priv->ndev;
+
+ netif_tx_stop_all_queues(ndev);
+
+ /* Stop PTP Clock driver */
+ ravb_ptp_stop(ndev);
+
+ /* Wait for DMA stopping */
+ ravb_stop_dma(ndev);
+
+ ravb_ring_free(ndev, RAVB_BE);
+ ravb_ring_free(ndev, RAVB_NC);
+
+ /* Device init */
+ ravb_dmac_init(ndev);
+ ravb_emac_init(ndev);
+
+ /* Initialise PTP Clock driver */
+ ravb_ptp_init(ndev, priv->pdev);
+
+ netif_tx_start_all_queues(ndev);
+}
+
+/* Packet transmit function for Ethernet AVB */
+static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ u16 q = skb_get_queue_mapping(skb);
+ struct ravb_tstamp_skb *ts_skb;
+ struct ravb_tx_desc *desc;
+ unsigned long flags;
+ u32 dma_addr;
+ void *buffer;
+ u32 entry;
+ u32 len;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ if (priv->cur_tx[q] - priv->dirty_tx[q] > (priv->num_tx_ring[q] - 1) *
+ NUM_TX_DESC) {
+ netif_err(priv, tx_queued, ndev,
+ "still transmitting with the full ring!\n");
+ netif_stop_subqueue(ndev, q);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return NETDEV_TX_BUSY;
+ }
+ entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * NUM_TX_DESC);
+ priv->tx_skb[q][entry / NUM_TX_DESC] = skb;
+
+ if (skb_put_padto(skb, ETH_ZLEN))
+ goto drop;
+
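+	/* The frame is split over two descriptors: the unaligned head of
+	 * the skb data is copied into a per-entry DPTR_ALIGN-aligned bounce
+	 * buffer for the first descriptor, and the aligned remainder is
+	 * mapped directly for the second.
+	 */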
+ buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) +
+ entry / NUM_TX_DESC * DPTR_ALIGN;
+ len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data;
+ memcpy(buffer, skb->data, len);
+ dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(ndev->dev.parent, dma_addr))
+ goto drop;
+
+ desc = &priv->tx_ring[q][entry];
+ desc->ds_tagl = cpu_to_le16(len);
+ desc->dptr = cpu_to_le32(dma_addr);
+
+ buffer = skb->data + len;
+ len = skb->len - len;
+ dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(ndev->dev.parent, dma_addr))
+ goto unmap;
+
+ desc++;
+ desc->ds_tagl = cpu_to_le16(len);
+ desc->dptr = cpu_to_le32(dma_addr);
+
+ /* TX timestamp required */
+ if (q == RAVB_NC) {
+ ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC);
+ if (!ts_skb) {
+ desc--;
+ dma_unmap_single(ndev->dev.parent, dma_addr, len,
+ DMA_TO_DEVICE);
+ goto unmap;
+ }
+ ts_skb->skb = skb;
+ ts_skb->tag = priv->ts_skb_tag++;
+ priv->ts_skb_tag &= 0x3ff;
+ list_add_tail(&ts_skb->list, &priv->ts_skb_list);
+
+ /* TAG and timestamp required flag */
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ skb_tx_timestamp(skb);
+ desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR;
+ desc->ds_tagl |= cpu_to_le16(ts_skb->tag << 12);
+ }
+
+ /* Descriptor type must be set after all the above writes */
+ dma_wmb();
+ desc->die_dt = DT_FEND;
+ desc--;
+ desc->die_dt = DT_FSTART;
+
+ ravb_write(ndev, ravb_read(ndev, TCCR) | (TCCR_TSRQ0 << q), TCCR);
+
+ priv->cur_tx[q] += NUM_TX_DESC;
+ if (priv->cur_tx[q] - priv->dirty_tx[q] >
+ (priv->num_tx_ring[q] - 1) * NUM_TX_DESC && !ravb_tx_free(ndev, q))
+ netif_stop_subqueue(ndev, q);
+
+exit:
+ mmiowb();
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return NETDEV_TX_OK;
+
+unmap:
+ dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
+ le16_to_cpu(desc->ds_tagl), DMA_TO_DEVICE);
+drop:
+ dev_kfree_skb_any(skb);
+ priv->tx_skb[q][entry / NUM_TX_DESC] = NULL;
+ goto exit;
+}
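Aside: the two-descriptor split above satisfies the descriptor pointer (DPTR) alignment requirement without copying whole frames: only the few bytes of skb->data before the first aligned boundary are bounced through the preallocated tx_align buffer, and the aligned remainder is DMA-mapped in place. A standalone sketch of that head/remainder arithmetic, assuming DPTR_ALIGN is 4 as in the driver's header and modelling PTR_ALIGN locally:

	#include <stdint.h>
	#include <stdio.h>

	#define DPTR_ALIGN 4	/* assumed descriptor pointer alignment */
	#define PTR_ALIGN(p, a) \
		((char *)(((uintptr_t)(p) + (a) - 1) & ~((uintptr_t)(a) - 1)))

	int main(void)
	{
		char frame[64];
		char *data = frame + 2;	/* a typically misaligned skb->data */
		size_t head = PTR_ALIGN(data, DPTR_ALIGN) - data;

		/* First descriptor: 'head' bytes memcpy'd into the aligned
		 * bounce area; second descriptor: data + head, mapped as-is. */
		printf("head copy = %zu bytes\n", head);
		return 0;
	}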
+
+static u16 ravb_select_queue(struct net_device *ndev, struct sk_buff *skb,
+ void *accel_priv, select_queue_fallback_t fallback)
+{
+ /* If skb needs TX timestamp, it is handled in network control queue */
+ return (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ? RAVB_NC :
+ RAVB_BE;
+}
+
+static struct net_device_stats *ravb_get_stats(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ struct net_device_stats *nstats, *stats0, *stats1;
+
+ nstats = &ndev->stats;
+ stats0 = &priv->stats[RAVB_BE];
+ stats1 = &priv->stats[RAVB_NC];
+
+ nstats->tx_dropped += ravb_read(ndev, TROCR);
+ ravb_write(ndev, 0, TROCR); /* (write clear) */
+ nstats->collisions += ravb_read(ndev, CDCR);
+ ravb_write(ndev, 0, CDCR); /* (write clear) */
+ nstats->tx_carrier_errors += ravb_read(ndev, LCCR);
+ ravb_write(ndev, 0, LCCR); /* (write clear) */
+
+ nstats->tx_carrier_errors += ravb_read(ndev, CERCR);
+ ravb_write(ndev, 0, CERCR); /* (write clear) */
+ nstats->tx_carrier_errors += ravb_read(ndev, CEECR);
+ ravb_write(ndev, 0, CEECR); /* (write clear) */
+
+ nstats->rx_packets = stats0->rx_packets + stats1->rx_packets;
+ nstats->tx_packets = stats0->tx_packets + stats1->tx_packets;
+ nstats->rx_bytes = stats0->rx_bytes + stats1->rx_bytes;
+ nstats->tx_bytes = stats0->tx_bytes + stats1->tx_bytes;
+ nstats->multicast = stats0->multicast + stats1->multicast;
+ nstats->rx_errors = stats0->rx_errors + stats1->rx_errors;
+ nstats->rx_crc_errors = stats0->rx_crc_errors + stats1->rx_crc_errors;
+ nstats->rx_frame_errors =
+ stats0->rx_frame_errors + stats1->rx_frame_errors;
+ nstats->rx_length_errors =
+ stats0->rx_length_errors + stats1->rx_length_errors;
+ nstats->rx_missed_errors =
+ stats0->rx_missed_errors + stats1->rx_missed_errors;
+ nstats->rx_over_errors =
+ stats0->rx_over_errors + stats1->rx_over_errors;
+
+ return nstats;
+}
+
+/* Update promiscuous bit */
+static void ravb_set_rx_mode(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ unsigned long flags;
+ u32 ecmr;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ ecmr = ravb_read(ndev, ECMR);
+ if (ndev->flags & IFF_PROMISC)
+ ecmr |= ECMR_PRM;
+ else
+ ecmr &= ~ECMR_PRM;
+ ravb_write(ndev, ecmr, ECMR);
+ mmiowb();
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+/* Device close function for Ethernet AVB */
+static int ravb_close(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ struct ravb_tstamp_skb *ts_skb, *ts_skb2;
+
+ netif_tx_stop_all_queues(ndev);
+
+ /* Disable interrupts by clearing the interrupt masks. */
+ ravb_write(ndev, 0, RIC0);
+ ravb_write(ndev, 0, RIC1);
+ ravb_write(ndev, 0, RIC2);
+ ravb_write(ndev, 0, TIC);
+
+ /* Stop PTP Clock driver */
+ ravb_ptp_stop(ndev);
+
+ /* Set the config mode to stop the AVB-DMAC's processes */
+ if (ravb_stop_dma(ndev) < 0)
+ netdev_err(ndev,
+ "device will be stopped after h/w processes are done.\n");
+
+ /* Clear the timestamp list */
+ list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) {
+ list_del(&ts_skb->list);
+ kfree(ts_skb);
+ }
+
+ /* PHY disconnect */
+ if (priv->phydev) {
+ phy_stop(priv->phydev);
+ phy_disconnect(priv->phydev);
+ priv->phydev = NULL;
+ }
+
+ free_irq(ndev->irq, ndev);
+
+ napi_disable(&priv->napi[RAVB_NC]);
+ napi_disable(&priv->napi[RAVB_BE]);
+
+ /* Free all the skb's in the RX queue and the DMA buffers. */
+ ravb_ring_free(ndev, RAVB_BE);
+ ravb_ring_free(ndev, RAVB_NC);
+
+ return 0;
+}
+
+static int ravb_hwtstamp_get(struct net_device *ndev, struct ifreq *req)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ struct hwtstamp_config config;
+
+ config.flags = 0;
+ config.tx_type = priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON :
+ HWTSTAMP_TX_OFF;
+ if (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE_V2_L2_EVENT)
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+ else if (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE_ALL)
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ else
+ config.rx_filter = HWTSTAMP_FILTER_NONE;
+
+ return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
+/* Control hardware time stamping */
+static int ravb_hwtstamp_set(struct net_device *ndev, struct ifreq *req)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ struct hwtstamp_config config;
+ u32 tstamp_rx_ctrl = RAVB_RXTSTAMP_ENABLED;
+ u32 tstamp_tx_ctrl;
+
+ if (copy_from_user(&config, req->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ /* Reserved for future extensions */
+ if (config.flags)
+ return -EINVAL;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ tstamp_tx_ctrl = 0;
+ break;
+ case HWTSTAMP_TX_ON:
+ tstamp_tx_ctrl = RAVB_TXTSTAMP_ENABLED;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ tstamp_rx_ctrl = 0;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
+ break;
+ default:
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_ALL;
+ }
+
+ priv->tstamp_tx_ctrl = tstamp_tx_ctrl;
+ priv->tstamp_rx_ctrl = tstamp_rx_ctrl;
+
+ return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
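For illustration, a minimal userspace sketch that exercises this handler through the standard SIOCSHWTSTAMP path; the interface name and the already-open socket are assumptions, and note that the handler may rewrite rx_filter (to HWTSTAMP_FILTER_ALL) before copying the config back:

	#include <linux/net_tstamp.h>
	#include <linux/sockios.h>
	#include <net/if.h>
	#include <string.h>
	#include <sys/ioctl.h>

	int enable_hwtstamp(int sock, const char *ifname)
	{
		struct hwtstamp_config cfg = {
			.tx_type   = HWTSTAMP_TX_ON,
			.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT,
		};
		struct ifreq ifr;

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		ifr.ifr_data = (char *)&cfg;

		if (ioctl(sock, SIOCSHWTSTAMP, &ifr) < 0)
			return -1;
		/* cfg.rx_filter now holds what the driver actually enabled. */
		return 0;
	}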
+
+/* ioctl to device function */
+static int ravb_do_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ struct phy_device *phydev = priv->phydev;
+
+ if (!netif_running(ndev))
+ return -EINVAL;
+
+ if (!phydev)
+ return -ENODEV;
+
+ switch (cmd) {
+ case SIOCGHWTSTAMP:
+ return ravb_hwtstamp_get(ndev, req);
+ case SIOCSHWTSTAMP:
+ return ravb_hwtstamp_set(ndev, req);
+ }
+
+ return phy_mii_ioctl(phydev, req, cmd);
+}
+
+static const struct net_device_ops ravb_netdev_ops = {
+ .ndo_open = ravb_open,
+ .ndo_stop = ravb_close,
+ .ndo_start_xmit = ravb_start_xmit,
+ .ndo_select_queue = ravb_select_queue,
+ .ndo_get_stats = ravb_get_stats,
+ .ndo_set_rx_mode = ravb_set_rx_mode,
+ .ndo_tx_timeout = ravb_tx_timeout,
+ .ndo_do_ioctl = ravb_do_ioctl,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_change_mtu = eth_change_mtu,
+};
+
+/* MDIO bus init function */
+static int ravb_mdio_init(struct ravb_private *priv)
+{
+ struct platform_device *pdev = priv->pdev;
+ struct device *dev = &pdev->dev;
+ int error;
+
+ /* Bitbang init */
+ priv->mdiobb.ops = &bb_ops;
+
+ /* MII controller setting */
+ priv->mii_bus = alloc_mdio_bitbang(&priv->mdiobb);
+ if (!priv->mii_bus)
+ return -ENOMEM;
+
+ /* Hook up MII support for ethtool */
+ priv->mii_bus->name = "ravb_mii";
+ priv->mii_bus->parent = dev;
+ snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+ pdev->name, pdev->id);
+
+ /* Register MDIO bus */
+ error = of_mdiobus_register(priv->mii_bus, dev->of_node);
+ if (error)
+ goto out_free_bus;
+
+ return 0;
+
+out_free_bus:
+ free_mdio_bitbang(priv->mii_bus);
+ return error;
+}
+
+/* MDIO bus release function */
+static int ravb_mdio_release(struct ravb_private *priv)
+{
+ /* Unregister mdio bus */
+ mdiobus_unregister(priv->mii_bus);
+
+ /* Free bitbang info */
+ free_mdio_bitbang(priv->mii_bus);
+
+ return 0;
+}
+
+static const struct of_device_id ravb_match_table[] = {
+ { .compatible = "renesas,etheravb-r8a7790", .data = (void *)RCAR_GEN2 },
+ { .compatible = "renesas,etheravb-r8a7794", .data = (void *)RCAR_GEN2 },
+ { .compatible = "renesas,etheravb-r8a7795", .data = (void *)RCAR_GEN3 },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ravb_match_table);
+
+static int ravb_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ const struct of_device_id *match;
+ struct ravb_private *priv;
+ enum ravb_chip_id chip_id;
+ struct net_device *ndev;
+ int error, irq, q;
+ struct resource *res;
+
+ if (!np) {
+ dev_err(&pdev->dev,
+ "this driver is required to be instantiated from device tree\n");
+ return -EINVAL;
+ }
+
+ /* Get base address */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "invalid resource\n");
+ return -EINVAL;
+ }
+
+ ndev = alloc_etherdev_mqs(sizeof(struct ravb_private),
+ NUM_TX_QUEUE, NUM_RX_QUEUE);
+ if (!ndev)
+ return -ENOMEM;
+
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+
+ /* The Ether-specific entries in the device structure. */
+ ndev->base_addr = res->start;
+ ndev->dma = -1;
+
+ match = of_match_device(of_match_ptr(ravb_match_table), &pdev->dev);
+ chip_id = (enum ravb_chip_id)match->data;
+
+ if (chip_id == RCAR_GEN3)
+ irq = platform_get_irq_byname(pdev, "ch22");
+ else
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ error = irq;
+ goto out_release;
+ }
+ ndev->irq = irq;
+
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ priv = netdev_priv(ndev);
+ priv->ndev = ndev;
+ priv->pdev = pdev;
+ priv->num_tx_ring[RAVB_BE] = BE_TX_RING_SIZE;
+ priv->num_rx_ring[RAVB_BE] = BE_RX_RING_SIZE;
+ priv->num_tx_ring[RAVB_NC] = NC_TX_RING_SIZE;
+ priv->num_rx_ring[RAVB_NC] = NC_RX_RING_SIZE;
+ priv->addr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->addr)) {
+ error = PTR_ERR(priv->addr);
+ goto out_release;
+ }
+
+ spin_lock_init(&priv->lock);
+ INIT_WORK(&priv->work, ravb_tx_timeout_work);
+
+ priv->phy_interface = of_get_phy_mode(np);
+
+ priv->no_avb_link = of_property_read_bool(np, "renesas,no-ether-link");
+ priv->avb_link_active_low =
+ of_property_read_bool(np, "renesas,ether-link-active-low");
+
+ if (chip_id == RCAR_GEN3) {
+ irq = platform_get_irq_byname(pdev, "ch24");
+ if (irq < 0) {
+ error = irq;
+ goto out_release;
+ }
+ priv->emac_irq = irq;
+ }
+
+ priv->chip_id = chip_id;
+
+ /* Set function */
+ ndev->netdev_ops = &ravb_netdev_ops;
+ ndev->ethtool_ops = &ravb_ethtool_ops;
+
+ /* Set AVB config mode */
+ ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) | CCC_OPC_CONFIG,
+ CCC);
+
+ /* Set CSEL value */
+ ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_CSEL) | CCC_CSEL_HPB,
+ CCC);
+
+ /* Set GTI value */
+ ravb_write(ndev, ((1000 << 20) / 130) & GTI_TIV, GTI);
+
+ /* Request GTI loading */
+ ravb_write(ndev, ravb_read(ndev, GCCR) | GCCR_LTI, GCCR);
+
+ /* Allocate descriptor base address table */
+ priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM;
+ priv->desc_bat = dma_alloc_coherent(ndev->dev.parent, priv->desc_bat_size,
+ &priv->desc_bat_dma, GFP_KERNEL);
+ if (!priv->desc_bat) {
+ dev_err(&pdev->dev,
+ "Cannot allocate desc base address table (size %d bytes)\n",
+ priv->desc_bat_size);
+ error = -ENOMEM;
+ goto out_release;
+ }
+ for (q = RAVB_BE; q < DBAT_ENTRY_NUM; q++)
+ priv->desc_bat[q].die_dt = DT_EOS;
+ ravb_write(ndev, priv->desc_bat_dma, DBAT);
+
+ /* Initialise HW timestamp list */
+ INIT_LIST_HEAD(&priv->ts_skb_list);
+
+ /* Debug message level */
+ priv->msg_enable = RAVB_DEF_MSG_ENABLE;
+
+ /* Read and set MAC address */
+ ravb_read_mac_address(ndev, of_get_mac_address(np));
+ if (!is_valid_ether_addr(ndev->dev_addr)) {
+ dev_warn(&pdev->dev,
+ "no valid MAC address supplied, using a random one\n");
+ eth_hw_addr_random(ndev);
+ }
+
+ /* MDIO bus init */
+ error = ravb_mdio_init(priv);
+ if (error) {
+ dev_err(&pdev->dev, "failed to initialize MDIO\n");
+ goto out_dma_free;
+ }
+
+ netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll, 64);
+ netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll, 64);
+
+ /* Network device register */
+ error = register_netdev(ndev);
+ if (error)
+ goto out_napi_del;
+
+ /* Print device information */
+ netdev_info(ndev, "Base address at %#x, %pM, IRQ %d.\n",
+ (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
+
+ platform_set_drvdata(pdev, ndev);
+
+ return 0;
+
+out_napi_del:
+ netif_napi_del(&priv->napi[RAVB_NC]);
+ netif_napi_del(&priv->napi[RAVB_BE]);
+ ravb_mdio_release(priv);
+out_dma_free:
+ dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
+ priv->desc_bat_dma);
+out_release:
+ if (ndev)
+ free_netdev(ndev);
+
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ return error;
+}
+
+static int ravb_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
+ priv->desc_bat_dma);
+ /* Set reset mode */
+ ravb_write(ndev, CCC_OPC_RESET, CCC);
+ pm_runtime_put_sync(&pdev->dev);
+ unregister_netdev(ndev);
+ netif_napi_del(&priv->napi[RAVB_NC]);
+ netif_napi_del(&priv->napi[RAVB_BE]);
+ ravb_mdio_release(priv);
+ pm_runtime_disable(&pdev->dev);
+ free_netdev(ndev);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int ravb_runtime_nop(struct device *dev)
+{
+ /* Runtime PM callback shared between ->runtime_suspend()
+ * and ->runtime_resume(). Simply returns success.
+ *
+ * This driver re-initializes all registers after
+ * pm_runtime_get_sync() anyway so there is no need
+ * to save and restore registers here.
+ */
+ return 0;
+}
+
+static const struct dev_pm_ops ravb_dev_pm_ops = {
+ .runtime_suspend = ravb_runtime_nop,
+ .runtime_resume = ravb_runtime_nop,
+};
+
+#define RAVB_PM_OPS (&ravb_dev_pm_ops)
+#else
+#define RAVB_PM_OPS NULL
+#endif
+
+static struct platform_driver ravb_driver = {
+ .probe = ravb_probe,
+ .remove = ravb_remove,
+ .driver = {
+ .name = "ravb",
+ .pm = RAVB_PM_OPS,
+ .of_match_table = ravb_match_table,
+ },
+};
+
+module_platform_driver(ravb_driver);
+
+MODULE_AUTHOR("Mitsuhiro Kimura, Masaru Nagai");
+MODULE_DESCRIPTION("Renesas Ethernet AVB driver");
+MODULE_LICENSE("GPL v2");
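A note on the GTI value programmed in ravb_probe() above: GTI.TIV is the per-cycle increment of the gPTP timer, held as nanoseconds in fixed point with 20 fractional bits (the ">> 20" in ravb_ptp.c recovers whole nanoseconds), and ((1000 << 20) / 130) assumes a 130 MHz peripheral clock, whose period is 1000/130 ns. A worked check of that arithmetic:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* 20 fractional bits: TIV / 2^20 is the increment in ns. */
		uint32_t tiv = (1000u << 20) / 130;

		/* Prints ~7.6923 ns, i.e. one 130 MHz clock period. */
		printf("TIV = %u -> %.4f ns per cycle\n", tiv, tiv / 1048576.0);
		return 0;
	}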
diff --git a/kernel/drivers/net/ethernet/renesas/ravb_ptp.c b/kernel/drivers/net/ethernet/renesas/ravb_ptp.c
new file mode 100644
index 000000000..7a8ce920c
--- /dev/null
+++ b/kernel/drivers/net/ethernet/renesas/ravb_ptp.c
@@ -0,0 +1,359 @@
+/* PTP 1588 clock using the Renesas Ethernet AVB
+ *
+ * Copyright (C) 2013-2015 Renesas Electronics Corporation
+ * Copyright (C) 2015 Renesas Solutions Corp.
+ * Copyright (C) 2015 Cogent Embedded, Inc. <source@cogentembedded.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "ravb.h"
+
+static int ravb_ptp_tcr_request(struct ravb_private *priv, u32 request)
+{
+ struct net_device *ndev = priv->ndev;
+ int error;
+
+ error = ravb_wait(ndev, GCCR, GCCR_TCR, GCCR_TCR_NOREQ);
+ if (error)
+ return error;
+
+ ravb_write(ndev, ravb_read(ndev, GCCR) | request, GCCR);
+ return ravb_wait(ndev, GCCR, GCCR_TCR, GCCR_TCR_NOREQ);
+}
+
+/* Caller must hold the lock */
+static int ravb_ptp_time_read(struct ravb_private *priv, struct timespec64 *ts)
+{
+ struct net_device *ndev = priv->ndev;
+ int error;
+
+ error = ravb_ptp_tcr_request(priv, GCCR_TCR_CAPTURE);
+ if (error)
+ return error;
+
+ ts->tv_nsec = ravb_read(ndev, GCT0);
+ ts->tv_sec = ravb_read(ndev, GCT1) |
+ ((s64)ravb_read(ndev, GCT2) << 32);
+
+ return 0;
+}
+
+/* Caller must hold the lock */
+static int ravb_ptp_time_write(struct ravb_private *priv,
+ const struct timespec64 *ts)
+{
+ struct net_device *ndev = priv->ndev;
+ int error;
+ u32 gccr;
+
+ error = ravb_ptp_tcr_request(priv, GCCR_TCR_RESET);
+ if (error)
+ return error;
+
+ gccr = ravb_read(ndev, GCCR);
+ if (gccr & GCCR_LTO)
+ return -EBUSY;
+ ravb_write(ndev, ts->tv_nsec, GTO0);
+ ravb_write(ndev, ts->tv_sec, GTO1);
+ ravb_write(ndev, (ts->tv_sec >> 32) & 0xffff, GTO2);
+ ravb_write(ndev, gccr | GCCR_LTO, GCCR);
+
+ return 0;
+}
+
+/* Caller must hold the lock */
+static int ravb_ptp_update_compare(struct ravb_private *priv, u32 ns)
+{
+ struct net_device *ndev = priv->ndev;
+ /* If the comparison value (GPTC.PTCV) lies within [x-1, x+1],
+ * where x is the increment value configured in GTI.TIV, a
+ * comparison match may go undetected when the timer wraps
+ * around.
+ */
+ u32 gti_ns_plus_1 = (priv->ptp.current_addend >> 20) + 1;
+ u32 gccr;
+
+ if (ns < gti_ns_plus_1)
+ ns = gti_ns_plus_1;
+ else if (ns > 0 - gti_ns_plus_1)
+ ns = 0 - gti_ns_plus_1;
+
+ gccr = ravb_read(ndev, GCCR);
+ if (gccr & GCCR_LPTC)
+ return -EBUSY;
+ ravb_write(ndev, ns, GPTC);
+ ravb_write(ndev, gccr | GCCR_LPTC, GCCR);
+
+ return 0;
+}
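Concretely, with the reset-time increment programmed by ravb_probe(), (addend >> 20) + 1 = 8, so requested compare values are kept at least 8 ns away from the timer wrap point on both sides. A small sketch of the clamp, where unsigned wraparound makes 0 - x the top of the range:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t addend = (1000u << 20) / 130;	/* GTI reset value */
		uint32_t gti_ns_plus_1 = (addend >> 20) + 1;	/* = 8 */
		uint32_t ns = 3;	/* requested match, too close to wrap */

		if (ns < gti_ns_plus_1)
			ns = gti_ns_plus_1;
		else if (ns > 0u - gti_ns_plus_1)	/* 2^32 - 8 */
			ns = 0u - gti_ns_plus_1;

		printf("clamped to %u ns\n", ns);	/* -> 8 */
		return 0;
	}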
+
+/* PTP clock operations */
+static int ravb_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+ struct ravb_private *priv = container_of(ptp, struct ravb_private,
+ ptp.info);
+ struct net_device *ndev = priv->ndev;
+ unsigned long flags;
+ u32 diff, addend;
+ bool neg_adj = false;
+ u32 gccr;
+
+ if (ppb < 0) {
+ neg_adj = true;
+ ppb = -ppb;
+ }
+ addend = priv->ptp.default_addend;
+ diff = div_u64((u64)addend * ppb, NSEC_PER_SEC);
+
+ addend = neg_adj ? addend - diff : addend + diff;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ priv->ptp.current_addend = addend;
+
+ gccr = ravb_read(ndev, GCCR);
+ if (gccr & GCCR_LTI) {
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return -EBUSY;
+ }
+ ravb_write(ndev, addend & GTI_TIV, GTI);
+ ravb_write(ndev, gccr | GCCR_LTI, GCCR);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
+static int ravb_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct ravb_private *priv = container_of(ptp, struct ravb_private,
+ ptp.info);
+ struct timespec64 ts;
+ unsigned long flags;
+ int error;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ error = ravb_ptp_time_read(priv, &ts);
+ if (!error) {
+ u64 now = ktime_to_ns(timespec64_to_ktime(ts));
+
+ ts = ns_to_timespec64(now + delta);
+ error = ravb_ptp_time_write(priv, &ts);
+ }
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return error;
+}
+
+static int ravb_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts)
+{
+ struct ravb_private *priv = container_of(ptp, struct ravb_private,
+ ptp.info);
+ unsigned long flags;
+ int error;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ error = ravb_ptp_time_read(priv, ts);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return error;
+}
+
+static int ravb_ptp_settime64(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct ravb_private *priv = container_of(ptp, struct ravb_private,
+ ptp.info);
+ unsigned long flags;
+ int error;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ error = ravb_ptp_time_write(priv, ts);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return error;
+}
+
+static int ravb_ptp_extts(struct ptp_clock_info *ptp,
+ struct ptp_extts_request *req, int on)
+{
+ struct ravb_private *priv = container_of(ptp, struct ravb_private,
+ ptp.info);
+ struct net_device *ndev = priv->ndev;
+ unsigned long flags;
+ u32 gic;
+
+ if (req->index)
+ return -EINVAL;
+
+ if (priv->ptp.extts[req->index] == on)
+ return 0;
+ priv->ptp.extts[req->index] = on;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ gic = ravb_read(ndev, GIC);
+ if (on)
+ gic |= GIC_PTCE;
+ else
+ gic &= ~GIC_PTCE;
+ ravb_write(ndev, gic, GIC);
+ mmiowb();
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
+static int ravb_ptp_perout(struct ptp_clock_info *ptp,
+ struct ptp_perout_request *req, int on)
+{
+ struct ravb_private *priv = container_of(ptp, struct ravb_private,
+ ptp.info);
+ struct net_device *ndev = priv->ndev;
+ struct ravb_ptp_perout *perout;
+ unsigned long flags;
+ int error = 0;
+ u32 gic;
+
+ if (req->index)
+ return -EINVAL;
+
+ if (on) {
+ u64 start_ns;
+ u64 period_ns;
+
+ start_ns = req->start.sec * NSEC_PER_SEC + req->start.nsec;
+ period_ns = req->period.sec * NSEC_PER_SEC + req->period.nsec;
+
+ if (start_ns > U32_MAX) {
+ netdev_warn(ndev,
+ "ptp: start value (nsec) is over limit. Maximum size of start is only 32 bits\n");
+ return -ERANGE;
+ }
+
+ if (period_ns > U32_MAX) {
+ netdev_warn(ndev,
+ "ptp: period value (nsec) is over limit. Maximum size of period is only 32 bits\n");
+ return -ERANGE;
+ }
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ perout = &priv->ptp.perout[req->index];
+ perout->target = (u32)start_ns;
+ perout->period = (u32)period_ns;
+ error = ravb_ptp_update_compare(priv, (u32)start_ns);
+ if (!error) {
+ /* Unmask interrupt */
+ gic = ravb_read(ndev, GIC);
+ gic |= GIC_PTME;
+ ravb_write(ndev, gic, GIC);
+ }
+ } else {
+ spin_lock_irqsave(&priv->lock, flags);
+
+ perout = &priv->ptp.perout[req->index];
+ perout->period = 0;
+
+ /* Mask interrupt */
+ gic = ravb_read(ndev, GIC);
+ gic &= ~GIC_PTME;
+ ravb_write(ndev, gic, GIC);
+ }
+ mmiowb();
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return error;
+}
+
+static int ravb_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *req, int on)
+{
+ switch (req->type) {
+ case PTP_CLK_REQ_EXTTS:
+ return ravb_ptp_extts(ptp, &req->extts, on);
+ case PTP_CLK_REQ_PEROUT:
+ return ravb_ptp_perout(ptp, &req->perout, on);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct ptp_clock_info ravb_ptp_info = {
+ .owner = THIS_MODULE,
+ .name = "ravb clock",
+ .max_adj = 50000000,
+ .n_ext_ts = N_EXT_TS,
+ .n_per_out = N_PER_OUT,
+ .adjfreq = ravb_ptp_adjfreq,
+ .adjtime = ravb_ptp_adjtime,
+ .gettime64 = ravb_ptp_gettime64,
+ .settime64 = ravb_ptp_settime64,
+ .enable = ravb_ptp_enable,
+};
+
+/* Caller must hold the lock */
+irqreturn_t ravb_ptp_interrupt(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ u32 gis = ravb_read(ndev, GIS);
+
+ gis &= ravb_read(ndev, GIC);
+ if (gis & GIS_PTCF) {
+ struct ptp_clock_event event;
+
+ event.type = PTP_CLOCK_EXTTS;
+ event.index = 0;
+ event.timestamp = ravb_read(ndev, GCPT);
+ ptp_clock_event(priv->ptp.clock, &event);
+ }
+ if (gis & GIS_PTMF) {
+ struct ravb_ptp_perout *perout = priv->ptp.perout;
+
+ if (perout->period) {
+ perout->target += perout->period;
+ ravb_ptp_update_compare(priv, perout->target);
+ }
+ }
+
+ if (gis) {
+ ravb_write(ndev, ~gis, GIS);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ unsigned long flags;
+ u32 gccr;
+
+ priv->ptp.info = ravb_ptp_info;
+
+ priv->ptp.default_addend = ravb_read(ndev, GTI);
+ priv->ptp.current_addend = priv->ptp.default_addend;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ ravb_wait(ndev, GCCR, GCCR_TCR, GCCR_TCR_NOREQ);
+ gccr = ravb_read(ndev, GCCR) & ~GCCR_TCSS;
+ ravb_write(ndev, gccr | GCCR_TCSS_ADJGPTP, GCCR);
+ mmiowb();
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ priv->ptp.clock = ptp_clock_register(&priv->ptp.info, &pdev->dev);
+}
+
+void ravb_ptp_stop(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ ravb_write(ndev, 0, GIC);
+ ravb_write(ndev, 0, GIS);
+
+ ptp_clock_unregister(priv->ptp.clock);
+}
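The frequency adjustment in ravb_ptp_adjfreq() above scales the increment by the requested parts-per-billion; since ravb_ptp_init() latches the GTI reset value as default_addend, the arithmetic reduces to the sketch below (a +50 ppm request shown, div_u64 modelled with plain 64-bit division):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t addend = (1000u << 20) / 130;	/* default_addend */
		int32_t ppb = 50000;			/* +50 ppm */
		uint32_t diff = (uint32_t)(((uint64_t)addend * ppb)
					   / 1000000000ull);

		/* A negative ppb request would subtract diff instead. */
		printf("addend %u -> %u (diff %u)\n", addend, addend + diff, diff);
		return 0;
	}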
diff --git a/kernel/drivers/net/ethernet/renesas/sh_eth.c b/kernel/drivers/net/ethernet/renesas/sh_eth.c
index 7fb244f56..6a8fc0f34 100644
--- a/kernel/drivers/net/ethernet/renesas/sh_eth.c
+++ b/kernel/drivers/net/ethernet/renesas/sh_eth.c
@@ -52,6 +52,8 @@
NETIF_MSG_RX_ERR| \
NETIF_MSG_TX_ERR)
+#define SH_ETH_OFFSET_INVALID ((u16)~0)
+
#define SH_ETH_OFFSET_DEFAULTS \
[0 ... SH_ETH_MAX_REGISTER_OFFSET - 1] = SH_ETH_OFFSET_INVALID
@@ -404,6 +406,28 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
static void sh_eth_rcv_snd_disable(struct net_device *ndev);
static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev);
+static void sh_eth_write(struct net_device *ndev, u32 data, int enum_index)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ u16 offset = mdp->reg_offset[enum_index];
+
+ if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
+ return;
+
+ iowrite32(data, mdp->addr + offset);
+}
+
+static u32 sh_eth_read(struct net_device *ndev, int enum_index)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ u16 offset = mdp->reg_offset[enum_index];
+
+ if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
+ return ~0U;
+
+ return ioread32(mdp->addr + offset);
+}
+
static bool sh_eth_is_gether(struct sh_eth_private *mdp)
{
return mdp->reg_offset == sh_eth_offset_gigabit;
@@ -1098,7 +1122,7 @@ static struct mdiobb_ops bb_ops = {
static void sh_eth_ring_free(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
- int i;
+ int ringsize, i;
/* Free Rx skb ringbuffer */
if (mdp->rx_skbuff) {
@@ -1115,6 +1139,20 @@ static void sh_eth_ring_free(struct net_device *ndev)
}
kfree(mdp->tx_skbuff);
mdp->tx_skbuff = NULL;
+
+ if (mdp->rx_ring) {
+ ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
+ dma_free_coherent(NULL, ringsize, mdp->rx_ring,
+ mdp->rx_desc_dma);
+ mdp->rx_ring = NULL;
+ }
+
+ if (mdp->tx_ring) {
+ ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
+ dma_free_coherent(NULL, ringsize, mdp->tx_ring,
+ mdp->tx_desc_dma);
+ mdp->tx_ring = NULL;
+ }
}
/* format skb and descriptor buffer */
@@ -1127,8 +1165,9 @@ static void sh_eth_ring_format(struct net_device *ndev)
struct sh_eth_txdesc *txdesc = NULL;
int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
- int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
+ int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1;
dma_addr_t dma_addr;
+ u32 buf_len;
mdp->cur_rx = 0;
mdp->cur_tx = 0;
@@ -1148,17 +1187,17 @@ static void sh_eth_ring_format(struct net_device *ndev)
/* RX descriptor */
rxdesc = &mdp->rx_ring[i];
- /* The size of the buffer is a multiple of 16 bytes. */
- rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
- dma_addr = dma_map_single(&ndev->dev, skb->data,
- rxdesc->buffer_length,
+ /* The size of the buffer is a multiple of 32 bytes. */
+ buf_len = ALIGN(mdp->rx_buf_sz, 32);
+ rxdesc->len = cpu_to_edmac(mdp, buf_len << 16);
+ dma_addr = dma_map_single(&ndev->dev, skb->data, buf_len,
DMA_FROM_DEVICE);
if (dma_mapping_error(&ndev->dev, dma_addr)) {
kfree_skb(skb);
break;
}
mdp->rx_skbuff[i] = skb;
- rxdesc->addr = dma_addr;
+ rxdesc->addr = cpu_to_edmac(mdp, dma_addr);
rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);
/* Rx descriptor address set */
@@ -1173,7 +1212,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
mdp->dirty_rx = (u32) (i - mdp->num_rx_ring);
/* Mark the last entry as wrapping the ring. */
- rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);
+ rxdesc->status |= cpu_to_edmac(mdp, RD_RDLE);
memset(mdp->tx_ring, 0, tx_ringsize);
@@ -1182,7 +1221,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
mdp->tx_skbuff[i] = NULL;
txdesc = &mdp->tx_ring[i];
txdesc->status = cpu_to_edmac(mdp, TD_TFP);
- txdesc->buffer_length = 0;
+ txdesc->len = cpu_to_edmac(mdp, 0);
if (i == 0) {
/* Tx descriptor address set */
sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
@@ -1199,7 +1238,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
static int sh_eth_ring_init(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
- int rx_ringsize, tx_ringsize, ret = 0;
+ int rx_ringsize, tx_ringsize;
/* +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
* card needs room to do 8 byte alignment, +2 so we can reserve
@@ -1212,28 +1251,22 @@ static int sh_eth_ring_init(struct net_device *ndev)
mdp->rx_buf_sz += NET_IP_ALIGN;
/* Allocate RX and TX skb rings */
- mdp->rx_skbuff = kmalloc_array(mdp->num_rx_ring,
- sizeof(*mdp->rx_skbuff), GFP_KERNEL);
- if (!mdp->rx_skbuff) {
- ret = -ENOMEM;
- return ret;
- }
+ mdp->rx_skbuff = kcalloc(mdp->num_rx_ring, sizeof(*mdp->rx_skbuff),
+ GFP_KERNEL);
+ if (!mdp->rx_skbuff)
+ return -ENOMEM;
- mdp->tx_skbuff = kmalloc_array(mdp->num_tx_ring,
- sizeof(*mdp->tx_skbuff), GFP_KERNEL);
- if (!mdp->tx_skbuff) {
- ret = -ENOMEM;
- goto skb_ring_free;
- }
+ mdp->tx_skbuff = kcalloc(mdp->num_tx_ring, sizeof(*mdp->tx_skbuff),
+ GFP_KERNEL);
+ if (!mdp->tx_skbuff)
+ goto ring_free;
/* Allocate all Rx descriptors. */
rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
GFP_KERNEL);
- if (!mdp->rx_ring) {
- ret = -ENOMEM;
- goto desc_ring_free;
- }
+ if (!mdp->rx_ring)
+ goto ring_free;
mdp->dirty_rx = 0;
@@ -1241,42 +1274,15 @@ static int sh_eth_ring_init(struct net_device *ndev)
tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
GFP_KERNEL);
- if (!mdp->tx_ring) {
- ret = -ENOMEM;
- goto desc_ring_free;
- }
- return ret;
-
-desc_ring_free:
- /* free DMA buffer */
- dma_free_coherent(NULL, rx_ringsize, mdp->rx_ring, mdp->rx_desc_dma);
+ if (!mdp->tx_ring)
+ goto ring_free;
+ return 0;
-skb_ring_free:
- /* Free Rx and Tx skb ring buffer */
+ring_free:
+ /* Free Rx and Tx skb ring buffer and DMA buffer */
sh_eth_ring_free(ndev);
- mdp->tx_ring = NULL;
- mdp->rx_ring = NULL;
- return ret;
-}
-
-static void sh_eth_free_dma_buffer(struct sh_eth_private *mdp)
-{
- int ringsize;
-
- if (mdp->rx_ring) {
- ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
- dma_free_coherent(NULL, ringsize, mdp->rx_ring,
- mdp->rx_desc_dma);
- mdp->rx_ring = NULL;
- }
-
- if (mdp->tx_ring) {
- ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
- dma_free_coherent(NULL, ringsize, mdp->tx_ring,
- mdp->tx_desc_dma);
- mdp->tx_ring = NULL;
- }
+ return -ENOMEM;
}
static int sh_eth_dev_init(struct net_device *ndev, bool start)
@@ -1416,14 +1422,16 @@ static int sh_eth_txfree(struct net_device *ndev)
if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
break;
/* TACT bit must be checked before all the following reads */
- rmb();
+ dma_rmb();
netif_info(mdp, tx_done, ndev,
"tx entry %d status 0x%08x\n",
entry, edmac_to_cpu(mdp, txdesc->status));
/* Free the original skb. */
if (mdp->tx_skbuff[entry]) {
- dma_unmap_single(&ndev->dev, txdesc->addr,
- txdesc->buffer_length, DMA_TO_DEVICE);
+ dma_unmap_single(&ndev->dev,
+ edmac_to_cpu(mdp, txdesc->addr),
+ edmac_to_cpu(mdp, txdesc->len) >> 16,
+ DMA_TO_DEVICE);
dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
mdp->tx_skbuff[entry] = NULL;
free_num++;
@@ -1433,7 +1441,7 @@ static int sh_eth_txfree(struct net_device *ndev)
txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
ndev->stats.tx_packets++;
- ndev->stats.tx_bytes += txdesc->buffer_length;
+ ndev->stats.tx_bytes += edmac_to_cpu(mdp, txdesc->len) >> 16;
}
return free_num;
}
@@ -1450,17 +1458,18 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
struct sk_buff *skb;
u16 pkt_len = 0;
u32 desc_status;
- int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
+ int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1;
dma_addr_t dma_addr;
+ u32 buf_len;
boguscnt = min(boguscnt, *quota);
limit = boguscnt;
rxdesc = &mdp->rx_ring[entry];
while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
/* RACT bit must be checked before all the following reads */
- rmb();
+ dma_rmb();
desc_status = edmac_to_cpu(mdp, rxdesc->status);
- pkt_len = rxdesc->frame_length;
+ pkt_len = edmac_to_cpu(mdp, rxdesc->len) & RD_RFL;
if (--boguscnt < 0)
break;
@@ -1481,6 +1490,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
if (mdp->cd->shift_rd0)
desc_status >>= 16;
+ skb = mdp->rx_skbuff[entry];
if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
RD_RFS5 | RD_RFS6 | RD_RFS10)) {
ndev->stats.rx_errors++;
@@ -1496,17 +1506,17 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
ndev->stats.rx_missed_errors++;
if (desc_status & RD_RFS10)
ndev->stats.rx_over_errors++;
- } else {
+ } else if (skb) {
+ dma_addr = edmac_to_cpu(mdp, rxdesc->addr);
if (!mdp->cd->hw_swap)
sh_eth_soft_swap(
- phys_to_virt(ALIGN(rxdesc->addr, 4)),
+ phys_to_virt(ALIGN(dma_addr, 4)),
pkt_len + 2);
- skb = mdp->rx_skbuff[entry];
mdp->rx_skbuff[entry] = NULL;
if (mdp->cd->rpadir)
skb_reserve(skb, NET_IP_ALIGN);
- dma_unmap_single(&ndev->dev, rxdesc->addr,
- ALIGN(mdp->rx_buf_sz, 16),
+ dma_unmap_single(&ndev->dev, dma_addr,
+ ALIGN(mdp->rx_buf_sz, 32),
DMA_FROM_DEVICE);
skb_put(skb, pkt_len);
skb->protocol = eth_type_trans(skb, ndev);
@@ -1524,8 +1534,9 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
entry = mdp->dirty_rx % mdp->num_rx_ring;
rxdesc = &mdp->rx_ring[entry];
- /* The size of the buffer is 16 byte boundary. */
- rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
+ /* The size of the buffer is 32 byte boundary. */
+ buf_len = ALIGN(mdp->rx_buf_sz, 32);
+ rxdesc->len = cpu_to_edmac(mdp, buf_len << 16);
if (mdp->rx_skbuff[entry] == NULL) {
skb = netdev_alloc_skb(ndev, skbuff_size);
@@ -1533,8 +1544,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
break; /* Better luck next round. */
sh_eth_set_receive_align(skb);
dma_addr = dma_map_single(&ndev->dev, skb->data,
- rxdesc->buffer_length,
- DMA_FROM_DEVICE);
+ buf_len, DMA_FROM_DEVICE);
if (dma_mapping_error(&ndev->dev, dma_addr)) {
kfree_skb(skb);
break;
@@ -1542,12 +1552,12 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
mdp->rx_skbuff[entry] = skb;
skb_checksum_none_assert(skb);
- rxdesc->addr = dma_addr;
+ rxdesc->addr = cpu_to_edmac(mdp, dma_addr);
}
- wmb(); /* RACT bit must be set after all the above writes */
+ dma_wmb(); /* RACT bit must be set after all the above writes */
if (entry >= mdp->num_rx_ring - 1)
rxdesc->status |=
- cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
+ cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDLE);
else
rxdesc->status |=
cpu_to_edmac(mdp, RD_RACT | RD_RFP);
@@ -2239,10 +2249,8 @@ static int sh_eth_set_ringparam(struct net_device *ndev,
sh_eth_dev_exit(ndev);
- /* Free all the skbuffs in the Rx queue. */
+ /* Free all the skbuffs in the Rx queue and the DMA buffers. */
sh_eth_ring_free(ndev);
- /* Free DMA buffer */
- sh_eth_free_dma_buffer(mdp);
}
/* Set new parameters */
@@ -2352,8 +2360,8 @@ static void sh_eth_tx_timeout(struct net_device *ndev)
/* Free all the skbuffs in the Rx queue. */
for (i = 0; i < mdp->num_rx_ring; i++) {
rxdesc = &mdp->rx_ring[i];
- rxdesc->status = 0;
- rxdesc->addr = 0xBADF00D0;
+ rxdesc->status = cpu_to_edmac(mdp, 0);
+ rxdesc->addr = cpu_to_edmac(mdp, 0xBADF00D0);
dev_kfree_skb(mdp->rx_skbuff[i]);
mdp->rx_skbuff[i] = NULL;
}
@@ -2371,6 +2379,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
struct sh_eth_txdesc *txdesc;
+ dma_addr_t dma_addr;
u32 entry;
unsigned long flags;
@@ -2393,17 +2402,17 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
txdesc = &mdp->tx_ring[entry];
/* soft swap. */
if (!mdp->cd->hw_swap)
- sh_eth_soft_swap(phys_to_virt(ALIGN(txdesc->addr, 4)),
- skb->len + 2);
- txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len,
- DMA_TO_DEVICE);
- if (dma_mapping_error(&ndev->dev, txdesc->addr)) {
+ sh_eth_soft_swap(PTR_ALIGN(skb->data, 4), skb->len + 2);
+ dma_addr = dma_map_single(&ndev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&ndev->dev, dma_addr)) {
kfree_skb(skb);
return NETDEV_TX_OK;
}
- txdesc->buffer_length = skb->len;
+ txdesc->addr = cpu_to_edmac(mdp, dma_addr);
+ txdesc->len = cpu_to_edmac(mdp, skb->len << 16);
- wmb(); /* TACT bit must be set after all the above writes */
+ dma_wmb(); /* TACT bit must be set after all the above writes */
if (entry >= mdp->num_tx_ring - 1)
txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
else
@@ -2487,12 +2496,9 @@ static int sh_eth_close(struct net_device *ndev)
free_irq(ndev->irq, ndev);
- /* Free all the skbuffs in the Rx queue. */
+ /* Free all the skbuffs in the Rx queue and the DMA buffer. */
sh_eth_ring_free(ndev);
- /* free DMA buffer */
- sh_eth_free_dma_buffer(mdp);
-
pm_runtime_put_sync(&mdp->pdev->dev);
mdp->is_opened = 0;
@@ -3089,10 +3095,8 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
ndev->dma = -1;
ret = platform_get_irq(pdev, 0);
- if (ret < 0) {
- ret = -ENODEV;
+ if (ret < 0)
goto out_release;
- }
ndev->irq = ret;
SET_NETDEV_DEV(ndev, &pdev->dev);
diff --git a/kernel/drivers/net/ethernet/renesas/sh_eth.h b/kernel/drivers/net/ethernet/renesas/sh_eth.h
index 06dbbe520..72fcfc924 100644
--- a/kernel/drivers/net/ethernet/renesas/sh_eth.h
+++ b/kernel/drivers/net/ethernet/renesas/sh_eth.h
@@ -283,9 +283,9 @@ enum DMAC_IM_BIT {
DMAC_M_RINT1 = 0x00000001,
};
-/* Receive descriptor bit */
+/* Receive descriptor 0 bits */
enum RD_STS_BIT {
- RD_RACT = 0x80000000, RD_RDEL = 0x40000000,
+ RD_RACT = 0x80000000, RD_RDLE = 0x40000000,
RD_RFP1 = 0x20000000, RD_RFP0 = 0x10000000,
RD_RFE = 0x08000000, RD_RFS10 = 0x00000200,
RD_RFS9 = 0x00000100, RD_RFS8 = 0x00000080,
@@ -298,6 +298,12 @@ enum RD_STS_BIT {
#define RDFEND RD_RFP0
#define RD_RFP (RD_RFP1|RD_RFP0)
+/* Receive descriptor 1 bits */
+enum RD_LEN_BIT {
+ RD_RFL = 0x0000ffff, /* receive frame length */
+ RD_RBL = 0xffff0000, /* receive buffer length */
+};
+
/* FCFTR */
enum FCFTR_BIT {
FCFTR_RFF2 = 0x00040000, FCFTR_RFF1 = 0x00020000,
@@ -307,7 +313,7 @@ enum FCFTR_BIT {
#define DEFAULT_FIFO_F_D_RFF (FCFTR_RFF2 | FCFTR_RFF1 | FCFTR_RFF0)
#define DEFAULT_FIFO_F_D_RFD (FCFTR_RFD2 | FCFTR_RFD1 | FCFTR_RFD0)
-/* Transmit descriptor bit */
+/* Transmit descriptor 0 bits */
enum TD_STS_BIT {
TD_TACT = 0x80000000, TD_TDLE = 0x40000000,
TD_TFP1 = 0x20000000, TD_TFP0 = 0x10000000,
@@ -317,6 +323,11 @@ enum TD_STS_BIT {
#define TDFEND TD_TFP0
#define TD_TFP (TD_TFP1|TD_TFP0)
+/* Transmit descriptor 1 bits */
+enum TD_LEN_BIT {
+ TD_TBL = 0xffff0000, /* transmit buffer length */
+};
+
/* RMCR */
enum RMCR_BIT {
RMCR_RNC = 0x00000001,
@@ -425,15 +436,9 @@ enum TSU_FWSLC_BIT {
*/
struct sh_eth_txdesc {
u32 status; /* TD0 */
-#if defined(__LITTLE_ENDIAN)
- u16 pad0; /* TD1 */
- u16 buffer_length; /* TD1 */
-#else
- u16 buffer_length; /* TD1 */
- u16 pad0; /* TD1 */
-#endif
+ u32 len; /* TD1 */
u32 addr; /* TD2 */
- u32 pad1; /* padding data */
+ u32 pad0; /* padding data */
} __aligned(2) __packed;
/* The sh ether Rx buffer descriptors.
@@ -441,13 +446,7 @@ struct sh_eth_txdesc {
*/
struct sh_eth_rxdesc {
u32 status; /* RD0 */
-#if defined(__LITTLE_ENDIAN)
- u16 frame_length; /* RD1 */
- u16 buffer_length; /* RD1 */
-#else
- u16 buffer_length; /* RD1 */
- u16 frame_length; /* RD1 */
-#endif
+ u32 len; /* RD1 */
u32 addr; /* RD2 */
u32 pad0; /* padding data */
} __aligned(2) __packed;
@@ -546,31 +545,6 @@ static inline void sh_eth_soft_swap(char *src, int len)
#endif
}
-#define SH_ETH_OFFSET_INVALID ((u16) ~0)
-
-static inline void sh_eth_write(struct net_device *ndev, u32 data,
- int enum_index)
-{
- struct sh_eth_private *mdp = netdev_priv(ndev);
- u16 offset = mdp->reg_offset[enum_index];
-
- if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
- return;
-
- iowrite32(data, mdp->addr + offset);
-}
-
-static inline u32 sh_eth_read(struct net_device *ndev, int enum_index)
-{
- struct sh_eth_private *mdp = netdev_priv(ndev);
- u16 offset = mdp->reg_offset[enum_index];
-
- if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
- return ~0U;
-
- return ioread32(mdp->addr + offset);
-}
-
static inline void *sh_eth_tsu_get_offset(struct sh_eth_private *mdp,
int enum_index)
{