author     José Pekkarinen <jose.pekkarinen@nokia.com>  2016-04-11 10:41:07 +0300
committer  José Pekkarinen <jose.pekkarinen@nokia.com>  2016-04-13 08:17:18 +0300
commit     e09b41010ba33a20a87472ee821fa407a5b8da36
tree       d10dc367189862e7ca5c592f033dc3726e1df4e3
parent     f93b97fd65072de626c074dbe099a1fff05ce060
These changes are the raw update to linux-4.4.6-rt14. The kernel sources are
taken from kernel.org, and the rt patch from the rt wiki download page. During
the rebase, the following patch collided: "Force tick interrupt and get rid of
softirq magic" (I70131fb85). The collisions were dropped because their logic
was already present in the source.

Change-Id: I7f57a4081d9deaa0d9ccfc41a6c8daccdee3b769
Signed-off-by: José Pekkarinen <jose.pekkarinen@nokia.com>
Diffstat (limited to 'kernel/drivers/net/ethernet/apm')
-rw-r--r--  kernel/drivers/net/ethernet/apm/xgene/Makefile            |   2
-rw-r--r--  kernel/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c     | 128
-rw-r--r--  kernel/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h     |  26
-rw-r--r--  kernel/drivers/net/ethernet/apm/xgene/xgene_enet_main.c   | 669
-rw-r--r--  kernel/drivers/net/ethernet/apm/xgene/xgene_enet_main.h   |  53
-rw-r--r--  kernel/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c  | 200
-rw-r--r--  kernel/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.h  |  49
-rw-r--r--  kernel/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c  |  73
-rw-r--r--  kernel/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c  |  20
-rw-r--r--  kernel/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h  |  22
10 files changed, 1041 insertions(+), 201 deletions(-)
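
Editor's note: the bulk of this update routes all ring accesses through a per-SoC ops table. xgene_enet_main.h adds struct xgene_ring_ops, xgene_enet_hw.c exports xgene_ring1_ops for first-generation parts, and the new xgene_enet_ring2.c provides xgene_ring2_ops for X-Gene v2, selected in xgene_enet_setup_ops() from the OF/ACPI match data. The standalone C sketch below illustrates only that dispatch pattern; the ring structure and the ops bodies are simplified stand-ins, and just the field names and the 6/13 shift values mirror the diff.

/* Illustrative sketch of the ops-table dispatch added by this patch.
 * The ring structure and the ops bodies are simplified stand-ins.
 */
#include <stdio.h>

struct ring {
	unsigned int occupancy;          /* stand-in for the hardware ring state */
};

struct ring_ops {
	unsigned char num_ring_id_shift; /* 6 on ring1, 13 on ring2 */
	void (*wr_cmd)(struct ring *ring, int count);
	unsigned int (*len)(struct ring *ring);
};

static void ring_wr_cmd(struct ring *r, int count)
{
	r->occupancy += count;           /* the driver writes a doorbell instead */
}

static unsigned int ring_len(struct ring *r)
{
	return r->occupancy;             /* the driver reads NUMMSGSINQ instead */
}

static const struct ring_ops ring1_ops = {
	.num_ring_id_shift = 6,  .wr_cmd = ring_wr_cmd, .len = ring_len,
};

static const struct ring_ops ring2_ops = {
	.num_ring_id_shift = 13, .wr_cmd = ring_wr_cmd, .len = ring_len,
};

enum enet_id { ENET1 = 1, ENET2 };

int main(void)
{
	enum enet_id id = ENET2;   /* in the driver this comes from OF/ACPI match data */
	const struct ring_ops *ops = (id == ENET1) ? &ring1_ops : &ring2_ops;
	struct ring r = { 0 };

	ops->wr_cmd(&r, 4);        /* callers no longer branch on the SoC generation */
	printf("occupancy=%u, cmd stride=1<<%u\n",
	       ops->len(&r), (unsigned)ops->num_ring_id_shift);
	return 0;
}

With this shape, callers such as xgene_enet_refill_bufpool() go through pdata->ring_ops->wr_cmd(), so supporting a further ring generation only means adding another ops instance.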
diff --git a/kernel/drivers/net/ethernet/apm/xgene/Makefile b/kernel/drivers/net/ethernet/apm/xgene/Makefile
index 68be56554..700b5abe5 100644
--- a/kernel/drivers/net/ethernet/apm/xgene/Makefile
+++ b/kernel/drivers/net/ethernet/apm/xgene/Makefile
@@ -3,5 +3,5 @@
#
xgene-enet-objs := xgene_enet_hw.o xgene_enet_sgmac.o xgene_enet_xgmac.o \
- xgene_enet_main.o xgene_enet_ethtool.o
+ xgene_enet_main.o xgene_enet_ring2.o xgene_enet_ethtool.o
obj-$(CONFIG_NET_XGENE) += xgene-enet.o
diff --git a/kernel/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/kernel/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index b927021c6..c31e691d1 100644
--- a/kernel/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/kernel/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -87,10 +87,11 @@ static void xgene_enet_ring_rd32(struct xgene_enet_desc_ring *ring,
static void xgene_enet_write_ring_state(struct xgene_enet_desc_ring *ring)
{
+ struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
int i;
xgene_enet_ring_wr32(ring, CSR_RING_CONFIG, ring->num);
- for (i = 0; i < NUM_RING_CONFIG; i++) {
+ for (i = 0; i < pdata->ring_ops->num_ring_config; i++) {
xgene_enet_ring_wr32(ring, CSR_RING_WR_BASE + (i * 4),
ring->state[i]);
}
@@ -98,7 +99,7 @@ static void xgene_enet_write_ring_state(struct xgene_enet_desc_ring *ring)
static void xgene_enet_clr_ring_state(struct xgene_enet_desc_ring *ring)
{
- memset(ring->state, 0, sizeof(u32) * NUM_RING_CONFIG);
+ memset(ring->state, 0, sizeof(ring->state));
xgene_enet_write_ring_state(ring);
}
@@ -106,7 +107,8 @@ static void xgene_enet_set_ring_state(struct xgene_enet_desc_ring *ring)
{
xgene_enet_ring_set_type(ring);
- if (xgene_enet_ring_owner(ring->id) == RING_OWNER_ETH0)
+ if (xgene_enet_ring_owner(ring->id) == RING_OWNER_ETH0 ||
+ xgene_enet_ring_owner(ring->id) == RING_OWNER_ETH1)
xgene_enet_ring_set_recombbuf(ring);
xgene_enet_ring_init(ring);
@@ -141,8 +143,8 @@ static void xgene_enet_clr_desc_ring_id(struct xgene_enet_desc_ring *ring)
xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, 0);
}
-struct xgene_enet_desc_ring *xgene_enet_setup_ring(
- struct xgene_enet_desc_ring *ring)
+static struct xgene_enet_desc_ring *xgene_enet_setup_ring(
+ struct xgene_enet_desc_ring *ring)
{
u32 size = ring->size;
u32 i, data;
@@ -168,7 +170,7 @@ struct xgene_enet_desc_ring *xgene_enet_setup_ring(
return ring;
}
-void xgene_enet_clear_ring(struct xgene_enet_desc_ring *ring)
+static void xgene_enet_clear_ring(struct xgene_enet_desc_ring *ring)
{
u32 data;
bool is_bufpool;
@@ -186,6 +188,22 @@ out:
xgene_enet_clr_ring_state(ring);
}
+static void xgene_enet_wr_cmd(struct xgene_enet_desc_ring *ring, int count)
+{
+ iowrite32(count, ring->cmd);
+}
+
+static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring)
+{
+ u32 __iomem *cmd_base = ring->cmd_base;
+ u32 ring_state, num_msgs;
+
+ ring_state = ioread32(&cmd_base[1]);
+ num_msgs = GET_VAL(NUMMSGSINQ, ring_state);
+
+ return num_msgs;
+}
+
void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
struct xgene_enet_pdata *pdata,
enum xgene_enet_err_code status)
@@ -441,8 +459,48 @@ static void xgene_gmac_reset(struct xgene_enet_pdata *pdata)
xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, 0);
}
+static void xgene_enet_configure_clock(struct xgene_enet_pdata *pdata)
+{
+ struct device *dev = &pdata->pdev->dev;
+
+ if (dev->of_node) {
+ struct clk *parent = clk_get_parent(pdata->clk);
+
+ switch (pdata->phy_speed) {
+ case SPEED_10:
+ clk_set_rate(parent, 2500000);
+ break;
+ case SPEED_100:
+ clk_set_rate(parent, 25000000);
+ break;
+ default:
+ clk_set_rate(parent, 125000000);
+ break;
+ }
+ }
+#ifdef CONFIG_ACPI
+ else {
+ switch (pdata->phy_speed) {
+ case SPEED_10:
+ acpi_evaluate_object(ACPI_HANDLE(dev),
+ "S10", NULL, NULL);
+ break;
+ case SPEED_100:
+ acpi_evaluate_object(ACPI_HANDLE(dev),
+ "S100", NULL, NULL);
+ break;
+ default:
+ acpi_evaluate_object(ACPI_HANDLE(dev),
+ "S1G", NULL, NULL);
+ break;
+ }
+ }
+#endif
+}
+
static void xgene_gmac_init(struct xgene_enet_pdata *pdata)
{
+ struct device *dev = &pdata->pdev->dev;
u32 value, mc2;
u32 intf_ctl, rgmii;
u32 icm0, icm2;
@@ -458,12 +516,14 @@ static void xgene_gmac_init(struct xgene_enet_pdata *pdata)
switch (pdata->phy_speed) {
case SPEED_10:
ENET_INTERFACE_MODE2_SET(&mc2, 1);
+ intf_ctl &= ~(ENET_LHD_MODE | ENET_GHD_MODE);
CFG_MACMODE_SET(&icm0, 0);
CFG_WAITASYNCRD_SET(&icm2, 500);
rgmii &= ~CFG_SPEED_1250;
break;
case SPEED_100:
ENET_INTERFACE_MODE2_SET(&mc2, 1);
+ intf_ctl &= ~ENET_GHD_MODE;
intf_ctl |= ENET_LHD_MODE;
CFG_MACMODE_SET(&icm0, 1);
CFG_WAITASYNCRD_SET(&icm2, 80);
@@ -471,15 +531,23 @@ static void xgene_gmac_init(struct xgene_enet_pdata *pdata)
break;
default:
ENET_INTERFACE_MODE2_SET(&mc2, 2);
+ intf_ctl &= ~ENET_LHD_MODE;
intf_ctl |= ENET_GHD_MODE;
- CFG_TXCLK_MUXSEL0_SET(&rgmii, 4);
+ CFG_MACMODE_SET(&icm0, 2);
+ CFG_WAITASYNCRD_SET(&icm2, 0);
+ if (dev->of_node) {
+ CFG_TXCLK_MUXSEL0_SET(&rgmii, pdata->tx_delay);
+ CFG_RXCLK_MUXSEL0_SET(&rgmii, pdata->rx_delay);
+ }
+ rgmii |= CFG_SPEED_1250;
+
xgene_enet_rd_csr(pdata, DEBUG_REG_ADDR, &value);
value |= CFG_BYPASS_UNISEC_TX | CFG_BYPASS_UNISEC_RX;
xgene_enet_wr_csr(pdata, DEBUG_REG_ADDR, value);
break;
}
- mc2 |= FULL_DUPLEX2;
+ mc2 |= FULL_DUPLEX2 | PAD_CRC;
xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_2_ADDR, mc2);
xgene_enet_wr_mcx_mac(pdata, INTERFACE_CONTROL_ADDR, intf_ctl);
@@ -498,6 +566,7 @@ static void xgene_gmac_init(struct xgene_enet_pdata *pdata)
/* Rtype should be copied from FP */
xgene_enet_wr_csr(pdata, RSIF_RAM_DBG_REG0_ADDR, 0);
xgene_enet_wr_csr(pdata, RGMII_REG_0_ADDR, rgmii);
+ xgene_enet_configure_clock(pdata);
/* Rx-Tx traffic resume */
xgene_enet_wr_csr(pdata, CFG_LINK_AGGR_RESUME_0_ADDR, TX_PORT0);
@@ -593,7 +662,7 @@ static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
if (!xgene_ring_mgr_init(pdata))
return -ENODEV;
- if (pdata->clk) {
+ if (!IS_ERR(pdata->clk)) {
clk_prepare_enable(pdata->clk);
clk_disable_unprepare(pdata->clk);
clk_prepare_enable(pdata->clk);
@@ -612,7 +681,8 @@ static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
static void xgene_gport_shutdown(struct xgene_enet_pdata *pdata)
{
- clk_disable_unprepare(pdata->clk);
+ if (!IS_ERR(pdata->clk))
+ clk_disable_unprepare(pdata->clk);
}
static int xgene_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
@@ -671,16 +741,24 @@ static int xgene_enet_phy_connect(struct net_device *ndev)
netdev_dbg(ndev, "No phy-handle found in DT\n");
return -ENODEV;
}
- pdata->phy_dev = of_phy_find_device(phy_np);
- }
- phy_dev = pdata->phy_dev;
+ phy_dev = of_phy_connect(ndev, phy_np, &xgene_enet_adjust_link,
+ 0, pdata->phy_mode);
+ if (!phy_dev) {
+ netdev_err(ndev, "Could not connect to PHY\n");
+ return -ENODEV;
+ }
+
+ pdata->phy_dev = phy_dev;
+ } else {
+ phy_dev = pdata->phy_dev;
- if (!phy_dev ||
- phy_connect_direct(ndev, phy_dev, &xgene_enet_adjust_link,
- pdata->phy_mode)) {
- netdev_err(ndev, "Could not connect to PHY\n");
- return -ENODEV;
+ if (!phy_dev ||
+ phy_connect_direct(ndev, phy_dev, &xgene_enet_adjust_link,
+ pdata->phy_mode)) {
+ netdev_err(ndev, "Could not connect to PHY\n");
+ return -ENODEV;
+ }
}
pdata->phy_speed = SPEED_UNKNOWN;
@@ -734,7 +812,7 @@ static int xgene_mdiobus_register(struct xgene_enet_pdata *pdata,
if (ret)
return -EINVAL;
- phy = get_phy_device(mdio, phy_id, true);
+ phy = get_phy_device(mdio, phy_id, false);
if (!phy || IS_ERR(phy))
return -EIO;
@@ -783,6 +861,9 @@ int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata)
void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata)
{
+ if (pdata->phy_dev)
+ phy_disconnect(pdata->phy_dev);
+
mdiobus_unregister(pdata->mdio_bus);
mdiobus_free(pdata->mdio_bus);
pdata->mdio_bus = NULL;
@@ -803,3 +884,12 @@ struct xgene_port_ops xgene_gport_ops = {
.cle_bypass = xgene_enet_cle_bypass,
.shutdown = xgene_gport_shutdown,
};
+
+struct xgene_ring_ops xgene_ring1_ops = {
+ .num_ring_config = NUM_RING_CONFIG,
+ .num_ring_id_shift = 6,
+ .setup = xgene_enet_setup_ring,
+ .clear = xgene_enet_clear_ring,
+ .wr_cmd = xgene_enet_wr_cmd,
+ .len = xgene_enet_ring_len,
+};
diff --git a/kernel/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/kernel/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
index d9bc89d69..c153a1dc5 100644
--- a/kernel/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
+++ b/kernel/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
@@ -26,6 +26,7 @@
struct xgene_enet_pdata;
struct xgene_enet_stats;
+struct xgene_enet_desc_ring;
/* clears and then set bits */
static inline void xgene_set_bits(u32 *dst, u32 val, u32 start, u32 len)
@@ -101,8 +102,8 @@ enum xgene_enet_rm {
#define BLOCK_ETH_CSR_OFFSET 0x2000
#define BLOCK_ETH_RING_IF_OFFSET 0x9000
+#define BLOCK_ETH_CLKRST_CSR_OFFSET 0xc000
#define BLOCK_ETH_DIAG_CSR_OFFSET 0xD000
-
#define BLOCK_ETH_MAC_OFFSET 0x0000
#define BLOCK_ETH_MAC_CSR_OFFSET 0x2800
@@ -143,6 +144,7 @@ enum xgene_enet_rm {
#define CFG_BYPASS_UNISEC_RX BIT(1)
#define CFG_CLE_BYPASS_EN0 BIT(31)
#define CFG_TXCLK_MUXSEL0_SET(dst, val) xgene_set_bits(dst, val, 29, 3)
+#define CFG_RXCLK_MUXSEL0_SET(dst, val) xgene_set_bits(dst, val, 26, 3)
#define CFG_CLE_IP_PROTOCOL0_SET(dst, val) xgene_set_bits(dst, val, 16, 2)
#define CFG_CLE_DSTQID0_SET(dst, val) xgene_set_bits(dst, val, 0, 12)
@@ -179,6 +181,7 @@ enum xgene_enet_rm {
#define ENET_LHD_MODE BIT(25)
#define ENET_GHD_MODE BIT(26)
#define FULL_DUPLEX2 BIT(0)
+#define PAD_CRC BIT(2)
#define SCAN_AUTO_INCR BIT(5)
#define TBYT_ADDR 0x38
#define TPKT_ADDR 0x39
@@ -192,12 +195,16 @@ enum xgene_enet_rm {
#define USERINFO_LEN 32
#define FPQNUM_POS 32
#define FPQNUM_LEN 12
+#define NV_POS 50
+#define NV_LEN 1
+#define LL_POS 51
+#define LL_LEN 1
#define LERR_POS 60
#define LERR_LEN 3
#define STASH_POS 52
#define STASH_LEN 2
#define BUFDATALEN_POS 48
-#define BUFDATALEN_LEN 12
+#define BUFDATALEN_LEN 15
#define DATAADDR_POS 0
#define DATAADDR_LEN 42
#define COHERENT_POS 63
@@ -214,9 +221,19 @@ enum xgene_enet_rm {
#define IPHDR_LEN 6
#define EC_POS 22 /* Enable checksum */
#define EC_LEN 1
+#define ET_POS 23 /* Enable TSO */
#define IS_POS 24 /* IP protocol select */
#define IS_LEN 1
#define TYPE_ETH_WORK_MESSAGE_POS 44
+#define LL_BYTES_MSB_POS 56
+#define LL_BYTES_MSB_LEN 8
+#define LL_BYTES_LSB_POS 48
+#define LL_BYTES_LSB_LEN 12
+#define LL_LEN_POS 48
+#define LL_LEN_LEN 8
+#define DATALEN_MASK GENMASK(11, 0)
+
+#define LAST_BUFFER (0x7800ULL << BUFDATALEN_POS)
struct xgene_enet_raw_desc {
__le64 m0;
@@ -261,6 +278,7 @@ enum xgene_enet_ring_type {
enum xgene_ring_owner {
RING_OWNER_ETH0,
+ RING_OWNER_ETH1,
RING_OWNER_CPU = 15,
RING_OWNER_INVALID
};
@@ -314,9 +332,6 @@ static inline u16 xgene_enet_get_numslots(u16 id, u32 size)
size / WORK_DESC_SIZE;
}
-struct xgene_enet_desc_ring *xgene_enet_setup_ring(
- struct xgene_enet_desc_ring *ring);
-void xgene_enet_clear_ring(struct xgene_enet_desc_ring *ring);
void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
struct xgene_enet_pdata *pdata,
enum xgene_enet_err_code status);
@@ -327,5 +342,6 @@ bool xgene_ring_mgr_init(struct xgene_enet_pdata *p);
extern struct xgene_mac_ops xgene_gmac_ops;
extern struct xgene_port_ops xgene_gport_ops;
+extern struct xgene_ring_ops xgene_ring1_ops;
#endif /* __XGENE_ENET_HW_H__ */
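
Editor's note: the new NV/LL/LL_BYTES fields and the BUFDATALEN widening from 12 to 15 bits above are all consumed through the POS/LEN-driven SET_VAL/GET_VAL helpers declared in xgene_enet_main.h. Below is a standalone sketch of how such helpers pack and extract 64-bit descriptor words; the set_field/get_field bodies are an illustrative reconstruction consistent with the macro usage in this diff, not the kernel's implementation.

/* Standalone sketch of the POS/LEN descriptor-field helpers this header
 * relies on.  The helper bodies are illustrative reconstructions.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define BUFDATALEN_POS 48
#define BUFDATALEN_LEN 15   /* widened from 12 to 15 bits by this patch */
#define DATAADDR_POS   0
#define DATAADDR_LEN   42

static uint64_t set_field(int pos, int len, uint64_t val)
{
	return (val & ((1ULL << len) - 1)) << pos;
}

static uint64_t get_field(int pos, int len, uint64_t src)
{
	return (src >> pos) & ((1ULL << len) - 1);
}

int main(void)
{
	uint64_t dma_addr = 0x12345678ULL;
	uint64_t hw_len = 1500;

	/* pack a descriptor word, similar to how the TX path builds
	 * raw_desc->m1 (COHERENT bit omitted here) */
	uint64_t m1 = set_field(DATAADDR_POS, DATAADDR_LEN, dma_addr) |
		      set_field(BUFDATALEN_POS, BUFDATALEN_LEN, hw_len);

	assert(get_field(BUFDATALEN_POS, BUFDATALEN_LEN, m1) == hw_len);
	assert(get_field(DATAADDR_POS, DATAADDR_LEN, m1) == dma_addr);
	printf("m1 = 0x%016llx\n", (unsigned long long)m1);
	return 0;
}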
diff --git a/kernel/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/kernel/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 40d3530d7..d0ae1a6cc 100644
--- a/kernel/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/kernel/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -28,6 +28,9 @@
#define RES_RING_CSR 1
#define RES_RING_CMD 2
+static const struct of_device_id xgene_enet_of_match[];
+static const struct acpi_device_id xgene_enet_acpi_match[];
+
static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
struct xgene_enet_raw_desc16 *raw_desc;
@@ -48,6 +51,7 @@ static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
{
struct sk_buff *skb;
struct xgene_enet_raw_desc16 *raw_desc;
+ struct xgene_enet_pdata *pdata;
struct net_device *ndev;
struct device *dev;
dma_addr_t dma_addr;
@@ -58,6 +62,7 @@ static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
ndev = buf_pool->ndev;
dev = ndev_to_dev(buf_pool->ndev);
+ pdata = netdev_priv(ndev);
bufdatalen = BUF_LEN_CODE_2K | (SKB_BUFFER_SIZE & GENMASK(11, 0));
len = XGENE_ENET_MAX_MTU;
@@ -82,7 +87,7 @@ static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
tail = (tail + 1) & slots;
}
- iowrite32(nbuf, buf_pool->cmd);
+ pdata->ring_ops->wr_cmd(buf_pool, nbuf);
buf_pool->tail = tail;
return 0;
@@ -102,26 +107,16 @@ static u8 xgene_enet_hdr_len(const void *data)
return (eth->h_proto == htons(ETH_P_8021Q)) ? VLAN_ETH_HLEN : ETH_HLEN;
}
-static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring)
-{
- u32 __iomem *cmd_base = ring->cmd_base;
- u32 ring_state, num_msgs;
-
- ring_state = ioread32(&cmd_base[1]);
- num_msgs = ring_state & CREATE_MASK(NUMMSGSINQ_POS, NUMMSGSINQ_LEN);
-
- return num_msgs >> NUMMSGSINQ_POS;
-}
-
static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
+ struct xgene_enet_pdata *pdata = netdev_priv(buf_pool->ndev);
struct xgene_enet_raw_desc16 *raw_desc;
u32 slots = buf_pool->slots - 1;
u32 tail = buf_pool->tail;
u32 userinfo;
int i, len;
- len = xgene_enet_ring_len(buf_pool);
+ len = pdata->ring_ops->len(buf_pool);
for (i = 0; i < len; i++) {
tail = (tail - 1) & slots;
raw_desc = &buf_pool->raw_desc16[tail];
@@ -131,7 +126,7 @@ static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool)
dev_kfree_skb_any(buf_pool->rx_skb[userinfo]);
}
- iowrite32(-len, buf_pool->cmd);
+ pdata->ring_ops->wr_cmd(buf_pool, -len);
buf_pool->tail = tail;
}
@@ -152,18 +147,27 @@ static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
{
struct sk_buff *skb;
struct device *dev;
+ skb_frag_t *frag;
+ dma_addr_t *frag_dma_addr;
u16 skb_index;
u8 status;
- int ret = 0;
+ int i, ret = 0;
skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
skb = cp_ring->cp_skb[skb_index];
+ frag_dma_addr = &cp_ring->frag_dma_addr[skb_index * MAX_SKB_FRAGS];
dev = ndev_to_dev(cp_ring->ndev);
dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
- GET_VAL(BUFDATALEN, le64_to_cpu(raw_desc->m1)),
+ skb_headlen(skb),
DMA_TO_DEVICE);
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ frag = &skb_shinfo(skb)->frags[i];
+ dma_unmap_page(dev, frag_dma_addr[i], skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ }
+
/* Checking for error */
status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
if (unlikely(status > 2)) {
@@ -184,12 +188,16 @@ static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
static u64 xgene_enet_work_msg(struct sk_buff *skb)
{
+ struct net_device *ndev = skb->dev;
+ struct xgene_enet_pdata *pdata = netdev_priv(ndev);
struct iphdr *iph;
- u8 l3hlen, l4hlen = 0;
- u8 csum_enable = 0;
- u8 proto = 0;
- u8 ethhdr;
- u64 hopinfo;
+ u8 l3hlen = 0, l4hlen = 0;
+ u8 ethhdr, proto = 0, csum_enable = 0;
+ u64 hopinfo = 0;
+ u32 hdr_len, mss = 0;
+ u32 i, len, nr_frags;
+
+ ethhdr = xgene_enet_hdr_len(skb->data);
if (unlikely(skb->protocol != htons(ETH_P_IP)) &&
unlikely(skb->protocol != htons(ETH_P_8021Q)))
@@ -206,14 +214,40 @@ static u64 xgene_enet_work_msg(struct sk_buff *skb)
l4hlen = tcp_hdrlen(skb) >> 2;
csum_enable = 1;
proto = TSO_IPPROTO_TCP;
+ if (ndev->features & NETIF_F_TSO) {
+ hdr_len = ethhdr + ip_hdrlen(skb) + tcp_hdrlen(skb);
+ mss = skb_shinfo(skb)->gso_size;
+
+ if (skb_is_nonlinear(skb)) {
+ len = skb_headlen(skb);
+ nr_frags = skb_shinfo(skb)->nr_frags;
+
+ for (i = 0; i < 2 && i < nr_frags; i++)
+ len += skb_shinfo(skb)->frags[i].size;
+
+ /* HW requires header must reside in 3 buffer */
+ if (unlikely(hdr_len > len)) {
+ if (skb_linearize(skb))
+ return 0;
+ }
+ }
+
+ if (!mss || ((skb->len - hdr_len) <= mss))
+ goto out;
+
+ if (mss != pdata->mss) {
+ pdata->mss = mss;
+ pdata->mac_ops->set_mss(pdata);
+ }
+ hopinfo |= SET_BIT(ET);
+ }
} else if (iph->protocol == IPPROTO_UDP) {
l4hlen = UDP_HDR_SIZE;
csum_enable = 1;
}
out:
l3hlen = ip_hdrlen(skb) >> 2;
- ethhdr = xgene_enet_hdr_len(skb->data);
- hopinfo = SET_VAL(TCPHDR, l4hlen) |
+ hopinfo |= SET_VAL(TCPHDR, l4hlen) |
SET_VAL(IPHDR, l3hlen) |
SET_VAL(ETHHDR, ethhdr) |
SET_VAL(EC, csum_enable) |
@@ -224,35 +258,172 @@ out:
return hopinfo;
}
+static u16 xgene_enet_encode_len(u16 len)
+{
+ return (len == BUFLEN_16K) ? 0 : len;
+}
+
+static void xgene_set_addr_len(__le64 *desc, u32 idx, dma_addr_t addr, u32 len)
+{
+ desc[idx ^ 1] = cpu_to_le64(SET_VAL(DATAADDR, addr) |
+ SET_VAL(BUFDATALEN, len));
+}
+
+static __le64 *xgene_enet_get_exp_bufs(struct xgene_enet_desc_ring *ring)
+{
+ __le64 *exp_bufs;
+
+ exp_bufs = &ring->exp_bufs[ring->exp_buf_tail * MAX_EXP_BUFFS];
+ memset(exp_bufs, 0, sizeof(__le64) * MAX_EXP_BUFFS);
+ ring->exp_buf_tail = (ring->exp_buf_tail + 1) & ((ring->slots / 2) - 1);
+
+ return exp_bufs;
+}
+
+static dma_addr_t *xgene_get_frag_dma_array(struct xgene_enet_desc_ring *ring)
+{
+ return &ring->cp_ring->frag_dma_addr[ring->tail * MAX_SKB_FRAGS];
+}
+
static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring,
struct sk_buff *skb)
{
struct device *dev = ndev_to_dev(tx_ring->ndev);
+ struct xgene_enet_pdata *pdata = netdev_priv(tx_ring->ndev);
struct xgene_enet_raw_desc *raw_desc;
- dma_addr_t dma_addr;
+ __le64 *exp_desc = NULL, *exp_bufs = NULL;
+ dma_addr_t dma_addr, pbuf_addr, *frag_dma_addr;
+ skb_frag_t *frag;
u16 tail = tx_ring->tail;
u64 hopinfo;
+ u32 len, hw_len;
+ u8 ll = 0, nv = 0, idx = 0;
+ bool split = false;
+ u32 size, offset, ell_bytes = 0;
+ u32 i, fidx, nr_frags, count = 1;
raw_desc = &tx_ring->raw_desc[tail];
+ tail = (tail + 1) & (tx_ring->slots - 1);
memset(raw_desc, 0, sizeof(struct xgene_enet_raw_desc));
- dma_addr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
+ hopinfo = xgene_enet_work_msg(skb);
+ if (!hopinfo)
+ return -EINVAL;
+ raw_desc->m3 = cpu_to_le64(SET_VAL(HENQNUM, tx_ring->dst_ring_num) |
+ hopinfo);
+
+ len = skb_headlen(skb);
+ hw_len = xgene_enet_encode_len(len);
+
+ dma_addr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma_addr)) {
netdev_err(tx_ring->ndev, "DMA mapping error\n");
return -EINVAL;
}
/* Hardware expects descriptor in little endian format */
- raw_desc->m0 = cpu_to_le64(tail);
raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
- SET_VAL(BUFDATALEN, skb->len) |
+ SET_VAL(BUFDATALEN, hw_len) |
SET_BIT(COHERENT));
- hopinfo = xgene_enet_work_msg(skb);
- raw_desc->m3 = cpu_to_le64(SET_VAL(HENQNUM, tx_ring->dst_ring_num) |
- hopinfo);
- tx_ring->cp_ring->cp_skb[tail] = skb;
- return 0;
+ if (!skb_is_nonlinear(skb))
+ goto out;
+
+ /* scatter gather */
+ nv = 1;
+ exp_desc = (void *)&tx_ring->raw_desc[tail];
+ tail = (tail + 1) & (tx_ring->slots - 1);
+ memset(exp_desc, 0, sizeof(struct xgene_enet_raw_desc));
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ for (i = nr_frags; i < 4 ; i++)
+ exp_desc[i ^ 1] = cpu_to_le64(LAST_BUFFER);
+
+ frag_dma_addr = xgene_get_frag_dma_array(tx_ring);
+
+ for (i = 0, fidx = 0; split || (fidx < nr_frags); i++) {
+ if (!split) {
+ frag = &skb_shinfo(skb)->frags[fidx];
+ size = skb_frag_size(frag);
+ offset = 0;
+
+ pbuf_addr = skb_frag_dma_map(dev, frag, 0, size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, pbuf_addr))
+ return -EINVAL;
+
+ frag_dma_addr[fidx] = pbuf_addr;
+ fidx++;
+
+ if (size > BUFLEN_16K)
+ split = true;
+ }
+
+ if (size > BUFLEN_16K) {
+ len = BUFLEN_16K;
+ size -= BUFLEN_16K;
+ } else {
+ len = size;
+ split = false;
+ }
+
+ dma_addr = pbuf_addr + offset;
+ hw_len = xgene_enet_encode_len(len);
+
+ switch (i) {
+ case 0:
+ case 1:
+ case 2:
+ xgene_set_addr_len(exp_desc, i, dma_addr, hw_len);
+ break;
+ case 3:
+ if (split || (fidx != nr_frags)) {
+ exp_bufs = xgene_enet_get_exp_bufs(tx_ring);
+ xgene_set_addr_len(exp_bufs, idx, dma_addr,
+ hw_len);
+ idx++;
+ ell_bytes += len;
+ } else {
+ xgene_set_addr_len(exp_desc, i, dma_addr,
+ hw_len);
+ }
+ break;
+ default:
+ xgene_set_addr_len(exp_bufs, idx, dma_addr, hw_len);
+ idx++;
+ ell_bytes += len;
+ break;
+ }
+
+ if (split)
+ offset += BUFLEN_16K;
+ }
+ count++;
+
+ if (idx) {
+ ll = 1;
+ dma_addr = dma_map_single(dev, exp_bufs,
+ sizeof(u64) * MAX_EXP_BUFFS,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma_addr)) {
+ dev_kfree_skb_any(skb);
+ return -EINVAL;
+ }
+ i = ell_bytes >> LL_BYTES_LSB_LEN;
+ exp_desc[2] = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
+ SET_VAL(LL_BYTES_MSB, i) |
+ SET_VAL(LL_LEN, idx));
+ raw_desc->m2 = cpu_to_le64(SET_VAL(LL_BYTES_LSB, ell_bytes));
+ }
+
+out:
+ raw_desc->m0 = cpu_to_le64(SET_VAL(LL, ll) | SET_VAL(NV, nv) |
+ SET_VAL(USERINFO, tx_ring->tail));
+ tx_ring->cp_ring->cp_skb[tx_ring->tail] = skb;
+ pdata->tx_level += count;
+ tx_ring->tail = tail;
+
+ return count;
}
static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
@@ -260,29 +431,32 @@ static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
{
struct xgene_enet_pdata *pdata = netdev_priv(ndev);
struct xgene_enet_desc_ring *tx_ring = pdata->tx_ring;
- struct xgene_enet_desc_ring *cp_ring = tx_ring->cp_ring;
- u32 tx_level, cq_level;
+ u32 tx_level = pdata->tx_level;
+ int count;
+
+ if (tx_level < pdata->txc_level)
+ tx_level += ((typeof(pdata->tx_level))~0U);
- tx_level = xgene_enet_ring_len(tx_ring);
- cq_level = xgene_enet_ring_len(cp_ring);
- if (unlikely(tx_level > pdata->tx_qcnt_hi ||
- cq_level > pdata->cp_qcnt_hi)) {
+ if ((tx_level - pdata->txc_level) > pdata->tx_qcnt_hi) {
netif_stop_queue(ndev);
return NETDEV_TX_BUSY;
}
- if (xgene_enet_setup_tx_desc(tx_ring, skb)) {
+ if (skb_padto(skb, XGENE_MIN_ENET_FRAME_SIZE))
+ return NETDEV_TX_OK;
+
+ count = xgene_enet_setup_tx_desc(tx_ring, skb);
+ if (count <= 0) {
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
- iowrite32(1, tx_ring->cmd);
skb_tx_timestamp(skb);
- tx_ring->tail = (tx_ring->tail + 1) & (tx_ring->slots - 1);
pdata->stats.tx_packets++;
pdata->stats.tx_bytes += skb->len;
+ pdata->ring_ops->wr_cmd(tx_ring, count);
return NETDEV_TX_OK;
}
@@ -331,7 +505,7 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
/* strip off CRC as HW isn't doing this */
datalen = GET_VAL(BUFDATALEN, le64_to_cpu(raw_desc->m1));
- datalen -= 4;
+ datalen = (datalen & DATALEN_MASK) - 4;
prefetch(skb->data - NET_IP_ALIGN);
skb_put(skb, datalen);
@@ -363,42 +537,64 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
int budget)
{
struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
- struct xgene_enet_raw_desc *raw_desc;
+ struct xgene_enet_raw_desc *raw_desc, *exp_desc;
u16 head = ring->head;
u16 slots = ring->slots - 1;
- int ret, count = 0;
+ int ret, desc_count, count = 0, processed = 0;
+ bool is_completion;
do {
raw_desc = &ring->raw_desc[head];
+ desc_count = 0;
+ is_completion = false;
+ exp_desc = NULL;
if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc)))
break;
/* read fpqnum field after dataaddr field */
dma_rmb();
- if (is_rx_desc(raw_desc))
+ if (GET_BIT(NV, le64_to_cpu(raw_desc->m0))) {
+ head = (head + 1) & slots;
+ exp_desc = &ring->raw_desc[head];
+
+ if (unlikely(xgene_enet_is_desc_slot_empty(exp_desc))) {
+ head = (head - 1) & slots;
+ break;
+ }
+ dma_rmb();
+ count++;
+ desc_count++;
+ }
+ if (is_rx_desc(raw_desc)) {
ret = xgene_enet_rx_frame(ring, raw_desc);
- else
+ } else {
ret = xgene_enet_tx_completion(ring, raw_desc);
+ is_completion = true;
+ }
xgene_enet_mark_desc_slot_empty(raw_desc);
+ if (exp_desc)
+ xgene_enet_mark_desc_slot_empty(exp_desc);
head = (head + 1) & slots;
count++;
+ desc_count++;
+ processed++;
+ if (is_completion)
+ pdata->txc_level += desc_count;
if (ret)
break;
} while (--budget);
if (likely(count)) {
- iowrite32(-count, ring->cmd);
+ pdata->ring_ops->wr_cmd(ring, -count);
ring->head = head;
- if (netif_queue_stopped(ring->ndev)) {
- if (xgene_enet_ring_len(ring) < pdata->cp_qcnt_low)
- netif_wake_queue(ring->ndev);
- }
+ if (netif_queue_stopped(ring->ndev))
+ netif_start_queue(ring->ndev);
}
- return count;
+ return processed;
}
static int xgene_enet_napi(struct napi_struct *napi, const int budget)
@@ -500,10 +696,10 @@ static int xgene_enet_open(struct net_device *ndev)
mac_ops->tx_enable(pdata);
mac_ops->rx_enable(pdata);
+ xgene_enet_napi_enable(pdata);
ret = xgene_enet_register_irq(ndev);
if (ret)
return ret;
- xgene_enet_napi_enable(pdata);
if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
phy_start(pdata->phy_dev);
@@ -527,13 +723,13 @@ static int xgene_enet_close(struct net_device *ndev)
else
cancel_delayed_work_sync(&pdata->link_work);
- xgene_enet_napi_disable(pdata);
- xgene_enet_free_irq(ndev);
- xgene_enet_process_ring(pdata->rx_ring, -1);
-
mac_ops->tx_disable(pdata);
mac_ops->rx_disable(pdata);
+ xgene_enet_free_irq(ndev);
+ xgene_enet_napi_disable(pdata);
+ xgene_enet_process_ring(pdata->rx_ring, -1);
+
return 0;
}
@@ -545,7 +741,7 @@ static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring)
pdata = netdev_priv(ring->ndev);
dev = ndev_to_dev(ring->ndev);
- xgene_enet_clear_ring(ring);
+ pdata->ring_ops->clear(ring);
dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
}
@@ -598,15 +794,17 @@ static int xgene_enet_get_ring_size(struct device *dev,
static void xgene_enet_free_desc_ring(struct xgene_enet_desc_ring *ring)
{
+ struct xgene_enet_pdata *pdata;
struct device *dev;
if (!ring)
return;
dev = ndev_to_dev(ring->ndev);
+ pdata = netdev_priv(ring->ndev);
if (ring->desc_addr) {
- xgene_enet_clear_ring(ring);
+ pdata->ring_ops->clear(ring);
dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
}
devm_kfree(dev, ring);
@@ -637,6 +835,25 @@ static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata)
}
}
+static bool is_irq_mbox_required(struct xgene_enet_pdata *pdata,
+ struct xgene_enet_desc_ring *ring)
+{
+ if ((pdata->enet_id == XGENE_ENET2) &&
+ (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU)) {
+ return true;
+ }
+
+ return false;
+}
+
+static void __iomem *xgene_enet_ring_cmd_base(struct xgene_enet_pdata *pdata,
+ struct xgene_enet_desc_ring *ring)
+{
+ u8 num_ring_id_shift = pdata->ring_ops->num_ring_id_shift;
+
+ return pdata->ring_cmd_addr + (ring->num << num_ring_id_shift);
+}
+
static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
struct net_device *ndev, u32 ring_num,
enum xgene_enet_ring_cfgsize cfgsize, u32 ring_id)
@@ -668,9 +885,20 @@ static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
}
ring->size = size;
- ring->cmd_base = pdata->ring_cmd_addr + (ring->num << 6);
+ if (is_irq_mbox_required(pdata, ring)) {
+ ring->irq_mbox_addr = dma_zalloc_coherent(dev, INTR_MBOX_SIZE,
+ &ring->irq_mbox_dma, GFP_KERNEL);
+ if (!ring->irq_mbox_addr) {
+ dma_free_coherent(dev, size, ring->desc_addr,
+ ring->dma);
+ devm_kfree(dev, ring);
+ return NULL;
+ }
+ }
+
+ ring->cmd_base = xgene_enet_ring_cmd_base(pdata, ring);
ring->cmd = ring->cmd_base + INC_DEC_CMD_ADDR;
- ring = xgene_enet_setup_ring(ring);
+ ring = pdata->ring_ops->setup(ring);
netdev_dbg(ndev, "ring info: num=%d size=%d id=%d slots=%d\n",
ring->num, ring->size, ring->id, ring->slots);
@@ -682,20 +910,44 @@ static u16 xgene_enet_get_ring_id(enum xgene_ring_owner owner, u8 bufnum)
return (owner << 6) | (bufnum & GENMASK(5, 0));
}
+static enum xgene_ring_owner xgene_derive_ring_owner(struct xgene_enet_pdata *p)
+{
+ enum xgene_ring_owner owner;
+
+ if (p->enet_id == XGENE_ENET1) {
+ switch (p->phy_mode) {
+ case PHY_INTERFACE_MODE_SGMII:
+ owner = RING_OWNER_ETH0;
+ break;
+ default:
+ owner = (!p->port_id) ? RING_OWNER_ETH0 :
+ RING_OWNER_ETH1;
+ break;
+ }
+ } else {
+ owner = (!p->port_id) ? RING_OWNER_ETH0 : RING_OWNER_ETH1;
+ }
+
+ return owner;
+}
+
static int xgene_enet_create_desc_rings(struct net_device *ndev)
{
struct xgene_enet_pdata *pdata = netdev_priv(ndev);
struct device *dev = ndev_to_dev(ndev);
struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
struct xgene_enet_desc_ring *buf_pool = NULL;
+ enum xgene_ring_owner owner;
+ dma_addr_t dma_exp_bufs;
u8 cpu_bufnum = pdata->cpu_bufnum;
u8 eth_bufnum = pdata->eth_bufnum;
u8 bp_bufnum = pdata->bp_bufnum;
u16 ring_num = pdata->ring_num;
u16 ring_id;
- int ret;
+ int ret, size;
/* allocate rx descriptor ring */
+ owner = xgene_derive_ring_owner(pdata);
ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++);
rx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
RING_CFGSIZE_16KB, ring_id);
@@ -705,7 +957,8 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
}
/* allocate buffer pool for receiving packets */
- ring_id = xgene_enet_get_ring_id(RING_OWNER_ETH0, bp_bufnum++);
+ owner = xgene_derive_ring_owner(pdata);
+ ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++);
buf_pool = xgene_enet_create_desc_ring(ndev, ring_num++,
RING_CFGSIZE_2KB, ring_id);
if (!buf_pool) {
@@ -734,13 +987,23 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
pdata->rx_ring = rx_ring;
/* allocate tx descriptor ring */
- ring_id = xgene_enet_get_ring_id(RING_OWNER_ETH0, eth_bufnum++);
+ owner = xgene_derive_ring_owner(pdata);
+ ring_id = xgene_enet_get_ring_id(owner, eth_bufnum++);
tx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
RING_CFGSIZE_16KB, ring_id);
if (!tx_ring) {
ret = -ENOMEM;
goto err;
}
+
+ size = (tx_ring->slots / 2) * sizeof(__le64) * MAX_EXP_BUFFS;
+ tx_ring->exp_bufs = dma_zalloc_coherent(dev, size, &dma_exp_bufs,
+ GFP_KERNEL);
+ if (!tx_ring->exp_bufs) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
pdata->tx_ring = tx_ring;
if (!pdata->cq_cnt) {
@@ -765,12 +1028,20 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
ret = -ENOMEM;
goto err;
}
+
+ size = sizeof(dma_addr_t) * MAX_SKB_FRAGS;
+ cp_ring->frag_dma_addr = devm_kcalloc(dev, tx_ring->slots,
+ size, GFP_KERNEL);
+ if (!cp_ring->frag_dma_addr) {
+ devm_kfree(dev, cp_ring->cp_skb);
+ ret = -ENOMEM;
+ goto err;
+ }
+
pdata->tx_ring->cp_ring = cp_ring;
pdata->tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring);
- pdata->tx_qcnt_hi = pdata->tx_ring->slots / 2;
- pdata->cp_qcnt_hi = pdata->rx_ring->slots / 2;
- pdata->cp_qcnt_low = pdata->cp_qcnt_hi / 2;
+ pdata->tx_qcnt_hi = pdata->tx_ring->slots - 128;
return 0;
@@ -818,55 +1089,80 @@ static const struct net_device_ops xgene_ndev_ops = {
.ndo_set_mac_address = xgene_enet_set_mac_address,
};
-static int xgene_get_port_id(struct device *dev, struct xgene_enet_pdata *pdata)
+#ifdef CONFIG_ACPI
+static int xgene_get_port_id_acpi(struct device *dev,
+ struct xgene_enet_pdata *pdata)
{
- u32 id = 0;
- int ret;
+ acpi_status status;
+ u64 temp;
- ret = device_property_read_u32(dev, "port-id", &id);
- if (!ret && id > 1) {
- dev_err(dev, "Incorrect port-id specified\n");
- return -ENODEV;
+ status = acpi_evaluate_integer(ACPI_HANDLE(dev), "_SUN", NULL, &temp);
+ if (ACPI_FAILURE(status)) {
+ pdata->port_id = 0;
+ } else {
+ pdata->port_id = temp;
}
- pdata->port_id = id;
-
return 0;
}
+#endif
-static int xgene_get_mac_address(struct device *dev,
- unsigned char *addr)
+static int xgene_get_port_id_dt(struct device *dev, struct xgene_enet_pdata *pdata)
{
+ u32 id = 0;
int ret;
- ret = device_property_read_u8_array(dev, "local-mac-address", addr, 6);
- if (ret)
- ret = device_property_read_u8_array(dev, "mac-address",
- addr, 6);
- if (ret)
- return -ENODEV;
+ ret = of_property_read_u32(dev->of_node, "port-id", &id);
+ if (ret) {
+ pdata->port_id = 0;
+ ret = 0;
+ } else {
+ pdata->port_id = id & BIT(0);
+ }
- return ETH_ALEN;
+ return ret;
}
-static int xgene_get_phy_mode(struct device *dev)
+static int xgene_get_tx_delay(struct xgene_enet_pdata *pdata)
{
- int i, ret;
- char *modestr;
+ struct device *dev = &pdata->pdev->dev;
+ int delay, ret;
- ret = device_property_read_string(dev, "phy-connection-type",
- (const char **)&modestr);
- if (ret)
- ret = device_property_read_string(dev, "phy-mode",
- (const char **)&modestr);
- if (ret)
- return -ENODEV;
+ ret = of_property_read_u32(dev->of_node, "tx-delay", &delay);
+ if (ret) {
+ pdata->tx_delay = 4;
+ return 0;
+ }
- for (i = 0; i < PHY_INTERFACE_MODE_MAX; i++) {
- if (!strcasecmp(modestr, phy_modes(i)))
- return i;
+ if (delay < 0 || delay > 7) {
+ dev_err(dev, "Invalid tx-delay specified\n");
+ return -EINVAL;
}
- return -ENODEV;
+
+ pdata->tx_delay = delay;
+
+ return 0;
+}
+
+static int xgene_get_rx_delay(struct xgene_enet_pdata *pdata)
+{
+ struct device *dev = &pdata->pdev->dev;
+ int delay, ret;
+
+ ret = of_property_read_u32(dev->of_node, "rx-delay", &delay);
+ if (ret) {
+ pdata->rx_delay = 2;
+ return 0;
+ }
+
+ if (delay < 0 || delay > 7) {
+ dev_err(dev, "Invalid rx-delay specified\n");
+ return -EINVAL;
+ }
+
+ pdata->rx_delay = delay;
+
+ return 0;
}
static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
@@ -876,7 +1172,8 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
struct device *dev;
struct resource *res;
void __iomem *base_addr;
- int ret;
+ u32 offset;
+ int ret = 0;
pdev = pdata->pdev;
dev = &pdev->dev;
@@ -917,16 +1214,21 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
return -ENOMEM;
}
- ret = xgene_get_port_id(dev, pdata);
+ if (dev->of_node)
+ ret = xgene_get_port_id_dt(dev, pdata);
+#ifdef CONFIG_ACPI
+ else
+ ret = xgene_get_port_id_acpi(dev, pdata);
+#endif
if (ret)
return ret;
- if (xgene_get_mac_address(dev, ndev->dev_addr) != ETH_ALEN)
+ if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN))
eth_hw_addr_random(ndev);
memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
- pdata->phy_mode = xgene_get_phy_mode(dev);
+ pdata->phy_mode = device_get_phy_mode(dev);
if (pdata->phy_mode < 0) {
dev_err(dev, "Unable to get phy-connection-type\n");
return pdata->phy_mode;
@@ -938,6 +1240,14 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
return -ENODEV;
}
+ ret = xgene_get_tx_delay(pdata);
+ if (ret)
+ return ret;
+
+ ret = xgene_get_rx_delay(pdata);
+ if (ret)
+ return ret;
+
ret = platform_get_irq(pdev, 0);
if (ret <= 0) {
dev_err(dev, "Unable to get ENET Rx IRQ\n");
@@ -949,27 +1259,35 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
if (pdata->phy_mode != PHY_INTERFACE_MODE_RGMII) {
ret = platform_get_irq(pdev, 1);
if (ret <= 0) {
- dev_err(dev, "Unable to get ENET Tx completion IRQ\n");
- ret = ret ? : -ENXIO;
- return ret;
+ pdata->cq_cnt = 0;
+ dev_info(dev, "Unable to get Tx completion IRQ,"
+ "using Rx IRQ instead\n");
+ } else {
+ pdata->cq_cnt = XGENE_MAX_TXC_RINGS;
+ pdata->txc_irq = ret;
}
- pdata->txc_irq = ret;
}
pdata->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(pdata->clk)) {
/* Firmware may have set up the clock already. */
- pdata->clk = NULL;
+ dev_info(dev, "clocks have been setup already\n");
}
- base_addr = pdata->base_addr - (pdata->port_id * MAC_OFFSET);
+ if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII)
+ base_addr = pdata->base_addr - (pdata->port_id * MAC_OFFSET);
+ else
+ base_addr = pdata->base_addr;
pdata->eth_csr_addr = base_addr + BLOCK_ETH_CSR_OFFSET;
pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET;
pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET;
if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII ||
pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
pdata->mcx_mac_addr = pdata->base_addr + BLOCK_ETH_MAC_OFFSET;
- pdata->mcx_mac_csr_addr = base_addr + BLOCK_ETH_MAC_CSR_OFFSET;
+ offset = (pdata->enet_id == XGENE_ENET1) ?
+ BLOCK_ETH_MAC_CSR_OFFSET :
+ X2_BLOCK_ETH_MAC_CSR_OFFSET;
+ pdata->mcx_mac_csr_addr = base_addr + offset;
} else {
pdata->mcx_mac_addr = base_addr + BLOCK_AXG_MAC_OFFSET;
pdata->mcx_mac_csr_addr = base_addr + BLOCK_AXG_MAC_CSR_OFFSET;
@@ -1024,33 +1342,59 @@ static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata)
pdata->mac_ops = &xgene_sgmac_ops;
pdata->port_ops = &xgene_sgport_ops;
pdata->rm = RM1;
- pdata->cq_cnt = XGENE_MAX_TXC_RINGS;
break;
default:
pdata->mac_ops = &xgene_xgmac_ops;
pdata->port_ops = &xgene_xgport_ops;
pdata->rm = RM0;
- pdata->cq_cnt = XGENE_MAX_TXC_RINGS;
break;
}
- switch (pdata->port_id) {
- case 0:
- pdata->cpu_bufnum = START_CPU_BUFNUM_0;
- pdata->eth_bufnum = START_ETH_BUFNUM_0;
- pdata->bp_bufnum = START_BP_BUFNUM_0;
- pdata->ring_num = START_RING_NUM_0;
- break;
- case 1:
- pdata->cpu_bufnum = START_CPU_BUFNUM_1;
- pdata->eth_bufnum = START_ETH_BUFNUM_1;
- pdata->bp_bufnum = START_BP_BUFNUM_1;
- pdata->ring_num = START_RING_NUM_1;
- break;
- default:
- break;
+ if (pdata->enet_id == XGENE_ENET1) {
+ switch (pdata->port_id) {
+ case 0:
+ pdata->cpu_bufnum = START_CPU_BUFNUM_0;
+ pdata->eth_bufnum = START_ETH_BUFNUM_0;
+ pdata->bp_bufnum = START_BP_BUFNUM_0;
+ pdata->ring_num = START_RING_NUM_0;
+ break;
+ case 1:
+ if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
+ pdata->cpu_bufnum = XG_START_CPU_BUFNUM_1;
+ pdata->eth_bufnum = XG_START_ETH_BUFNUM_1;
+ pdata->bp_bufnum = XG_START_BP_BUFNUM_1;
+ pdata->ring_num = XG_START_RING_NUM_1;
+ } else {
+ pdata->cpu_bufnum = START_CPU_BUFNUM_1;
+ pdata->eth_bufnum = START_ETH_BUFNUM_1;
+ pdata->bp_bufnum = START_BP_BUFNUM_1;
+ pdata->ring_num = START_RING_NUM_1;
+ }
+ break;
+ default:
+ break;
+ }
+ pdata->ring_ops = &xgene_ring1_ops;
+ } else {
+ switch (pdata->port_id) {
+ case 0:
+ pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
+ pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
+ pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
+ pdata->ring_num = X2_START_RING_NUM_0;
+ break;
+ case 1:
+ pdata->cpu_bufnum = X2_START_CPU_BUFNUM_1;
+ pdata->eth_bufnum = X2_START_ETH_BUFNUM_1;
+ pdata->bp_bufnum = X2_START_BP_BUFNUM_1;
+ pdata->ring_num = X2_START_RING_NUM_1;
+ break;
+ default:
+ break;
+ }
+ pdata->rm = RM0;
+ pdata->ring_ops = &xgene_ring2_ops;
}
-
}
static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata)
@@ -1086,6 +1430,7 @@ static int xgene_enet_probe(struct platform_device *pdev)
struct xgene_enet_pdata *pdata;
struct device *dev = &pdev->dev;
struct xgene_mac_ops *mac_ops;
+ const struct of_device_id *of_id;
int ret;
ndev = alloc_etherdev(sizeof(struct xgene_enet_pdata));
@@ -1102,7 +1447,26 @@ static int xgene_enet_probe(struct platform_device *pdev)
xgene_enet_set_ethtool_ops(ndev);
ndev->features |= NETIF_F_IP_CSUM |
NETIF_F_GSO |
- NETIF_F_GRO;
+ NETIF_F_GRO |
+ NETIF_F_SG;
+
+ of_id = of_match_device(xgene_enet_of_match, &pdev->dev);
+ if (of_id) {
+ pdata->enet_id = (enum xgene_enet_id)of_id->data;
+ }
+#ifdef CONFIG_ACPI
+ else {
+ const struct acpi_device_id *acpi_id;
+
+ acpi_id = acpi_match_device(xgene_enet_acpi_match, &pdev->dev);
+ if (acpi_id)
+ pdata->enet_id = (enum xgene_enet_id) acpi_id->driver_data;
+ }
+#endif
+ if (!pdata->enet_id) {
+ free_netdev(ndev);
+ return -ENODEV;
+ }
ret = xgene_enet_get_resources(pdata);
if (ret)
@@ -1110,11 +1474,11 @@ static int xgene_enet_probe(struct platform_device *pdev)
xgene_enet_setup_ops(pdata);
- ret = register_netdev(ndev);
- if (ret) {
- netdev_err(ndev, "Failed to register netdev\n");
- goto err;
+ if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
+ ndev->features |= NETIF_F_TSO;
+ pdata->mss = XGENE_ENET_MSS;
}
+ ndev->hw_features = ndev->features;
ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
if (ret) {
@@ -1122,18 +1486,27 @@ static int xgene_enet_probe(struct platform_device *pdev)
goto err;
}
+ ret = register_netdev(ndev);
+ if (ret) {
+ netdev_err(ndev, "Failed to register netdev\n");
+ goto err;
+ }
+
ret = xgene_enet_init_hw(pdata);
if (ret)
goto err;
- xgene_enet_napi_add(pdata);
mac_ops = pdata->mac_ops;
- if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
+ if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) {
ret = xgene_enet_mdio_config(pdata);
- else
+ if (ret)
+ goto err;
+ } else {
INIT_DELAYED_WORK(&pdata->link_work, mac_ops->link_state);
+ }
- return ret;
+ xgene_enet_napi_add(pdata);
+ return 0;
err:
unregister_netdev(ndev);
free_netdev(ndev);
@@ -1154,9 +1527,10 @@ static int xgene_enet_remove(struct platform_device *pdev)
mac_ops->tx_disable(pdata);
xgene_enet_napi_del(pdata);
- xgene_enet_mdio_remove(pdata);
- xgene_enet_delete_desc_rings(pdata);
+ if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
+ xgene_enet_mdio_remove(pdata);
unregister_netdev(ndev);
+ xgene_enet_delete_desc_rings(pdata);
pdata->port_ops->shutdown(pdata);
free_netdev(ndev);
@@ -1165,9 +1539,12 @@ static int xgene_enet_remove(struct platform_device *pdev)
#ifdef CONFIG_ACPI
static const struct acpi_device_id xgene_enet_acpi_match[] = {
- { "APMC0D05", },
- { "APMC0D30", },
- { "APMC0D31", },
+ { "APMC0D05", XGENE_ENET1},
+ { "APMC0D30", XGENE_ENET1},
+ { "APMC0D31", XGENE_ENET1},
+ { "APMC0D3F", XGENE_ENET1},
+ { "APMC0D26", XGENE_ENET2},
+ { "APMC0D25", XGENE_ENET2},
{ }
};
MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
@@ -1175,9 +1552,11 @@ MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
#ifdef CONFIG_OF
static const struct of_device_id xgene_enet_of_match[] = {
- {.compatible = "apm,xgene-enet",},
- {.compatible = "apm,xgene1-sgenet",},
- {.compatible = "apm,xgene1-xgenet",},
+ {.compatible = "apm,xgene-enet", .data = (void *)XGENE_ENET1},
+ {.compatible = "apm,xgene1-sgenet", .data = (void *)XGENE_ENET1},
+ {.compatible = "apm,xgene1-xgenet", .data = (void *)XGENE_ENET1},
+ {.compatible = "apm,xgene2-sgenet", .data = (void *)XGENE_ENET2},
+ {.compatible = "apm,xgene2-xgenet", .data = (void *)XGENE_ENET2},
{},
};
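
Editor's note: with the Tx-completion handling above, the driver stops reading the queue depth from hardware on every transmit and instead tracks two free-running u16 counters, pdata->tx_level (descriptors queued) and pdata->txc_level (descriptors completed), stopping the queue when their difference exceeds tx_qcnt_hi. The sketch below models the wrap-safe part of that accounting with plain modular u16 arithmetic; it is a simplified illustration of the idea, not the driver's exact expression, and the threshold value is a placeholder.

/* Standalone sketch of wrap-safe producer/consumer accounting for the
 * TX ring.  Not the driver's exact expression; threshold is a placeholder.
 */
#include <stdint.h>
#include <stdio.h>

#define TX_QCNT_HI 128   /* stop-queue threshold, analogous to pdata->tx_qcnt_hi */

struct txq {
	uint16_t tx_level;   /* descriptors handed to hardware (free-running) */
	uint16_t txc_level;  /* descriptors reported completed (free-running) */
};

static uint16_t in_flight(const struct txq *q)
{
	/* unsigned subtraction of same-width counters is wrap-safe */
	return (uint16_t)(q->tx_level - q->txc_level);
}

static int queue_should_stop(const struct txq *q)
{
	return in_flight(q) > TX_QCNT_HI;
}

int main(void)
{
	struct txq q = { .tx_level = 10, .txc_level = 65530 }; /* producer wrapped past 0 */

	/* 10 - 65530 (mod 65536) = 16 descriptors outstanding -> keep queue running */
	printf("in flight: %u, stop: %d\n", in_flight(&q), queue_should_stop(&q));
	return 0;
}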
diff --git a/kernel/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/kernel/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
index 8f3d232b0..1aa72c787 100644
--- a/kernel/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
+++ b/kernel/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
@@ -35,12 +35,17 @@
#include <linux/if_vlan.h>
#include <linux/phy.h>
#include "xgene_enet_hw.h"
+#include "xgene_enet_ring2.h"
#define XGENE_DRV_VERSION "v1.0"
#define XGENE_ENET_MAX_MTU 1536
#define SKB_BUFFER_SIZE (XGENE_ENET_MAX_MTU - NET_IP_ALIGN)
+#define BUFLEN_16K (16 * 1024)
#define NUM_PKT_BUF 64
#define NUM_BUFPOOL 32
+#define MAX_EXP_BUFFS 256
+#define XGENE_ENET_MSS 1448
+#define XGENE_MIN_ENET_FRAME_SIZE 60
#define START_CPU_BUFNUM_0 0
#define START_ETH_BUFNUM_0 2
@@ -51,12 +56,31 @@
#define START_BP_BUFNUM_1 0x2A
#define START_RING_NUM_1 264
+#define XG_START_CPU_BUFNUM_1 12
+#define XG_START_ETH_BUFNUM_1 2
+#define XG_START_BP_BUFNUM_1 0x22
+#define XG_START_RING_NUM_1 264
+
+#define X2_START_CPU_BUFNUM_0 0
+#define X2_START_ETH_BUFNUM_0 0
+#define X2_START_BP_BUFNUM_0 0x20
+#define X2_START_RING_NUM_0 0
+#define X2_START_CPU_BUFNUM_1 0xc
+#define X2_START_ETH_BUFNUM_1 0
+#define X2_START_BP_BUFNUM_1 0x20
+#define X2_START_RING_NUM_1 256
+
#define IRQ_ID_SIZE 16
#define XGENE_MAX_TXC_RINGS 1
#define PHY_POLL_LINK_ON (10 * HZ)
#define PHY_POLL_LINK_OFF (PHY_POLL_LINK_ON / 5)
+enum xgene_enet_id {
+ XGENE_ENET1 = 1,
+ XGENE_ENET2
+};
+
/* software context of a descriptor ring */
struct xgene_enet_desc_ring {
struct net_device *ndev;
@@ -64,18 +88,22 @@ struct xgene_enet_desc_ring {
u16 num;
u16 head;
u16 tail;
+ u16 exp_buf_tail;
u16 slots;
u16 irq;
char irq_name[IRQ_ID_SIZE];
u32 size;
- u32 state[NUM_RING_CONFIG];
+ u32 state[X2_NUM_RING_CONFIG];
void __iomem *cmd_base;
void __iomem *cmd;
dma_addr_t dma;
+ dma_addr_t irq_mbox_dma;
+ void *irq_mbox_addr;
u16 dst_ring_num;
u8 nbufpool;
struct sk_buff *(*rx_skb);
struct sk_buff *(*cp_skb);
+ dma_addr_t *frag_dma_addr;
enum xgene_enet_ring_cfgsize cfgsize;
struct xgene_enet_desc_ring *cp_ring;
struct xgene_enet_desc_ring *buf_pool;
@@ -85,6 +113,7 @@ struct xgene_enet_desc_ring {
struct xgene_enet_raw_desc *raw_desc;
struct xgene_enet_raw_desc16 *raw_desc16;
};
+ __le64 *exp_bufs;
};
struct xgene_mac_ops {
@@ -95,6 +124,7 @@ struct xgene_mac_ops {
void (*tx_disable)(struct xgene_enet_pdata *pdata);
void (*rx_disable)(struct xgene_enet_pdata *pdata);
void (*set_mac_addr)(struct xgene_enet_pdata *pdata);
+ void (*set_mss)(struct xgene_enet_pdata *pdata);
void (*link_state)(struct work_struct *work);
};
@@ -105,6 +135,15 @@ struct xgene_port_ops {
void (*shutdown)(struct xgene_enet_pdata *pdata);
};
+struct xgene_ring_ops {
+ u8 num_ring_config;
+ u8 num_ring_id_shift;
+ struct xgene_enet_desc_ring * (*setup)(struct xgene_enet_desc_ring *);
+ void (*clear)(struct xgene_enet_desc_ring *);
+ void (*wr_cmd)(struct xgene_enet_desc_ring *, int);
+ u32 (*len)(struct xgene_enet_desc_ring *);
+};
+
/* ethernet private data */
struct xgene_enet_pdata {
struct net_device *ndev;
@@ -113,13 +152,14 @@ struct xgene_enet_pdata {
int phy_speed;
struct clk *clk;
struct platform_device *pdev;
+ enum xgene_enet_id enet_id;
struct xgene_enet_desc_ring *tx_ring;
struct xgene_enet_desc_ring *rx_ring;
+ u16 tx_level;
+ u16 txc_level;
char *dev_name;
u32 rx_buff_cnt;
u32 tx_qcnt_hi;
- u32 cp_qcnt_hi;
- u32 cp_qcnt_low;
u32 rx_irq;
u32 txc_irq;
u8 cq_cnt;
@@ -136,12 +176,16 @@ struct xgene_enet_pdata {
struct rtnl_link_stats64 stats;
struct xgene_mac_ops *mac_ops;
struct xgene_port_ops *port_ops;
+ struct xgene_ring_ops *ring_ops;
struct delayed_work link_work;
u32 port_id;
u8 cpu_bufnum;
u8 eth_bufnum;
u8 bp_bufnum;
u16 ring_num;
+ u32 mss;
+ u8 tx_delay;
+ u8 rx_delay;
};
struct xgene_indirect_ctl {
@@ -176,6 +220,9 @@ static inline u64 xgene_enet_get_field_value(int pos, int len, u64 src)
#define GET_VAL(field, src) \
xgene_enet_get_field_value(field ## _POS, field ## _LEN, src)
+#define GET_BIT(field, src) \
+ xgene_enet_get_field_value(field ## _POS, 1, src)
+
static inline struct device *ndev_to_dev(struct net_device *ndev)
{
return ndev->dev.parent;
diff --git a/kernel/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c b/kernel/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c
new file mode 100644
index 000000000..0b6896bb3
--- /dev/null
+++ b/kernel/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c
@@ -0,0 +1,200 @@
+/* Applied Micro X-Gene SoC Ethernet Driver
+ *
+ * Copyright (c) 2015, Applied Micro Circuits Corporation
+ * Author: Iyappan Subramanian <isubramanian@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "xgene_enet_main.h"
+#include "xgene_enet_hw.h"
+#include "xgene_enet_ring2.h"
+
+static void xgene_enet_ring_init(struct xgene_enet_desc_ring *ring)
+{
+ u32 *ring_cfg = ring->state;
+ u64 addr = ring->dma;
+
+ if (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU) {
+ ring_cfg[0] |= SET_VAL(X2_INTLINE, ring->id & RING_BUFNUM_MASK);
+ ring_cfg[3] |= SET_BIT(X2_DEQINTEN);
+ }
+ ring_cfg[0] |= SET_VAL(X2_CFGCRID, 1);
+
+ addr >>= 8;
+ ring_cfg[2] |= QCOHERENT | SET_VAL(RINGADDRL, addr);
+
+ addr >>= 27;
+ ring_cfg[3] |= SET_VAL(RINGSIZE, ring->cfgsize)
+ | ACCEPTLERR
+ | SET_VAL(RINGADDRH, addr);
+ ring_cfg[4] |= SET_VAL(X2_SELTHRSH, 1);
+ ring_cfg[5] |= SET_BIT(X2_QBASE_AM) | SET_BIT(X2_MSG_AM);
+}
+
+static void xgene_enet_ring_set_type(struct xgene_enet_desc_ring *ring)
+{
+ u32 *ring_cfg = ring->state;
+ bool is_bufpool;
+ u32 val;
+
+ is_bufpool = xgene_enet_is_bufpool(ring->id);
+ val = (is_bufpool) ? RING_BUFPOOL : RING_REGULAR;
+ ring_cfg[4] |= SET_VAL(X2_RINGTYPE, val);
+ if (is_bufpool)
+ ring_cfg[3] |= SET_VAL(RINGMODE, BUFPOOL_MODE);
+}
+
+static void xgene_enet_ring_set_recombbuf(struct xgene_enet_desc_ring *ring)
+{
+ u32 *ring_cfg = ring->state;
+
+ ring_cfg[3] |= RECOMBBUF;
+ ring_cfg[4] |= SET_VAL(X2_RECOMTIMEOUT, 0x7);
+}
+
+static void xgene_enet_ring_wr32(struct xgene_enet_desc_ring *ring,
+ u32 offset, u32 data)
+{
+ struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
+
+ iowrite32(data, pdata->ring_csr_addr + offset);
+}
+
+static void xgene_enet_write_ring_state(struct xgene_enet_desc_ring *ring)
+{
+ struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
+ int i;
+
+ xgene_enet_ring_wr32(ring, CSR_RING_CONFIG, ring->num);
+ for (i = 0; i < pdata->ring_ops->num_ring_config; i++) {
+ xgene_enet_ring_wr32(ring, CSR_RING_WR_BASE + (i * 4),
+ ring->state[i]);
+ }
+}
+
+static void xgene_enet_clr_ring_state(struct xgene_enet_desc_ring *ring)
+{
+ memset(ring->state, 0, sizeof(ring->state));
+ xgene_enet_write_ring_state(ring);
+}
+
+static void xgene_enet_set_ring_state(struct xgene_enet_desc_ring *ring)
+{
+ enum xgene_ring_owner owner;
+
+ xgene_enet_ring_set_type(ring);
+
+ owner = xgene_enet_ring_owner(ring->id);
+ if (owner == RING_OWNER_ETH0 || owner == RING_OWNER_ETH1)
+ xgene_enet_ring_set_recombbuf(ring);
+
+ xgene_enet_ring_init(ring);
+ xgene_enet_write_ring_state(ring);
+}
+
+static void xgene_enet_set_ring_id(struct xgene_enet_desc_ring *ring)
+{
+ u32 ring_id_val, ring_id_buf;
+ bool is_bufpool;
+
+ if (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU)
+ return;
+
+ is_bufpool = xgene_enet_is_bufpool(ring->id);
+
+ ring_id_val = ring->id & GENMASK(9, 0);
+ ring_id_val |= OVERWRITE;
+
+ ring_id_buf = (ring->num << 9) & GENMASK(18, 9);
+ ring_id_buf |= PREFETCH_BUF_EN;
+ if (is_bufpool)
+ ring_id_buf |= IS_BUFFER_POOL;
+
+ xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id_val);
+ xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, ring_id_buf);
+}
+
+static void xgene_enet_clr_desc_ring_id(struct xgene_enet_desc_ring *ring)
+{
+ u32 ring_id;
+
+ ring_id = ring->id | OVERWRITE;
+ xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id);
+ xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, 0);
+}
+
+static struct xgene_enet_desc_ring *xgene_enet_setup_ring(
+ struct xgene_enet_desc_ring *ring)
+{
+ bool is_bufpool;
+ u32 addr, i;
+
+ xgene_enet_clr_ring_state(ring);
+ xgene_enet_set_ring_state(ring);
+ xgene_enet_set_ring_id(ring);
+
+ ring->slots = xgene_enet_get_numslots(ring->id, ring->size);
+
+ is_bufpool = xgene_enet_is_bufpool(ring->id);
+ if (is_bufpool || xgene_enet_ring_owner(ring->id) != RING_OWNER_CPU)
+ return ring;
+
+ addr = CSR_VMID0_INTR_MBOX + (4 * (ring->id & RING_BUFNUM_MASK));
+ xgene_enet_ring_wr32(ring, addr, ring->irq_mbox_dma >> 10);
+
+ for (i = 0; i < ring->slots; i++)
+ xgene_enet_mark_desc_slot_empty(&ring->raw_desc[i]);
+
+ return ring;
+}
+
+static void xgene_enet_clear_ring(struct xgene_enet_desc_ring *ring)
+{
+ xgene_enet_clr_desc_ring_id(ring);
+ xgene_enet_clr_ring_state(ring);
+}
+
+static void xgene_enet_wr_cmd(struct xgene_enet_desc_ring *ring, int count)
+{
+ u32 data = 0;
+
+ if (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU) {
+ data = SET_VAL(X2_INTLINE, ring->id & RING_BUFNUM_MASK) |
+ INTR_CLEAR;
+ }
+ data |= (count & GENMASK(16, 0));
+
+ iowrite32(data, ring->cmd);
+}
+
+static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring)
+{
+ u32 __iomem *cmd_base = ring->cmd_base;
+ u32 ring_state, num_msgs;
+
+ ring_state = ioread32(&cmd_base[1]);
+ num_msgs = GET_VAL(X2_NUMMSGSINQ, ring_state);
+
+ return num_msgs;
+}
+
+struct xgene_ring_ops xgene_ring2_ops = {
+ .num_ring_config = X2_NUM_RING_CONFIG,
+ .num_ring_id_shift = 13,
+ .setup = xgene_enet_setup_ring,
+ .clear = xgene_enet_clear_ring,
+ .wr_cmd = xgene_enet_wr_cmd,
+ .len = xgene_enet_ring_len,
+};
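
Editor's note: in the ring2 wr_cmd above, the increment is folded into the doorbell word with data |= (count & GENMASK(16, 0)), and callers pass negative counts when returning consumed descriptors. The standalone sketch below shows why masking a negative int to 17 bits yields a pattern that can be read back as a signed increment; the sign-extending decode is only a host-side check of that idea, on the assumption (suggested by the driver's use of -count, not stated in the diff) that the ring manager treats the field as two's complement.

/* Standalone sketch of the 17-bit increment packed into the ring2 doorbell.
 * The decode is a host-side check only, assuming a two's-complement field.
 */
#include <stdint.h>
#include <stdio.h>

#define FIELD_BITS 17
#define FIELD_MASK ((1u << FIELD_BITS) - 1)      /* GENMASK(16, 0) */
#define INTR_CLEAR (1u << 23)                    /* control bit from the header */

static uint32_t encode_doorbell(int count, int clear_irq)
{
	uint32_t data = clear_irq ? INTR_CLEAR : 0;

	data |= ((uint32_t)count) & FIELD_MASK;  /* keep low 17 bits of two's complement */
	return data;
}

static int decode_count(uint32_t data)
{
	uint32_t raw = data & FIELD_MASK;

	/* sign-extend the 17-bit field back to a host int */
	if (raw & (1u << (FIELD_BITS - 1)))
		raw |= ~FIELD_MASK;
	return (int)raw;
}

int main(void)
{
	uint32_t queue = encode_doorbell(4, 0);    /* TX: post 4 new descriptors */
	uint32_t retire = encode_doorbell(-3, 1);  /* RX: return 3 slots, clear interrupt */

	printf("+4 -> 0x%08x (%d)\n", queue, decode_count(queue));
	printf("-3 -> 0x%08x (%d)\n", retire, decode_count(retire));
	return 0;
}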
diff --git a/kernel/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.h b/kernel/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.h
new file mode 100644
index 000000000..8b235db23
--- /dev/null
+++ b/kernel/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.h
@@ -0,0 +1,49 @@
+/* Applied Micro X-Gene SoC Ethernet Driver
+ *
+ * Copyright (c) 2015, Applied Micro Circuits Corporation
+ * Author: Iyappan Subramanian <isubramanian@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __XGENE_ENET_RING2_H__
+#define __XGENE_ENET_RING2_H__
+
+#include "xgene_enet_main.h"
+
+#define X2_NUM_RING_CONFIG 6
+
+#define INTR_MBOX_SIZE 1024
+#define CSR_VMID0_INTR_MBOX 0x0270
+#define INTR_CLEAR BIT(23)
+
+#define X2_MSG_AM_POS 10
+#define X2_QBASE_AM_POS 11
+#define X2_INTLINE_POS 24
+#define X2_INTLINE_LEN 5
+#define X2_CFGCRID_POS 29
+#define X2_CFGCRID_LEN 3
+#define X2_SELTHRSH_POS 7
+#define X2_SELTHRSH_LEN 3
+#define X2_RINGTYPE_POS 23
+#define X2_RINGTYPE_LEN 2
+#define X2_DEQINTEN_POS 29
+#define X2_RECOMTIMEOUT_POS 0
+#define X2_RECOMTIMEOUT_LEN 7
+#define X2_NUMMSGSINQ_POS 0
+#define X2_NUMMSGSINQ_LEN 17
+
+extern struct xgene_ring_ops xgene_ring2_ops;
+
+#endif /* __XGENE_ENET_RING2_H__ */
diff --git a/kernel/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c b/kernel/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
index f27fb6f2a..05b817e56 100644
--- a/kernel/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
+++ b/kernel/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
@@ -21,6 +21,7 @@
#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"
#include "xgene_enet_sgmac.h"
+#include "xgene_enet_xgmac.h"
static void xgene_enet_wr_csr(struct xgene_enet_pdata *p, u32 offset, u32 val)
{
@@ -39,6 +40,14 @@ static void xgene_enet_wr_diag_csr(struct xgene_enet_pdata *p,
iowrite32(val, p->eth_diag_csr_addr + offset);
}
+static void xgene_enet_wr_mcx_csr(struct xgene_enet_pdata *pdata,
+ u32 offset, u32 val)
+{
+ void __iomem *addr = pdata->mcx_mac_csr_addr + offset;
+
+ iowrite32(val, addr);
+}
+
static bool xgene_enet_wr_indirect(struct xgene_indirect_ctl *ctl,
u32 wr_addr, u32 wr_data)
{
@@ -140,8 +149,9 @@ static int xgene_enet_ecc_init(struct xgene_enet_pdata *p)
static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *p)
{
- u32 val = 0xffffffff;
+ u32 val;
+ val = (p->enet_id == XGENE_ENET1) ? 0xffffffff : 0;
xgene_enet_wr_ring_if(p, ENET_CFGSSQMIWQASSOC_ADDR, val);
xgene_enet_wr_ring_if(p, ENET_CFGSSQMIFPQASSOC_ADDR, val);
}
@@ -227,6 +237,8 @@ static void xgene_sgmac_init(struct xgene_enet_pdata *p)
{
u32 data, loop = 10;
u32 offset = p->port_id * 4;
+ u32 enet_spare_cfg_reg, rsif_config_reg;
+ u32 cfg_bypass_reg, rx_dv_gate_reg;
xgene_sgmac_reset(p);
@@ -239,7 +251,7 @@ static void xgene_sgmac_init(struct xgene_enet_pdata *p)
SGMII_STATUS_ADDR >> 2);
if ((data & AUTO_NEG_COMPLETE) && (data & LINK_STATUS))
break;
- usleep_range(10, 20);
+ usleep_range(1000, 2000);
}
if (!(data & AUTO_NEG_COMPLETE) || !(data & LINK_STATUS))
netdev_err(p->ndev, "Auto-negotiation failed\n");
@@ -249,33 +261,38 @@ static void xgene_sgmac_init(struct xgene_enet_pdata *p)
xgene_enet_wr_mac(p, MAC_CONFIG_2_ADDR, data | FULL_DUPLEX2);
xgene_enet_wr_mac(p, INTERFACE_CONTROL_ADDR, ENET_GHD_MODE);
- data = xgene_enet_rd_csr(p, ENET_SPARE_CFG_REG_ADDR);
+ if (p->enet_id == XGENE_ENET1) {
+ enet_spare_cfg_reg = ENET_SPARE_CFG_REG_ADDR;
+ rsif_config_reg = RSIF_CONFIG_REG_ADDR;
+ cfg_bypass_reg = CFG_BYPASS_ADDR;
+ rx_dv_gate_reg = SG_RX_DV_GATE_REG_0_ADDR;
+ } else {
+ enet_spare_cfg_reg = XG_ENET_SPARE_CFG_REG_ADDR;
+ rsif_config_reg = XG_RSIF_CONFIG_REG_ADDR;
+ cfg_bypass_reg = XG_CFG_BYPASS_ADDR;
+ rx_dv_gate_reg = XG_MCX_RX_DV_GATE_REG_0_ADDR;
+ }
+
+ data = xgene_enet_rd_csr(p, enet_spare_cfg_reg);
data |= MPA_IDLE_WITH_QMI_EMPTY;
- xgene_enet_wr_csr(p, ENET_SPARE_CFG_REG_ADDR, data);
+ xgene_enet_wr_csr(p, enet_spare_cfg_reg, data);
xgene_sgmac_set_mac_addr(p);
- data = xgene_enet_rd_csr(p, DEBUG_REG_ADDR);
- data |= CFG_BYPASS_UNISEC_TX | CFG_BYPASS_UNISEC_RX;
- xgene_enet_wr_csr(p, DEBUG_REG_ADDR, data);
-
/* Adjust MDC clock frequency */
data = xgene_enet_rd_mac(p, MII_MGMT_CONFIG_ADDR);
MGMT_CLOCK_SEL_SET(&data, 7);
xgene_enet_wr_mac(p, MII_MGMT_CONFIG_ADDR, data);
/* Enable drop if bufpool not available */
- data = xgene_enet_rd_csr(p, RSIF_CONFIG_REG_ADDR);
+ data = xgene_enet_rd_csr(p, rsif_config_reg);
data |= CFG_RSIF_FPBUFF_TIMEOUT_EN;
- xgene_enet_wr_csr(p, RSIF_CONFIG_REG_ADDR, data);
-
- /* Rtype should be copied from FP */
- xgene_enet_wr_csr(p, RSIF_RAM_DBG_REG0_ADDR, 0);
+ xgene_enet_wr_csr(p, rsif_config_reg, data);
/* Bypass traffic gating */
- xgene_enet_wr_csr(p, CFG_LINK_AGGR_RESUME_0_ADDR + offset, TX_PORT0);
- xgene_enet_wr_csr(p, CFG_BYPASS_ADDR, RESUME_TX);
- xgene_enet_wr_csr(p, SG_RX_DV_GATE_REG_0_ADDR + offset, RESUME_RX0);
+ xgene_enet_wr_csr(p, XG_ENET_SPARE_CFG_REG_1_ADDR, 0x84);
+ xgene_enet_wr_csr(p, cfg_bypass_reg, RESUME_TX);
+ xgene_enet_wr_mcx_csr(p, rx_dv_gate_reg + offset, RESUME_RX0);
}
static void xgene_sgmac_rxtx(struct xgene_enet_pdata *p, u32 bits, bool set)
@@ -317,9 +334,11 @@ static int xgene_enet_reset(struct xgene_enet_pdata *p)
if (!xgene_ring_mgr_init(p))
return -ENODEV;
- clk_prepare_enable(p->clk);
- clk_disable_unprepare(p->clk);
- clk_prepare_enable(p->clk);
+ if (!IS_ERR(p->clk)) {
+ clk_prepare_enable(p->clk);
+ clk_disable_unprepare(p->clk);
+ clk_prepare_enable(p->clk);
+ }
xgene_enet_ecc_init(p);
xgene_enet_config_ring_if_assoc(p);
@@ -331,19 +350,29 @@ static void xgene_enet_cle_bypass(struct xgene_enet_pdata *p,
u32 dst_ring_num, u16 bufpool_id)
{
u32 data, fpsel;
+ u32 cle_bypass_reg0, cle_bypass_reg1;
u32 offset = p->port_id * MAC_OFFSET;
+ if (p->enet_id == XGENE_ENET1) {
+ cle_bypass_reg0 = CLE_BYPASS_REG0_0_ADDR;
+ cle_bypass_reg1 = CLE_BYPASS_REG1_0_ADDR;
+ } else {
+ cle_bypass_reg0 = XCLE_BYPASS_REG0_ADDR;
+ cle_bypass_reg1 = XCLE_BYPASS_REG1_ADDR;
+ }
+
data = CFG_CLE_BYPASS_EN0;
- xgene_enet_wr_csr(p, CLE_BYPASS_REG0_0_ADDR + offset, data);
+ xgene_enet_wr_csr(p, cle_bypass_reg0 + offset, data);
fpsel = xgene_enet_ring_bufnum(bufpool_id) - 0x20;
data = CFG_CLE_DSTQID0(dst_ring_num) | CFG_CLE_FPSEL0(fpsel);
- xgene_enet_wr_csr(p, CLE_BYPASS_REG1_0_ADDR + offset, data);
+ xgene_enet_wr_csr(p, cle_bypass_reg1 + offset, data);
}
static void xgene_enet_shutdown(struct xgene_enet_pdata *p)
{
- clk_disable_unprepare(p->clk);
+ if (!IS_ERR(p->clk))
+ clk_disable_unprepare(p->clk);
}
static void xgene_enet_link_state(struct work_struct *work)
diff --git a/kernel/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c b/kernel/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
index a18a9d1f1..7a28a48cb 100644
--- a/kernel/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
+++ b/kernel/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
@@ -122,7 +122,6 @@ static bool xgene_enet_rd_indirect(void __iomem *addr, void __iomem *rd,
return true;
}
-
static void xgene_enet_rd_mac(struct xgene_enet_pdata *pdata,
u32 rd_addr, u32 *rd_data)
{
@@ -185,6 +184,11 @@ static void xgene_xgmac_set_mac_addr(struct xgene_enet_pdata *pdata)
xgene_enet_wr_mac(pdata, HSTMACADR_MSW_ADDR, addr1);
}
+static void xgene_xgmac_set_mss(struct xgene_enet_pdata *pdata)
+{
+ xgene_enet_wr_csr(pdata, XG_TSIF_MSS_REG0_ADDR, pdata->mss);
+}
+
static u32 xgene_enet_link_status(struct xgene_enet_pdata *pdata)
{
u32 data;
@@ -205,8 +209,8 @@ static void xgene_xgmac_init(struct xgene_enet_pdata *pdata)
data &= ~HSTLENCHK;
xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data);
- xgene_enet_wr_mac(pdata, HSTMAXFRAME_LENGTH_ADDR, 0x06000600);
xgene_xgmac_set_mac_addr(pdata);
+ xgene_xgmac_set_mss(pdata);
xgene_enet_rd_csr(pdata, XG_RSIF_CONFIG_REG_ADDR, &data);
data |= CFG_RSIF_FPBUFF_TIMEOUT_EN;
@@ -257,9 +261,11 @@ static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
if (!xgene_ring_mgr_init(pdata))
return -ENODEV;
- clk_prepare_enable(pdata->clk);
- clk_disable_unprepare(pdata->clk);
- clk_prepare_enable(pdata->clk);
+ if (!IS_ERR(pdata->clk)) {
+ clk_prepare_enable(pdata->clk);
+ clk_disable_unprepare(pdata->clk);
+ clk_prepare_enable(pdata->clk);
+ }
xgene_enet_ecc_init(pdata);
xgene_enet_config_ring_if_assoc(pdata);
@@ -286,7 +292,8 @@ static void xgene_enet_xgcle_bypass(struct xgene_enet_pdata *pdata,
static void xgene_enet_shutdown(struct xgene_enet_pdata *pdata)
{
- clk_disable_unprepare(pdata->clk);
+ if (!IS_ERR(pdata->clk))
+ clk_disable_unprepare(pdata->clk);
}
static void xgene_enet_link_state(struct work_struct *work)
@@ -327,6 +334,7 @@ struct xgene_mac_ops xgene_xgmac_ops = {
.rx_disable = xgene_xgmac_rx_disable,
.tx_disable = xgene_xgmac_tx_disable,
.set_mac_addr = xgene_xgmac_set_mac_addr,
+ .set_mss = xgene_xgmac_set_mss,
.link_state = xgene_enet_link_state
};
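
Editor's note: a recurring fix in this update, here and in xgene_enet_hw.c and xgene_enet_sgmac.c, is guarding clock operations with IS_ERR(pdata->clk) instead of a NULL test, because devm_clk_get() signals "no clock" (for example on ACPI systems where firmware already set the clocks up) with an ERR_PTR-encoded pointer rather than NULL. The sketch below reimplements just enough of that encoding to show why the old check misbehaves; the helpers are illustrative, not the kernel's <linux/err.h>, and fake_devm_clk_get() is a made-up stand-in.

/* Minimal reimplementation of the ERR_PTR idea, for illustration only;
 * these helpers are not the kernel's <linux/err.h>, and
 * fake_devm_clk_get() is a made-up stand-in for devm_clk_get().
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO 4095
#define ENOENT    2

static void *err_ptr(long error)     { return (void *)(uintptr_t)error; }
static int   is_err(const void *ptr) { return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO; }

struct clk;                                      /* opaque handle, as in the kernel */

static struct clk *fake_devm_clk_get(int have_dt_clock)
{
	/* without a DT clock node (e.g. ACPI boot) the real call fails with ERR_PTR */
	return have_dt_clock ? (struct clk *)(uintptr_t)0x1000 : err_ptr(-ENOENT);
}

int main(void)
{
	struct clk *clk = fake_devm_clk_get(0);  /* firmware already configured clocks */

	if (clk)                                 /* old check: passes even on failure */
		printf("old NULL check would still poke the clock\n");

	if (!is_err(clk))                        /* new check: skips the error pointer */
		printf("enabling clock\n");
	else
		printf("no DT clock; assuming firmware set it up\n");

	return 0;
}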
diff --git a/kernel/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h b/kernel/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
index 5a5296a6d..f8f908dbf 100644
--- a/kernel/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
+++ b/kernel/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
@@ -21,9 +21,28 @@
#ifndef __XGENE_ENET_XGMAC_H__
#define __XGENE_ENET_XGMAC_H__
+#define X2_BLOCK_ETH_MAC_CSR_OFFSET 0x3000
#define BLOCK_AXG_MAC_OFFSET 0x0800
#define BLOCK_AXG_MAC_CSR_OFFSET 0x2000
+#define XGENET_CONFIG_REG_ADDR 0x20
+#define XGENET_SRST_ADDR 0x00
+#define XGENET_CLKEN_ADDR 0x08
+
+#define CSR_CLK BIT(0)
+#define XGENET_CLK BIT(1)
+#define PCS_CLK BIT(3)
+#define AN_REF_CLK BIT(4)
+#define AN_CLK BIT(5)
+#define AD_CLK BIT(6)
+
+#define CSR_RST BIT(0)
+#define XGENET_RST BIT(1)
+#define PCS_RST BIT(3)
+#define AN_REF_RST BIT(4)
+#define AN_RST BIT(5)
+#define AD_RST BIT(6)
+
#define AXGMAC_CONFIG_0 0x0000
#define AXGMAC_CONFIG_1 0x0004
#define HSTMACRST BIT(31)
@@ -38,11 +57,14 @@
#define HSTMACADR_MSW_ADDR 0x0014
#define HSTMAXFRAME_LENGTH_ADDR 0x0020
+#define XG_MCX_RX_DV_GATE_REG_0_ADDR 0x0004
#define XG_RSIF_CONFIG_REG_ADDR 0x00a0
#define XCLE_BYPASS_REG0_ADDR 0x0160
#define XCLE_BYPASS_REG1_ADDR 0x0164
#define XG_CFG_BYPASS_ADDR 0x0204
+#define XG_CFG_LINK_AGGR_RESUME_0_ADDR 0x0214
#define XG_LINK_STATUS_ADDR 0x0228
+#define XG_TSIF_MSS_REG0_ADDR 0x02a4
#define XG_ENET_SPARE_CFG_REG_ADDR 0x040c
#define XG_ENET_SPARE_CFG_REG_1_ADDR 0x0410
#define XGENET_RX_DV_GATE_REG_0_ADDR 0x0804