[v5,4/4] drivers: net: Add APM X-Gene SoC ethernet driver support.

Message ID 1401737954-26152-5-git-send-email-isubramanian@apm.com
State New, archived

Commit Message

Iyappan Subramanian June 2, 2014, 7:39 p.m. UTC
This patch adds network driver for APM X-Gene SoC ethernet.

Signed-off-by: Iyappan Subramanian <isubramanian@apm.com>
Signed-off-by: Ravi Patel <rapatel@apm.com>
Signed-off-by: Keyur Chudgar <kchudgar@apm.com>
---
 drivers/net/ethernet/Kconfig                     |    1 +
 drivers/net/ethernet/Makefile                    |    1 +
 drivers/net/ethernet/apm/Kconfig                 |    1 +
 drivers/net/ethernet/apm/Makefile                |    5 +
 drivers/net/ethernet/apm/xgene/Kconfig           |    9 +
 drivers/net/ethernet/apm/xgene/Makefile          |    6 +
 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c   |  901 ++++++++++++++++++++
 drivers/net/ethernet/apm/xgene/xgene_enet_hw.h   |  395 +++++++++
 drivers/net/ethernet/apm/xgene/xgene_enet_main.c |  959 ++++++++++++++++++++++
 drivers/net/ethernet/apm/xgene/xgene_enet_main.h |  131 +++
 10 files changed, 2409 insertions(+)
 create mode 100644 drivers/net/ethernet/apm/Kconfig
 create mode 100644 drivers/net/ethernet/apm/Makefile
 create mode 100644 drivers/net/ethernet/apm/xgene/Kconfig
 create mode 100644 drivers/net/ethernet/apm/xgene/Makefile
 create mode 100644 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
 create mode 100644 drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
 create mode 100644 drivers/net/ethernet/apm/xgene/xgene_enet_main.c
 create mode 100644 drivers/net/ethernet/apm/xgene/xgene_enet_main.h

Comments

David Miller June 5, 2014, 7:45 a.m. UTC | #1
From: Iyappan Subramanian <isubramanian@apm.com>
Date: Mon,  2 Jun 2014 12:39:14 -0700

> +	netdev_err(ndev, "LERR: %d ring_num: %d ", status, ring->num);
> +	switch (status) {
> +	case HBF_READ_DATA:
> +		netdev_err(ndev, "HBF read data error\n");
> +		break;

This is not really appropriate.

We have statistics like the ones you are incrementing in this
function as the mechanism people can use to learn what events
happened on an interface, and how many times they happened.

Therefore, emitting a log message for each one of those events too is
not necessary.

We don't emit a netdev_err() for every packet that the IPv4 stack
drops due to a bad checksum, for example.

Please get rid of this.
Iyappan Subramanian June 12, 2014, 1:59 a.m. UTC | #2
On Thu, Jun 5, 2014 at 12:45 AM, David Miller <davem@davemloft.net> wrote:
> From: Iyappan Subramanian <isubramanian@apm.com>
> Date: Mon,  2 Jun 2014 12:39:14 -0700
>
>> +     netdev_err(ndev, "LERR: %d ring_num: %d ", status, ring->num);
>> +     switch (status) {
>> +     case HBF_READ_DATA:
>> +             netdev_err(ndev, "HBF read data error\n");
>> +             break;
>
> This is not really appropriate.
>
> We have statistics like the ones you are incrementing in this
> function as the mechanism people can use to learn what events
> happened on an interface, and how many times they happened.
>
> Therefore, emitting a log message for each one of those events too is
> not necessary.
>
> We don't emit a netdev_err() for every packet that the IPv4 stack
> drops due to a bad checksum, for example.
>
> Please get rid of this.

Sure.  I will remove the error message prints and clean up the function.

>
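
A minimal sketch of the cleanup agreed above -- dropping the per-event
netdev_err() calls and keeping only the statistics updates -- could look
like the following (all identifiers are taken from the function in the
patch below; this is illustrative, not the posted follow-up):

	void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
				    struct xgene_enet_pdata *pdata,
				    enum xgene_enet_err_code status)
	{
		/* ring is kept only for call-site compatibility */
		struct xgene_enet_rx_stats *rx_stats = &pdata->stats.rx_stats;

		switch (status) {
		case INGRESS_CRC:
			rx_stats->crc_err++;
			break;
		case INGRESS_CHECKSUM:
		case INGRESS_CHECKSUM_COMPUTE:
			rx_stats->checksum_err++;
			break;
		case INGRESS_TRUNC_FRAME:
			rx_stats->oversize_pkts++;
			break;
		case INGRESS_PKT_LEN:
			rx_stats->frm_len_err++;
			break;
		case INGRESS_PKT_UNDER:
			rx_stats->undersize_pkts++;
			break;
		case INGRESS_FIFO_OVERRUN:
			rx_stats->fifo_overrun++;
			break;
		default:
			break;
		}
	}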

Patch

diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index d740101..7059f26 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -24,6 +24,7 @@  source "drivers/net/ethernet/allwinner/Kconfig"
 source "drivers/net/ethernet/alteon/Kconfig"
 source "drivers/net/ethernet/altera/Kconfig"
 source "drivers/net/ethernet/amd/Kconfig"
+source "drivers/net/ethernet/apm/Kconfig"
 source "drivers/net/ethernet/apple/Kconfig"
 source "drivers/net/ethernet/arc/Kconfig"
 source "drivers/net/ethernet/atheros/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 35190e3..620415b 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -10,6 +10,7 @@  obj-$(CONFIG_NET_VENDOR_ALLWINNER) += allwinner/
 obj-$(CONFIG_NET_VENDOR_ALTEON) += alteon/
 obj-$(CONFIG_ALTERA_TSE) += altera/
 obj-$(CONFIG_NET_VENDOR_AMD) += amd/
+obj-$(CONFIG_NET_XGENE) += apm/
 obj-$(CONFIG_NET_VENDOR_APPLE) += apple/
 obj-$(CONFIG_NET_VENDOR_ARC) += arc/
 obj-$(CONFIG_NET_VENDOR_ATHEROS) += atheros/
diff --git a/drivers/net/ethernet/apm/Kconfig b/drivers/net/ethernet/apm/Kconfig
new file mode 100644
index 0000000..ec63d70
--- /dev/null
+++ b/drivers/net/ethernet/apm/Kconfig
@@ -0,0 +1 @@ 
+source "drivers/net/ethernet/apm/xgene/Kconfig"
diff --git a/drivers/net/ethernet/apm/Makefile b/drivers/net/ethernet/apm/Makefile
new file mode 100644
index 0000000..65ce32a
--- /dev/null
+++ b/drivers/net/ethernet/apm/Makefile
@@ -0,0 +1,5 @@ 
+#
+# Makefile for APM X-GENE Ethernet driver.
+#
+
+obj-$(CONFIG_NET_XGENE) += xgene/
diff --git a/drivers/net/ethernet/apm/xgene/Kconfig b/drivers/net/ethernet/apm/xgene/Kconfig
new file mode 100644
index 0000000..616dff6
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene/Kconfig
@@ -0,0 +1,9 @@ 
+config NET_XGENE
+	tristate "APM X-Gene SoC Ethernet Driver"
+	select PHYLIB
+	help
+	  This is the Ethernet driver for the on-chip Ethernet interface on the
+	  APM X-Gene SoC.
+
+	  To compile this driver as a module, choose M here. This module will
+	  be called xgene_enet.
diff --git a/drivers/net/ethernet/apm/xgene/Makefile b/drivers/net/ethernet/apm/xgene/Makefile
new file mode 100644
index 0000000..60de5fa
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene/Makefile
@@ -0,0 +1,6 @@ 
+#
+# Makefile for APM X-Gene Ethernet Driver.
+#
+
+xgene-enet-objs := xgene_enet_hw.o xgene_enet_main.o
+obj-$(CONFIG_NET_XGENE) += xgene-enet.o
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
new file mode 100644
index 0000000..2ef7a08
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -0,0 +1,901 @@ 
+/* Applied Micro X-Gene SoC Ethernet Driver
+ *
+ * Copyright (c) 2014, Applied Micro Circuits Corporation
+ * Authors: Iyappan Subramanian <isubramanian@apm.com>
+ *	    Ravi Patel <rapatel@apm.com>
+ *	    Keyur Chudgar <kchudgar@apm.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "xgene_enet_main.h"
+#include "xgene_enet_hw.h"
+
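+/* Pack the Tx offload parameters (L4/L3 header lengths, Ethernet header
+ * length, checksum enable and IP protocol select) into the 64-bit HOPINFO
+ * word that accompanies each work message handed to the hardware.
+ */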
+u64 xgene_prepare_eth_work_msg(u8 l4hlen, u8 l3hlen, u8 ethhdr,
+			       u8 csum_enable, u8 proto)
+{
+	u64 hopinfo;
+
+	hopinfo = (l4hlen & TCPHDR_MASK) |
+		  ((l3hlen << IPHDR_POS) & IPHDR_MASK) |
+		  (ethhdr << ETHHDR_POS) |
+		  ((csum_enable << EC_POS) & EC_MASK) |
+		  ((proto << IS_POS) & IS_MASK) |
+		  INSERT_CRC |
+		  TYPE_ETH_WORK_MESSAGE;
+
+	return hopinfo;
+}
+
+/* Tx descriptor raw write */
+void xgene_set_tx_desc(struct xgene_enet_desc_ring *ring,
+		       struct xgene_enet_raw_desc *raw_desc)
+{
+	raw_desc->m0 = ring->desc.userinfo;
+	raw_desc->m1 = (ring->desc.dataaddr & DATAADDR_MASK) |
+		       (((u64)ring->desc.bufdatalen << BUFDATALEN_POS) &
+		       BUFDATALEN_MASK) | COHERENT_MASK;
+	raw_desc->m3 = (((u64)ring->desc.henqnum << HENQNUM_POS) &
+		       HENQNUM_MASK) |
+		       ring->desc.hopinfo_lsb;
+}
+
+/* descriptor raw read */
+void xgene_get_desc(struct xgene_enet_desc_ring *ring,
+		    struct xgene_enet_raw_desc *raw_desc)
+{
+	struct xgene_enet_desc *desc = &ring->desc;
+
+	desc->dataaddr = raw_desc->m1 & DATAADDR_MASK;
+	desc->bufdatalen = (raw_desc->m1 & BUFDATALEN_MASK) >> BUFDATALEN_POS;
+	desc->userinfo = raw_desc->m0 & USERINFO_MASK;
+	desc->fpqnum = (raw_desc->m0 & FPQNUM_MASK) >> FPQNUM_POS;
+	desc->status = (raw_desc->m0 & LERR_MASK) >> LERR_POS;
+}
+
+/* Bufpool descriptor raw write common fields */
+void xgene_set_init_bufpool_desc(struct xgene_enet_desc_ring *ring,
+				 struct xgene_enet_raw_desc16 *raw_desc)
+{
+	raw_desc->m0 = (ring->desc.userinfo) |
+		       (((u64)ring->desc.fpqnum << FPQNUM_POS) & FPQNUM_MASK) |
+		       STASHING_MASK;
+}
+
+/* Bufpool descriptor raw write */
+void xgene_set_refill_bufpool_desc(struct xgene_enet_desc_ring *ring,
+				   struct xgene_enet_raw_desc16 *raw_desc)
+{
+	raw_desc->m1 = (ring->desc.dataaddr & DATAADDR_MASK) |
+		       (((u64)ring->desc.bufdatalen << BUFDATALEN_POS) &
+		       BUFDATALEN_MASK) |
+		       COHERENT_MASK;
+}
+
+/* Bufpool descriptor raw read */
+void xgene_get_bufpool_desc(struct xgene_enet_desc_ring *ring,
+			    struct xgene_enet_raw_desc16 *raw_desc)
+{
+	struct xgene_enet_desc *desc = &ring->desc;
+
+	desc->userinfo = raw_desc->m0 & USERINFO_MASK;
+}
+
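+/* Program threshold, coherency, base address and size into the ring state
+ * words; the bus address is pre-shifted by 8 bits and split across the
+ * RINGADDRL/RINGADDRH fields of two configuration words.
+ */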
+static void xgene_enet_ring_init(struct xgene_enet_desc_ring *ring)
+{
+	u32 *ring_cfg = ring->state;
+	u64 addr = ring->dma;
+	enum xgene_enet_ring_cfgsize cfgsize = ring->cfgsize;
+
+	ring_cfg[4] |= (1 << SELTHRSH_POS) &
+			CREATE_MASK(SELTHRSH_POS, SELTHRSH_LEN);
+	ring_cfg[3] |= ACCEPTLERR;
+	ring_cfg[2] |= QCOHERENT;
+
+	addr >>= 8;
+	ring_cfg[2] |= (addr << RINGADDRL_POS) &
+			CREATE_MASK_ULL(RINGADDRL_POS, RINGADDRL_LEN);
+	addr >>= RINGADDRL_LEN;
+	ring_cfg[3] |= addr & CREATE_MASK_ULL(RINGADDRH_POS, RINGADDRH_LEN);
+	ring_cfg[3] |= ((u32) cfgsize << RINGSIZE_POS) &
+			CREATE_MASK(RINGSIZE_POS, RINGSIZE_LEN);
+}
+
+static void xgene_enet_ring_set_type(struct xgene_enet_desc_ring *ring)
+{
+	u32 *ring_cfg = ring->state;
+	bool is_bufpool;
+	u32 val;
+
+	is_bufpool = xgene_enet_is_bufpool(ring->id);
+	val = (is_bufpool) ? RING_BUFPOOL : RING_REGULAR;
+	ring_cfg[4] |= (val << RINGTYPE_POS) &
+			CREATE_MASK(RINGTYPE_POS, RINGTYPE_LEN);
+
+	if (is_bufpool) {
+		ring_cfg[3] |= (BUFPOOL_MODE << RINGMODE_POS) &
+				CREATE_MASK(RINGMODE_POS, RINGMODE_LEN);
+	}
+}
+
+static void xgene_enet_ring_set_recombbuf(struct xgene_enet_desc_ring *ring)
+{
+	u32 *ring_cfg = ring->state;
+
+	ring_cfg[3] |= RECOMBBUF;
+	ring_cfg[3] |= (0xf << RECOMTIMEOUTL_POS) &
+			CREATE_MASK(RECOMTIMEOUTL_POS, RECOMTIMEOUTL_LEN);
+	ring_cfg[4] |= 0x7 & CREATE_MASK(RECOMTIMEOUTH_POS, RECOMTIMEOUTH_LEN);
+}
+
+static void xgene_enet_ring_wr32(struct xgene_enet_desc_ring *ring,
+					u32 offset, u32 data)
+{
+	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
+
+	iowrite32(data, pdata->ring_csr_addr + offset);
+}
+
+static void xgene_enet_ring_rd32(struct xgene_enet_desc_ring *ring,
+					u32 offset, u32 *data)
+{
+	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
+
+	*data = ioread32(pdata->ring_csr_addr + offset);
+}
+
+static void xgene_enet_write_ring_state(struct xgene_enet_desc_ring *ring)
+{
+	int i;
+
+	xgene_enet_ring_wr32(ring, CSR_RING_CONFIG, ring->num);
+	for (i = 0; i < NUM_RING_CONFIG; i++) {
+		xgene_enet_ring_wr32(ring, CSR_RING_WR_BASE + (i * 4),
+				     ring->state[i]);
+	}
+}
+
+static void xgene_enet_clr_ring_state(struct xgene_enet_desc_ring *ring)
+{
+	memset(ring->state, 0, sizeof(u32) * NUM_RING_CONFIG);
+	xgene_enet_write_ring_state(ring);
+}
+
+static void xgene_enet_set_ring_state(struct xgene_enet_desc_ring *ring)
+{
+	xgene_enet_ring_set_type(ring);
+
+	if (xgene_enet_ring_owner(ring->id) == RING_OWNER_ETH0)
+		xgene_enet_ring_set_recombbuf(ring);
+
+	xgene_enet_ring_init(ring);
+	xgene_enet_write_ring_state(ring);
+}
+
+static void xgene_enet_set_ring_id(struct xgene_enet_desc_ring *ring)
+{
+	u32 ring_id_val, ring_id_buf;
+	bool is_bufpool;
+
+	is_bufpool = xgene_enet_is_bufpool(ring->id);
+
+	ring_id_val = ring->id & GENMASK(9, 0);
+	ring_id_val |= OVERWRITE;
+
+	ring_id_buf = (ring->num << 9) & GENMASK(18, 9);
+	ring_id_buf |= PREFETCH_BUF_EN;
+	if (is_bufpool)
+		ring_id_buf |= IS_BUFFER_POOL;
+
+	xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id_val);
+	xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, ring_id_buf);
+}
+
+static void xgene_enet_clr_desc_ring_id(struct xgene_enet_desc_ring *ring)
+{
+	u32 ring_id;
+
+	ring_id = ring->id | OVERWRITE;
+	xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id);
+	xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, 0);
+}
+
+struct xgene_enet_desc_ring *xgene_enet_setup_ring(
+					struct xgene_enet_desc_ring *ring)
+{
+	u32 size = ring->size;
+	u32 i, data;
+	u64 *desc;
+	bool is_bufpool;
+
+	xgene_enet_clr_ring_state(ring);
+	xgene_enet_set_ring_state(ring);
+	xgene_enet_set_ring_id(ring);
+
+	ring->slots = xgene_enet_get_numslots(ring->id, size);
+
+	is_bufpool = xgene_enet_is_bufpool(ring->id);
+	if (is_bufpool || xgene_enet_ring_owner(ring->id) != RING_OWNER_CPU)
+		return ring;
+
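+	/* Stamp every slot with the software empty-slot signature */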
+	for (i = 0; i < ring->slots; i++) {
+		desc = (u64 *)&ring->raw_desc[i];
+		desc[EMPTY_SLOT_INDEX] = EMPTY_SLOT;
+	}
+
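+	/* Enable this ring in the not-empty (NE) interrupt mode register */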
+	xgene_enet_ring_rd32(ring, CSR_RING_NE_INT_MODE, &data);
+	data |= BIT(31 - xgene_enet_ring_bufnum(ring->id));
+	xgene_enet_ring_wr32(ring, CSR_RING_NE_INT_MODE, data);
+
+	return ring;
+}
+
+void xgene_enet_clear_ring(struct xgene_enet_desc_ring *ring)
+{
+	u32 data;
+	bool is_bufpool;
+
+	is_bufpool = xgene_enet_is_bufpool(ring->id);
+	if (is_bufpool || xgene_enet_ring_owner(ring->id) != RING_OWNER_CPU)
+		goto out;
+
+	xgene_enet_ring_rd32(ring, CSR_RING_NE_INT_MODE, &data);
+	data &= ~BIT(31 - xgene_enet_ring_bufnum(ring->id));
+	xgene_enet_ring_wr32(ring, CSR_RING_NE_INT_MODE, data);
+
+out:
+	xgene_enet_clr_desc_ring_id(ring);
+	xgene_enet_clr_ring_state(ring);
+}
+
+void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
+			    struct xgene_enet_pdata *pdata,
+			    enum xgene_enet_err_code status)
+{
+	struct net_device *ndev = ring->ndev;
+	struct xgene_enet_rx_stats *rx_stats = &pdata->stats.rx_stats;
+
+	netdev_err(ndev, "LERR: %d ring_num: %d ", status, ring->num);
+	switch (status) {
+	case HBF_READ_DATA:
+		netdev_err(ndev, "HBF read data error\n");
+		break;
+	case HBF_LL_READ:
+		netdev_err(ndev, "HBF link list read error\n");
+		break;
+	case BAD_WORK_MSG:
+		netdev_err(ndev, "Bad work message\n");
+		break;
+	case BUFPOOL_TIMEOUT:
+		netdev_err(ndev, "Bufpool buffer timeout error\n");
+		break;
+	case INGRESS_CRC:
+		netdev_err(ndev, "Ingress packet had CRC error\n");
+		rx_stats->crc_err++;
+		break;
+	case INGRESS_CHECKSUM:
+		netdev_err(ndev, "Ingress packet had checksum error\n");
+		rx_stats->checksum_err++;
+		break;
+	case INGRESS_TRUNC_FRAME:
+		netdev_err(ndev, "Ingress packet was truncated\n");
+		rx_stats->oversize_pkts++;
+		break;
+	case INGRESS_PKT_LEN:
+		netdev_err(ndev, "Ingress packet length error\n");
+		rx_stats->frm_len_err++;
+		break;
+	case INGRESS_PKT_UNDER:
+		netdev_err(ndev, "Ingress packet size was less than 64 bytes\n");
+		rx_stats->undersize_pkts++;
+		break;
+	case INGRESS_FIFO_OVERRUN:
+		netdev_err(ndev, "Ingress data FIFO overrun error\n");
+		rx_stats->fifo_overrun++;
+		break;
+	case INGRESS_CHECKSUM_COMPUTE:
+		netdev_err(ndev, "Could not compute checksum on ingress pkt\n");
+		rx_stats->checksum_err++;
+		break;
+	default:
+		netdev_err(ndev, "unexpected error\n");
+		break;
+	}
+}
+
+static void xgene_enet_wr_csr(struct xgene_enet_pdata *pdata,
+			      u32 offset, u32 val)
+{
+	void *addr = pdata->eth_csr_addr + offset;
+
+	iowrite32(val, addr);
+}
+
+static void xgene_enet_wr_ring_if(struct xgene_enet_pdata *pdata,
+				  u32 offset, u32 val)
+{
+	void *addr = pdata->eth_ring_if_addr + offset;
+
+	iowrite32(val, addr);
+}
+
+static void xgene_enet_wr_diag_csr(struct xgene_enet_pdata *pdata,
+				   u32 offset, u32 val)
+{
+	void *addr = pdata->eth_diag_csr_addr + offset;
+
+	iowrite32(val, addr);
+}
+
+static void xgene_enet_wr_mcx_csr(struct xgene_enet_pdata *pdata,
+				  u32 offset, u32 val)
+{
+	void *addr = pdata->mcx_mac_csr_addr + offset;
+
+	iowrite32(val, addr);
+}
+
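+/* Indirect register write: latch the target address and data, issue the
+ * write command, then poll the command-done register (for up to ~10 us)
+ * before clearing the command.
+ */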
+static bool xgene_enet_wr_indirect(void *addr, void *wr, void *cmd,
+				  void *cmd_done, u32 wr_addr, u32 wr_data)
+{
+	u32 done;
+	u8 wait = 10;
+
+	iowrite32(wr_addr, addr);
+	iowrite32(wr_data, wr);
+	iowrite32(XGENE_ENET_WR_CMD, cmd);
+
+	/* wait for write command to complete */
+	while (!(done = ioread32(cmd_done)) && wait--)
+		udelay(1);
+
+	if (!done)
+		return false;
+
+	iowrite32(0, cmd);
+
+	return true;
+}
+
+static void xgene_enet_wr_mcx_mac(struct xgene_enet_pdata *pdata,
+				  u32 wr_addr, u32 wr_data)
+{
+	void *addr, *wr, *cmd, *cmd_done;
+	bool ret;
+
+	addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET;
+	wr = pdata->mcx_mac_addr + MAC_WRITE_REG_OFFSET;
+	cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET;
+	cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET;
+
+	ret = xgene_enet_wr_indirect(addr, wr, cmd, cmd_done, wr_addr, wr_data);
+	if (!ret)
+		netdev_err(pdata->ndev, "MCX mac write failed, addr: %04x\n",
+			   wr_addr);
+}
+
+static void xgene_enet_rd_csr(struct xgene_enet_pdata *pdata,
+			      u32 offset, u32 *val)
+{
+	void *addr = pdata->eth_csr_addr + offset;
+
+	*val = ioread32(addr);
+}
+
+static void xgene_enet_rd_diag_csr(struct xgene_enet_pdata *pdata,
+				   u32 offset, u32 *val)
+{
+	void *addr = pdata->eth_diag_csr_addr + offset;
+
+	*val = ioread32(addr);
+}
+
+static void xgene_enet_rd_mcx_csr(struct xgene_enet_pdata *pdata,
+				  u32 offset, u32 *val)
+{
+	void *addr = pdata->mcx_mac_csr_addr + offset;
+
+	*val = ioread32(addr);
+}
+
+static bool xgene_enet_rd_indirect(void *addr, void *rd, void *cmd,
+				   void *cmd_done, u32 rd_addr, u32 *rd_data)
+{
+	u32 done;
+	u8 wait = 10;
+
+	iowrite32(rd_addr, addr);
+	iowrite32(XGENE_ENET_RD_CMD, cmd);
+
+	/* wait for read command to complete */
+	while (!(done = ioread32(cmd_done)) && wait--)
+		udelay(1);
+
+	if (!done)
+		return false;
+
+	*rd_data = ioread32(rd);
+	iowrite32(0, cmd);
+
+	return true;
+}
+
+static void xgene_enet_rd_mcx_mac(struct xgene_enet_pdata *pdata,
+				  u32 rd_addr, u32 *rd_data)
+{
+	void *addr, *rd, *cmd, *cmd_done;
+	bool ret;
+
+	addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET;
+	rd = pdata->mcx_mac_addr + MAC_READ_REG_OFFSET;
+	cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET;
+	cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET;
+
+	ret = xgene_enet_rd_indirect(addr, rd, cmd, cmd_done, rd_addr, rd_data);
+	if (!ret)
+		netdev_err(pdata->ndev, "MCX mac read failed, addr: %04x\n",
+			   rd_addr);
+}
+
+static void xgene_enet_rd_mcx_stats(struct xgene_enet_pdata *pdata,
+				    u32 rd_addr, u32 *rd_data)
+{
+	void *addr, *rd, *cmd, *cmd_done;
+	bool ret;
+
+	addr = pdata->mcx_stats_addr + STAT_ADDR_REG_OFFSET;
+	rd = pdata->mcx_stats_addr + STAT_READ_REG_OFFSET;
+	cmd = pdata->mcx_stats_addr + STAT_COMMAND_REG_OFFSET;
+	cmd_done = pdata->mcx_stats_addr + STAT_COMMAND_DONE_REG_OFFSET;
+
+	ret = xgene_enet_rd_indirect(addr, rd, cmd, cmd_done, rd_addr, rd_data);
+	if (!ret)
+		netdev_err(pdata->ndev, "MCX stats read failed, addr: %04x\n",
+			   rd_addr);
+}
+
+static int xgene_mii_phy_write(struct xgene_enet_pdata *pdata, int phy_id,
+			       u32 reg, u16 data)
+{
+	u32 addr = 0, wr_data = 0;
+	u32 done;
+	u8 wait = 10;
+
+	PHY_ADDR_SET(&addr, phy_id);
+	REG_ADDR_SET(&addr, reg);
+	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_ADDRESS_ADDR, addr);
+
+	PHY_CONTROL_SET(&wr_data, data);
+	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONTROL_ADDR, wr_data);
+	do {
+		usleep_range(5, 10);
+		xgene_enet_rd_mcx_mac(pdata, MII_MGMT_INDICATORS_ADDR, &done);
+	} while ((done & BUSY_MASK) && wait--);
+
+	if (done & BUSY_MASK) {
+		netdev_err(pdata->ndev, "MII_MGMT write failed\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+static int xgene_mii_phy_read(struct xgene_enet_pdata *pdata,
+			      u8 phy_id, u32 reg)
+{
+	u32 addr = 0;
+	u32 data, done;
+	u8 wait = 10;
+
+	PHY_ADDR_SET(&addr, phy_id);
+	REG_ADDR_SET(&addr, reg);
+	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_ADDRESS_ADDR, addr);
+	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_COMMAND_ADDR, READ_CYCLE_MASK);
+	do {
+		usleep_range(5, 10);
+		xgene_enet_rd_mcx_mac(pdata, MII_MGMT_INDICATORS_ADDR, &done);
+	} while ((done & BUSY_MASK) && wait--);
+
+	if (done & BUSY_MASK) {
+		netdev_err(pdata->ndev, "MII_MGMT read failed\n");
+		return -1;
+	}
+
+	xgene_enet_rd_mcx_mac(pdata, MII_MGMT_STATUS_ADDR, &data);
+	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_COMMAND_ADDR, 0);
+
+	return data;
+}
+
+void xgene_gmac_set_mac_addr(struct xgene_enet_pdata *pdata)
+{
+	u32 addr0, addr1;
+	u8 *dev_addr = pdata->ndev->dev_addr;
+
+	addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
+		(dev_addr[1] << 8) | dev_addr[0];
+	addr1 = (dev_addr[5] << 24) | (dev_addr[4] << 16);
+	addr1 |= pdata->phy_addr & 0xFFFF;
+
+	xgene_enet_wr_mcx_mac(pdata, STATION_ADDR0_ADDR, addr0);
+	xgene_enet_wr_mcx_mac(pdata, STATION_ADDR1_ADDR, addr1);
+}
+
+static int xgene_enet_ecc_init(struct xgene_enet_pdata *pdata)
+{
+	struct net_device *ndev = pdata->ndev;
+	u32 data;
+	u8 wait = 10;
+
+	xgene_enet_wr_diag_csr(pdata, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0x0);
+	do {
+		usleep_range(100, 110);
+		xgene_enet_rd_diag_csr(pdata, ENET_BLOCK_MEM_RDY_ADDR, &data);
+	} while ((data != 0xffffffff) && wait--);
+
+	if (data != 0xffffffff) {
+		netdev_err(ndev, "Failed to release memory from shutdown\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static void xgene_gmac_phy_enable_scan_cycle(struct xgene_enet_pdata *pdata)
+{
+	u32 val;
+
+	xgene_enet_rd_mcx_mac(pdata, MII_MGMT_COMMAND_ADDR, &val);
+	SCAN_CYCLE_MASK_SET(&val, 1);
+	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_COMMAND_ADDR, val);
+
+	/* Program phy address start scan from 0 and register at address 0x1 */
+	xgene_enet_rd_mcx_mac(pdata, MII_MGMT_ADDRESS_ADDR, &val);
+	PHY_ADDR_SET(&val, pdata->phy_dev->addr);
+	REG_ADDR_SET(&val, MII_BMSR);
+	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_ADDRESS_ADDR, val);
+}
+
+void xgene_gmac_reset(struct xgene_enet_pdata *pdata)
+{
+	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, SOFT_RESET1);
+	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, 0);
+}
+
+void xgene_gmac_init(struct xgene_enet_pdata *pdata, int speed)
+{
+	u32 value, mc2;
+	u32 intf_ctl, rgmii;
+	u32 icm0, icm2;
+
+	xgene_gmac_reset(pdata);
+
+	xgene_enet_rd_mcx_csr(pdata, ICM_CONFIG0_REG_0_ADDR, &icm0);
+	xgene_enet_rd_mcx_csr(pdata, ICM_CONFIG2_REG_0_ADDR, &icm2);
+	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_2_ADDR, &mc2);
+	xgene_enet_rd_mcx_mac(pdata, INTERFACE_CONTROL_ADDR, &intf_ctl);
+	xgene_enet_rd_csr(pdata, RGMII_REG_0_ADDR, &rgmii);
+
+	switch (speed) {
+	case SPEED_10:
+		ENET_INTERFACE_MODE2_SET(&mc2, 1);
+		CFG_MACMODE_SET(&icm0, 0);
+		CFG_WAITASYNCRD_SET(&icm2, 500);
+		rgmii &= ~CFG_SPEED_1250;
+		break;
+	case SPEED_100:
+		ENET_INTERFACE_MODE2_SET(&mc2, 1);
+		intf_ctl |= ENET_LHD_MODE;
+		CFG_MACMODE_SET(&icm0, 1);
+		CFG_WAITASYNCRD_SET(&icm2, 80);
+		rgmii &= ~CFG_SPEED_1250;
+		break;
+	default:
+		ENET_INTERFACE_MODE2_SET(&mc2, 2);
+		intf_ctl |= ENET_GHD_MODE;
+		CFG_TXCLK_MUXSEL0_SET(&rgmii, 4);
+		xgene_enet_rd_csr(pdata, DEBUG_REG_ADDR, &value);
+		value |= CFG_BYPASS_UNISEC_TX | CFG_BYPASS_UNISEC_RX;
+		xgene_enet_wr_csr(pdata, DEBUG_REG_ADDR, value);
+		break;
+	}
+
+	mc2 |= FULL_DUPLEX2;
+	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_2_ADDR, mc2);
+	xgene_enet_wr_mcx_mac(pdata, INTERFACE_CONTROL_ADDR, intf_ctl);
+
+	xgene_gmac_set_mac_addr(pdata);
+
+	/* Adjust MDC clock frequency */
+	xgene_enet_rd_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, &value);
+	MGMT_CLOCK_SEL_SET(&value, 7);
+	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, value);
+
+	/* Enable drop if bufpool not available */
+	xgene_enet_rd_csr(pdata, RSIF_CONFIG_REG_ADDR, &value);
+	value |= CFG_RSIF_FPBUFF_TIMEOUT_EN;
+	xgene_enet_wr_csr(pdata, RSIF_CONFIG_REG_ADDR, value);
+
+	/* Rtype should be copied from FP */
+	xgene_enet_wr_csr(pdata, RSIF_RAM_DBG_REG0_ADDR, 0);
+	xgene_enet_wr_csr(pdata, RGMII_REG_0_ADDR, rgmii);
+
+	/* Rx-Tx traffic resume */
+	xgene_enet_wr_csr(pdata, CFG_LINK_AGGR_RESUME_0_ADDR, TX_PORT0);
+
+	xgene_enet_wr_mcx_csr(pdata, ICM_CONFIG0_REG_0_ADDR, icm0);
+	xgene_enet_wr_mcx_csr(pdata, ICM_CONFIG2_REG_0_ADDR, icm2);
+
+	xgene_enet_rd_mcx_csr(pdata, RX_DV_GATE_REG_0_ADDR, &value);
+	value &= ~TX_DV_GATE_EN0;
+	value &= ~RX_DV_GATE_EN0;
+	value |= RESUME_RX0;
+	xgene_enet_wr_mcx_csr(pdata, RX_DV_GATE_REG_0_ADDR, value);
+
+	xgene_enet_wr_csr(pdata, CFG_BYPASS_ADDR, RESUME_TX);
+}
+
+void xgene_gmac_get_tx_stats(struct xgene_enet_pdata *pdata)
+{
+	struct xgene_enet_tx_stats *tx_stats = &pdata->stats.tx_stats;
+
+	xgene_enet_rd_mcx_stats(pdata, TBYT_ADDR, &tx_stats->byte_cntr);
+	xgene_enet_rd_mcx_stats(pdata, TPKT_ADDR, &tx_stats->pkt_cntr);
+	xgene_enet_rd_mcx_stats(pdata, TDRP_ADDR, &tx_stats->drop_frame_cntr);
+	xgene_enet_rd_mcx_stats(pdata, TFCS_ADDR, &tx_stats->fcs_error_cntr);
+	xgene_enet_rd_mcx_stats(pdata, TUND_ADDR,
+				&tx_stats->undersize_frame_cntr);
+}
+
+static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *pdata)
+{
+	u32 val = 0xffffffff;
+
+	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIWQASSOC_ADDR, val);
+	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIFPQASSOC_ADDR, val);
+	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEWQASSOC_ADDR, val);
+	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEFPQASSOC_ADDR, val);
+}
+
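+/* Bypass the classifier: steer all ingress frames to the given
+ * destination ring, drawing buffers from the pool selected by bufpool_id.
+ */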
+void xgene_enet_cle_bypass(struct xgene_enet_pdata *pdata,
+			   u32 dst_ring_num, u16 bufpool_id)
+{
+	u32 cb;
+	u32 fpsel;
+
+	fpsel = xgene_enet_ring_bufnum(bufpool_id) - 0x20;
+
+	xgene_enet_rd_csr(pdata, CLE_BYPASS_REG0_0_ADDR, &cb);
+	cb |= CFG_CLE_BYPASS_EN0;
+	CFG_CLE_IP_PROTOCOL0_SET(&cb, 3);
+	xgene_enet_wr_csr(pdata, CLE_BYPASS_REG0_0_ADDR, cb);
+
+	xgene_enet_rd_csr(pdata, CLE_BYPASS_REG1_0_ADDR, &cb);
+	CFG_CLE_DSTQID0_SET(&cb, dst_ring_num);
+	CFG_CLE_FPSEL0_SET(&cb, fpsel);
+	xgene_enet_wr_csr(pdata, CLE_BYPASS_REG1_0_ADDR, cb);
+}
+
+void xgene_gmac_rx_enable(struct xgene_enet_pdata *pdata)
+{
+	u32 data;
+
+	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
+	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data | RX_EN);
+}
+
+void xgene_gmac_tx_enable(struct xgene_enet_pdata *pdata)
+{
+	u32 data;
+
+	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
+	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data | TX_EN);
+}
+
+void xgene_gmac_rx_disable(struct xgene_enet_pdata *pdata)
+{
+	u32 data;
+
+	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
+	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data & ~RX_EN);
+}
+
+void xgene_gmac_tx_disable(struct xgene_enet_pdata *pdata)
+{
+	u32 data;
+
+	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
+	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data & ~TX_EN);
+}
+
+void xgene_enet_reset(struct xgene_enet_pdata *pdata)
+{
+	u32 val;
+
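+	/* Cycle the clock once to reset the Ethernet block */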
+	clk_prepare_enable(pdata->clk);
+	clk_disable_unprepare(pdata->clk);
+	clk_prepare_enable(pdata->clk);
+	xgene_enet_ecc_init(pdata);
+	xgene_enet_config_ring_if_assoc(pdata);
+
+	/* Enable auto-incr for scanning */
+	xgene_enet_rd_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, &val);
+	val |= SCAN_AUTO_INCR;
+	MGMT_CLOCK_SEL_SET(&val, 1);
+	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, val);
+}
+
+void xgene_gport_shutdown(struct xgene_enet_pdata *pdata)
+{
+	clk_disable_unprepare(pdata->clk);
+}
+
+static int xgene_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
+{
+	struct xgene_enet_pdata *pdata = bus->priv;
+	u32 val;
+
+	val = xgene_mii_phy_read(pdata, mii_id, regnum);
+	netdev_dbg(pdata->ndev, "mdio_rd: bus=%d reg=%d val=%x\n",
+		   mii_id, regnum, val);
+
+	return val;
+}
+
+static int xgene_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
+				 u16 val)
+{
+	struct xgene_enet_pdata *pdata = bus->priv;
+	int ret;
+
+	netdev_dbg(pdata->ndev, "mdio_wr: bus=%d reg=%d val=%x\n",
+		   mii_id, regnum, val);
+	ret = xgene_mii_phy_write(pdata, mii_id, regnum, val);
+
+	return ret;
+}
+
+static void xgene_enet_adjust_link(struct net_device *ndev)
+{
+	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
+	struct phy_device *phydev = pdata->phy_dev;
+	bool status_change = false;
+
+	if (phydev->link && pdata->phy_speed != phydev->speed) {
+		xgene_gmac_init(pdata, phydev->speed);
+		pdata->phy_speed = phydev->speed;
+		status_change = true;
+	}
+
+	if (pdata->phy_link != phydev->link) {
+		if (!phydev->link)
+			pdata->phy_speed = 0;
+		pdata->phy_link = phydev->link;
+		status_change = true;
+	}
+
+	if (!status_change)
+		return;
+
+	if (phydev->link) {
+		xgene_gmac_rx_enable(pdata);
+		xgene_gmac_tx_enable(pdata);
+	} else {
+		xgene_gmac_rx_disable(pdata);
+		xgene_gmac_tx_disable(pdata);
+	}
+	phy_print_status(phydev);
+}
+
+static int xgene_enet_phy_connect(struct net_device *ndev)
+{
+	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
+	struct device_node *phy_np;
+	struct phy_device *phy_dev;
+	struct device *dev = &pdata->pdev->dev;
+
+	phy_np = of_parse_phandle(dev->of_node, "phy-handle", 0);
+	if (!phy_np) {
+		netdev_dbg(ndev, "No phy-handle found\n");
+		return -ENODEV;
+	}
+
+	phy_dev = of_phy_connect(ndev, phy_np, &xgene_enet_adjust_link,
+				 0, pdata->phy_mode);
+	if (!phy_dev) {
+		netdev_err(ndev, "Could not connect to PHY\n");
+		return  -ENODEV;
+	}
+
+	pdata->phy_link = 0;
+	pdata->phy_speed = 0;
+	pdata->phy_dev = phy_dev;
+
+	return 0;
+}
+
+int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata)
+{
+	struct net_device *ndev = pdata->ndev;
+	struct device *dev = &pdata->pdev->dev;
+	struct device_node *child_np;
+	struct device_node *mdio_np = NULL;
+	struct mii_bus *mdio_bus;
+	int ret;
+
+	for_each_child_of_node(dev->of_node, child_np) {
+		if (of_device_is_compatible(child_np, "apm,xgene-mdio")) {
+			mdio_np = child_np;
+			break;
+		}
+	}
+
+	if (!mdio_np) {
+		netdev_dbg(ndev, "No mdio node in the dts\n");
+		return -1;
+	}
+
+	mdio_bus = mdiobus_alloc();
+	if (!mdio_bus)
+		return -ENOMEM;
+
+	mdio_bus->name = "APM X-Gene MDIO bus";
+	mdio_bus->read = xgene_enet_mdio_read;
+	mdio_bus->write = xgene_enet_mdio_write;
+	snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-%s", "xgene-mii",
+		 ndev->name);
+
+	mdio_bus->irq = devm_kcalloc(dev, PHY_MAX_ADDR, sizeof(int),
+				     GFP_KERNEL);
+	if (!mdio_bus->irq) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	mdio_bus->priv = pdata;
+	mdio_bus->parent = &ndev->dev;
+
+	ret = of_mdiobus_register(mdio_bus, mdio_np);
+	if (ret) {
+		netdev_err(ndev, "Failed to register MDIO bus\n");
+		goto err;
+	}
+	pdata->mdio_bus = mdio_bus;
+
+	ret = xgene_enet_phy_connect(ndev);
+	if (ret)
+		goto err;
+	xgene_gmac_phy_enable_scan_cycle(pdata);
+
+	return ret;
+
+err:
+	if (mdio_bus->irq)
+		devm_kfree(dev, mdio_bus->irq);
+	mdiobus_free(mdio_bus);
+
+	return ret;
+}
+
+int xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata)
+{
+	struct mii_bus *mdio_bus;
+
+	mdio_bus = pdata->mdio_bus;
+	mdiobus_unregister(mdio_bus);
+	mdiobus_free(mdio_bus);
+	pdata->mdio_bus = NULL;
+
+	return 0;
+}
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
new file mode 100644
index 0000000..5a6aa14
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
@@ -0,0 +1,395 @@ 
+/* Applied Micro X-Gene SoC Ethernet Driver
+ *
+ * Copyright (c) 2014, Applied Micro Circuits Corporation
+ * Authors: Iyappan Subramanian <isubramanian@apm.com>
+ *	    Ravi Patel <rapatel@apm.com>
+ *	    Keyur Chudgar <kchudgar@apm.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __XGENE_ENET_HW_H__
+#define __XGENE_ENET_HW_H__
+
+#include "xgene_enet_main.h"
+
+struct xgene_enet_pdata;
+struct xgene_enet_stats;
+
+/* clears and then sets the specified bits */
+static inline void set_bits(u32 *dst, u32 val, u32 start, u32 len)
+{
+	u32 end = start + len - 1;
+	u32 mask = GENMASK(end, start);
+
+	*dst &= ~mask;
+	*dst |= (val << start) & mask;
+}
+
+static inline u32 get_bits(u32 val, u32 start, u32 end)
+{
+	return (val & GENMASK(end, start)) >> start;
+}
+
+#define CSR_RING_ID		0x0008
+#define OVERWRITE		BIT(31)
+#define IS_BUFFER_POOL		BIT(20)
+#define PREFETCH_BUF_EN		BIT(21)
+#define CSR_RING_ID_BUF		0x000c
+#define CSR_RING_NE_INT_MODE	0x017c
+#define CSR_RING_CONFIG		0x006c
+#define CSR_RING_WR_BASE	0x0070
+#define NUM_RING_CONFIG		5
+#define BUFPOOL_MODE		3
+#define RM3			3
+#define INC_DEC_CMD_ADDR	0x002c
+#define UDP_HDR_SIZE		2
+#define BUF_LEN_CODE_2K		0x5000
+
+#define CREATE_MASK(pos, len)		GENMASK((pos)+(len)-1, (pos))
+#define CREATE_MASK_ULL(pos, len)	GENMASK_ULL((pos)+(len)-1, (pos))
+
+/* Empty slot soft signature */
+#define EMPTY_SLOT_INDEX	1
+#define EMPTY_SLOT		~0ULL
+
+#define WORK_DESC_SIZE		32
+#define BUFPOOL_DESC_SIZE	16
+
+#define RING_OWNER_MASK		GENMASK(9, 6)
+#define RING_BUFNUM_MASK	GENMASK(5, 0)
+
+#define SELTHRSH_POS		3
+#define SELTHRSH_LEN		3
+#define RINGADDRL_POS		5
+#define RINGADDRL_LEN		27
+#define RINGADDRH_POS		0
+#define RINGADDRH_LEN		6
+#define RINGSIZE_POS		23
+#define RINGSIZE_LEN		3
+#define RINGTYPE_POS		19
+#define RINGTYPE_LEN		2
+#define RINGMODE_POS		20
+#define RINGMODE_LEN		3
+#define RECOMTIMEOUTL_POS	28
+#define RECOMTIMEOUTL_LEN	3
+#define RECOMTIMEOUTH_POS	0
+#define RECOMTIMEOUTH_LEN	2
+#define NUMMSGSINQ_POS		1
+#define NUMMSGSINQ_LEN		16
+#define ACCEPTLERR		BIT(19)
+#define QCOHERENT		BIT(4)
+#define RECOMBBUF		BIT(27)
+
+#define BLOCK_ETH_CSR_OFFSET		0x2000
+#define BLOCK_ETH_RING_IF_OFFSET	0x9000
+#define BLOCK_ETH_CLKRST_CSR_OFFSET	0xC000
+#define BLOCK_ETH_DIAG_CSR_OFFSET	0xD000
+
+#define BLOCK_ETH_MAC_OFFSET		0x0000
+#define BLOCK_ETH_STATS_OFFSET		0x0014
+#define BLOCK_ETH_MAC_CSR_OFFSET	0x2800
+
+#define MAC_ADDR_REG_OFFSET		0x00
+#define MAC_COMMAND_REG_OFFSET		0x04
+#define MAC_WRITE_REG_OFFSET		0x08
+#define MAC_READ_REG_OFFSET		0x0c
+#define MAC_COMMAND_DONE_REG_OFFSET	0x10
+
+#define STAT_ADDR_REG_OFFSET		0x00
+#define STAT_COMMAND_REG_OFFSET		0x04
+#define STAT_WRITE_REG_OFFSET		0x08
+#define STAT_READ_REG_OFFSET		0x0c
+#define STAT_COMMAND_DONE_REG_OFFSET	0x10
+
+#define MII_MGMT_CONFIG_ADDR		0x20
+#define MII_MGMT_COMMAND_ADDR		0x24
+#define MII_MGMT_ADDRESS_ADDR		0x28
+#define MII_MGMT_CONTROL_ADDR		0x2c
+#define MII_MGMT_STATUS_ADDR		0x30
+#define MII_MGMT_INDICATORS_ADDR	0x34
+
+#define BUSY_MASK			BIT(0)
+#define READ_CYCLE_MASK			BIT(0)
+#define PHY_CONTROL_SET(dst, val)	set_bits(dst, val, 0, 16)
+
+#define ENET_SPARE_CFG_REG_ADDR		0x0750
+#define RSIF_CONFIG_REG_ADDR		0x0010
+#define RSIF_RAM_DBG_REG0_ADDR		0x0048
+#define RGMII_REG_0_ADDR		0x07e0
+#define CFG_LINK_AGGR_RESUME_0_ADDR	0x07c8
+#define DEBUG_REG_ADDR			0x0700
+#define CFG_BYPASS_ADDR			0x0294
+#define CLE_BYPASS_REG0_0_ADDR		0x0490
+#define CLE_BYPASS_REG1_0_ADDR		0x0494
+#define CFG_RSIF_FPBUFF_TIMEOUT_EN	BIT(31)
+#define RESUME_TX			BIT(0)
+#define CFG_SPEED_1250			BIT(24)
+#define TX_PORT0			BIT(0)
+#define CFG_BYPASS_UNISEC_TX		BIT(2)
+#define CFG_BYPASS_UNISEC_RX		BIT(1)
+#define CFG_CLE_BYPASS_EN0		BIT(31)
+#define CFG_TXCLK_MUXSEL0_SET(dst, val)	set_bits(dst, val, 29, 3)
+
+#define CFG_CLE_IP_PROTOCOL0_SET(dst, val)	set_bits(dst, val, 16, 2)
+#define CFG_CLE_DSTQID0_SET(dst, val)		set_bits(dst, val, 0, 12)
+#define CFG_CLE_FPSEL0_SET(dst, val)		set_bits(dst, val, 16, 4)
+#define CFG_MACMODE_SET(dst, val)		set_bits(dst, val, 18, 2)
+#define CFG_WAITASYNCRD_SET(dst, val)		set_bits(dst, val, 0, 16)
+#define ICM_CONFIG0_REG_0_ADDR		0x0400
+#define ICM_CONFIG2_REG_0_ADDR		0x0410
+#define RX_DV_GATE_REG_0_ADDR		0x05fc
+#define TX_DV_GATE_EN0			BIT(2)
+#define RX_DV_GATE_EN0			BIT(1)
+#define RESUME_RX0			BIT(0)
+#define ENET_CFGSSQMIWQASSOC_ADDR		0xe0
+#define ENET_CFGSSQMIFPQASSOC_ADDR		0xdc
+#define ENET_CFGSSQMIQMLITEFPQASSOC_ADDR	0xf0
+#define ENET_CFGSSQMIQMLITEWQASSOC_ADDR		0xf4
+#define ENET_CFG_MEM_RAM_SHUTDOWN_ADDR		0x70
+#define ENET_BLOCK_MEM_RDY_ADDR			0x74
+#define MAC_CONFIG_1_ADDR			0x00
+#define MAC_CONFIG_2_ADDR			0x04
+#define MAX_FRAME_LEN_ADDR			0x10
+#define INTERFACE_CONTROL_ADDR			0x38
+#define STATION_ADDR0_ADDR			0x40
+#define STATION_ADDR1_ADDR			0x44
+#define SCAN_CYCLE_MASK_SET(dst, val)		set_bits(dst, val, 0, 1)
+#define PHY_ADDR_SET(dst, val)			set_bits(dst, val, 8, 5)
+#define REG_ADDR_SET(dst, val)			set_bits(dst, val, 0, 5)
+#define ENET_INTERFACE_MODE2_SET(dst, val)	set_bits(dst, val, 8, 2)
+#define MGMT_CLOCK_SEL_SET(dst, val)		set_bits(dst, val, 0, 3)
+#define SOFT_RESET1			BIT(31)
+#define TX_EN				BIT(0)
+#define RX_EN				BIT(2)
+#define ENET_LHD_MODE			BIT(25)
+#define ENET_GHD_MODE			BIT(26)
+#define FULL_DUPLEX2			BIT(0)
+#define SCAN_AUTO_INCR			BIT(5)
+#define TBYT_ADDR			0x38
+#define TPKT_ADDR			0x39
+#define TDRP_ADDR			0x45
+#define TFCS_ADDR			0x47
+#define TUND_ADDR			0x4a
+
+#define TSO_IPPROTO_TCP			1
+#define TSO_IPPROTO_UDP			0
+#define	FULL_DUPLEX			2
+
+#define USERINFO_POS			0
+#define USERINFO_LEN			32
+#define FPQNUM_POS			32
+#define FPQNUM_LEN			12
+#define LERR_POS			60
+#define LERR_LEN			3
+#define STASH_POS			52
+#define STASH_LEN			2
+#define BUFDATALEN_POS			48
+#define BUFDATALEN_LEN			12
+#define DATAADDR_POS			0
+#define DATAADDR_LEN			42
+#define COHERENT_POS			63
+#define HENQNUM_POS			48
+#define HENQNUM_LEN			12
+#define TYPESEL_POS			44
+#define TYPESEL_LEN			4
+#define ETHHDR_POS			12
+#define IC_POS				35	/* Insert CRC */
+#define TCPHDR_POS			0
+#define TCPHDR_LEN			6
+#define IPHDR_POS			6
+#define IPHDR_LEN			6
+#define EC_POS				22	/* Enable checksum */
+#define IS_POS				24	/* IP protocol select */
+
+#define DATAADDR_MASK		CREATE_MASK_ULL(DATAADDR_POS, DATAADDR_LEN)
+#define BUFDATALEN_MASK		CREATE_MASK_ULL(BUFDATALEN_POS, BUFDATALEN_LEN)
+#define USERINFO_MASK		CREATE_MASK_ULL(USERINFO_POS, USERINFO_LEN)
+#define FPQNUM_MASK		CREATE_MASK_ULL(FPQNUM_POS, FPQNUM_LEN)
+#define LERR_MASK		CREATE_MASK_ULL(LERR_POS, LERR_LEN)
+#define STASHING_MASK		CREATE_MASK_ULL(STASH_POS, STASH_LEN)
+#define COHERENT_MASK		BIT_ULL(COHERENT_POS)
+#define HENQNUM_MASK		CREATE_MASK_ULL(HENQNUM_POS, HENQNUM_LEN)
+#define TCPHDR_MASK		CREATE_MASK(TCPHDR_POS, TCPHDR_LEN)
+#define IPHDR_MASK		CREATE_MASK(IPHDR_POS, IPHDR_LEN)
+#define EC_MASK			BIT(EC_POS)
+#define IS_MASK			BIT(IS_POS)
+#define INSERT_CRC		BIT_ULL(IC_POS)
+#define TYPE_ETH_WORK_MESSAGE	BIT_ULL(44)
+
+struct xgene_enet_desc {
+	dma_addr_t dataaddr;
+	u16 bufdatalen;
+	u32 userinfo;
+	u64 hopinfo_lsb;
+	u16 henqnum;
+	u16 fpqnum;
+	u8 stash;
+	u8 status;
+};
+
+struct xgene_enet_raw_desc {
+	u64 m0;
+	u64 m1;
+	u64 m2;
+	u64 m3;
+};
+
+struct xgene_enet_raw_desc16 {
+	u64 m0;
+	u64 m1;
+};
+
+static inline void xgene_enet_cpu_to_le64(void *desc_ptr, int count)
+{
+	u64 *desc = desc_ptr;
+	int i;
+
+	for (i = 0; i < count; i++)
+		desc[i] = cpu_to_le64(desc[i]);
+}
+
+static inline void xgene_enet_le64_to_cpu(void *desc_ptr, int count)
+{
+	u64 *desc = desc_ptr;
+	int i;
+
+	for (i = 0; i < count; i++)
+		desc[i] = le64_to_cpu(desc[i]);
+}
+
+static inline void xgene_enet_desc16_to_le64(void *desc_ptr)
+{
+	u64 *desc;
+
+	desc = desc_ptr;
+	desc[1] = cpu_to_le64(desc[1]);
+}
+
+static inline void xgene_enet_le64_to_desc16(void *desc_ptr)
+{
+	u64 *desc;
+
+	desc = desc_ptr;
+	desc[1] = le64_to_cpu(desc[1]);
+}
+
+enum xgene_enet_ring_cfgsize {
+	RING_CFGSIZE_512B,
+	RING_CFGSIZE_2KB,
+	RING_CFGSIZE_16KB,
+	RING_CFGSIZE_64KB,
+	RING_CFGSIZE_512KB,
+	RING_CFGSIZE_INVALID
+};
+
+enum xgene_enet_ring_type {
+	RING_DISABLED,
+	RING_REGULAR,
+	RING_BUFPOOL
+};
+
+enum xgene_ring_owner {
+	RING_OWNER_ETH0,
+	RING_OWNER_CPU = 15,
+	RING_OWNER_INVALID
+};
+
+enum xgene_enet_ring_bufnum {
+	RING_BUFNUM_REGULAR = 0x0,
+	RING_BUFNUM_BUFPOOL = 0x20,
+	RING_BUFNUM_INVALID
+};
+
+enum xgene_enet_cmd {
+	XGENE_ENET_WR_CMD = BIT(31),
+	XGENE_ENET_RD_CMD = BIT(30)
+};
+
+enum xgene_enet_err_code {
+	HBF_READ_DATA = 3,
+	HBF_LL_READ = 4,
+	BAD_WORK_MSG = 6,
+	BUFPOOL_TIMEOUT = 15,
+	INGRESS_CRC = 16,
+	INGRESS_CHECKSUM = 17,
+	INGRESS_TRUNC_FRAME = 18,
+	INGRESS_PKT_LEN = 19,
+	INGRESS_PKT_UNDER = 20,
+	INGRESS_FIFO_OVERRUN = 21,
+	INGRESS_CHECKSUM_COMPUTE = 26,
+	ERR_CODE_INVALID
+};
+
+static inline enum xgene_ring_owner xgene_enet_ring_owner(u16 id)
+{
+	return (id & RING_OWNER_MASK) >> 6;
+}
+
+static inline u8 xgene_enet_ring_bufnum(u16 id)
+{
+	return id & RING_BUFNUM_MASK;
+}
+
+static inline bool xgene_enet_is_bufpool(u16 id)
+{
+	return (id & RING_BUFNUM_MASK) >= 0x20;
+}
+
+static inline u16 xgene_enet_get_numslots(u16 id, u32 size)
+{
+	bool is_bufpool = xgene_enet_is_bufpool(id);
+
+	return (is_bufpool) ? size / BUFPOOL_DESC_SIZE :
+			      size / WORK_DESC_SIZE;
+}
+
+struct xgene_enet_desc_ring *xgene_enet_setup_ring(
+		struct xgene_enet_desc_ring *ring);
+void xgene_enet_clear_ring(struct xgene_enet_desc_ring *ring);
+
+void xgene_set_tx_desc(struct xgene_enet_desc_ring *ring,
+		       struct xgene_enet_raw_desc *raw_desc);
+void xgene_get_desc(struct xgene_enet_desc_ring *ring,
+		    struct xgene_enet_raw_desc *raw_desc);
+void xgene_set_init_bufpool_desc(struct xgene_enet_desc_ring *ring,
+				 struct xgene_enet_raw_desc16 *raw_desc);
+void xgene_set_refill_bufpool_desc(struct xgene_enet_desc_ring *ring,
+				   struct xgene_enet_raw_desc16 *raw_desc);
+void xgene_get_bufpool_desc(struct xgene_enet_desc_ring *ring,
+			    struct xgene_enet_raw_desc16 *raw_desc);
+u64 xgene_prepare_eth_work_msg(u8 l4hlen, u8 l3hlen, u8 ethhdr,
+			       u8 csum_enable, u8 proto);
+void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
+			    struct xgene_enet_pdata *pdata,
+			    enum xgene_enet_err_code status);
+
+void xgene_enet_reset(struct xgene_enet_pdata *priv);
+void xgene_gmac_reset(struct xgene_enet_pdata *priv);
+void xgene_gmac_init(struct xgene_enet_pdata *priv, int speed);
+void xgene_gmac_tx_enable(struct xgene_enet_pdata *priv);
+void xgene_gmac_rx_enable(struct xgene_enet_pdata *priv);
+void xgene_gmac_tx_disable(struct xgene_enet_pdata *priv);
+void xgene_gmac_rx_disable(struct xgene_enet_pdata *priv);
+void xgene_gmac_set_mac_addr(struct xgene_enet_pdata *pdata);
+void xgene_enet_cle_bypass(struct xgene_enet_pdata *pdata,
+			   u32 dst_ring_num, u16 bufpool_id);
+void xgene_gport_shutdown(struct xgene_enet_pdata *priv);
+void xgene_gmac_get_tx_stats(struct xgene_enet_pdata *pdata);
+
+int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata);
+int xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata);
+
+#endif /* __XGENE_ENET_HW_H__ */
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
new file mode 100644
index 0000000..a68f47d
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -0,0 +1,959 @@ 
+/* Applied Micro X-Gene SoC Ethernet Driver
+ *
+ * Copyright (c) 2014, Applied Micro Circuits Corporation
+ * Authors: Iyappan Subramanian <isubramanian@apm.com>
+ *	    Ravi Patel <rapatel@apm.com>
+ *	    Keyur Chudgar <kchudgar@apm.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "xgene_enet_main.h"
+#include "xgene_enet_hw.h"
+
+static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
+{
+	struct xgene_enet_raw_desc16 *raw_desc;
+	int i;
+
+	for (i = 0; i < buf_pool->slots; i++) {
+		raw_desc = &buf_pool->raw_desc16[i];
+
+		buf_pool->desc.userinfo = i;
+		buf_pool->desc.fpqnum = buf_pool->dst_ring_num;
+		buf_pool->desc.stash = 1;
+
+		xgene_set_init_bufpool_desc(buf_pool, raw_desc);
+
+		/* Hardware expects descriptor in little endian format */
+		xgene_enet_cpu_to_le64(raw_desc, 4);
+	}
+}
+
+static struct device *ndev_to_dev(struct net_device *ndev)
+{
+	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
+
+	return &pdata->pdev->dev;
+}
+
+static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
+				     u32 nbuf)
+{
+	struct sk_buff *skb;
+	struct xgene_enet_raw_desc16 *raw_desc;
+	struct net_device *ndev;
+	struct device *dev;
+	dma_addr_t dma_addr;
+	u32 tail = buf_pool->tail;
+	u32 slots = buf_pool->slots - 1;
+	u16 bufdatalen, len;
+	int i;
+
+	ndev = buf_pool->ndev;
+	dev = ndev_to_dev(buf_pool->ndev);
+	bufdatalen = BUF_LEN_CODE_2K | (SKB_BUFFER_SIZE & GENMASK(11, 0));
+	len = XGENE_ENET_MAX_MTU;
+
+	for (i = 0; i < nbuf; i++) {
+		raw_desc = &buf_pool->raw_desc16[tail];
+
+		skb = netdev_alloc_skb_ip_align(ndev, len);
+		if (unlikely(!skb))
+			return -ENOMEM;
+		buf_pool->rx_skb[tail] = skb;
+
+		dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
+		if (dma_mapping_error(dev, dma_addr)) {
+			netdev_err(ndev, "DMA mapping error\n");
+			dev_kfree_skb_any(skb);
+			return -EINVAL;
+		}
+
+		buf_pool->desc.dataaddr = dma_addr;
+		buf_pool->desc.bufdatalen = bufdatalen;
+
+		xgene_set_refill_bufpool_desc(buf_pool, raw_desc);
+
+		xgene_enet_desc16_to_le64(raw_desc);
+		tail = (tail + 1) & slots;
+	}
+
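+	/* Advertise the newly queued buffers to the ring command register */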
+	iowrite32(nbuf, buf_pool->cmd);
+	buf_pool->tail = tail;
+
+	return 0;
+}
+
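+/* Queue number as addressed by the hardware: ring manager ID in the upper
+ * bits, ring number in the low 10 bits.
+ */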
+static u16 xgene_enet_dst_ring_num(struct xgene_enet_desc_ring *ring)
+{
+	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
+
+	return ((u16)pdata->rm << 10) | ring->num;
+}
+
+static u8 xgene_enet_hdr_len(const void *data)
+{
+	const struct ethhdr *eth = data;
+
+	return (eth->h_proto == htons(ETH_P_8021Q)) ? VLAN_ETH_HLEN : ETH_HLEN;
+}
+
+static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring)
+{
+	u32 *cmd_base = ring->cmd_base;
+	u32 ring_state, num_msgs;
+
+	ring_state = ioread32(&cmd_base[1]);
+	num_msgs = ring_state & CREATE_MASK(NUMMSGSINQ_POS, NUMMSGSINQ_LEN);
+
+	return num_msgs >> NUMMSGSINQ_POS;
+}
+
+static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool)
+{
+	struct xgene_enet_raw_desc16 *raw_desc;
+	u32 slots = buf_pool->slots - 1;
+	u32 tail = buf_pool->tail;
+	u32 userinfo;
+	int i, len;
+
+	len = xgene_enet_ring_len(buf_pool);
+	for (i = 0; i < len; i++) {
+		tail = (tail - 1) & slots;
+		raw_desc = &buf_pool->raw_desc16[tail];
+
+		/* Hardware stores descriptor in little endian format */
+		xgene_enet_le64_to_desc16(raw_desc);
+
+		xgene_get_bufpool_desc(buf_pool, raw_desc);
+		userinfo = buf_pool->desc.userinfo;
+		dev_kfree_skb_any(buf_pool->rx_skb[userinfo]);
+	}
+
+	iowrite32(-len, buf_pool->cmd);
+	buf_pool->tail = tail;
+}
+
+irqreturn_t xgene_enet_rx_irq(const int irq, void *data)
+{
+	struct xgene_enet_desc_ring *rx_ring = data;
+
+	if (napi_schedule_prep(&rx_ring->napi)) {
+		disable_irq_nosync(irq);
+		__napi_schedule(&rx_ring->napi);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring)
+{
+	struct xgene_enet_desc *desc;
+	struct sk_buff *skb;
+	struct device *dev;
+	u16 skb_index;
+	int ret = 0;
+
+	desc = &cp_ring->desc;
+	skb_index = desc->userinfo;
+	skb = cp_ring->cp_skb[skb_index];
+
+	dev = ndev_to_dev(cp_ring->ndev);
+	dma_unmap_single(dev, desc->dataaddr, desc->bufdatalen, DMA_TO_DEVICE);
+
+	/* Checking for error */
+	if (unlikely(desc->status > 2)) {
+		xgene_enet_parse_error(cp_ring, netdev_priv(cp_ring->ndev),
+				       desc->status);
+		ret = -1;
+	}
+
+	if (likely(skb)) {
+		dev_kfree_skb_any(skb);
+	} else {
+		netdev_err(cp_ring->ndev, "completion skb is NULL\n");
+		ret = -1;
+	}
+
+	return ret;
+}
+
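+/* Build the per-skb work message: checksum offload is requested only for
+ * unfragmented IPv4 TCP/UDP frames; all other packets go out with the
+ * offload bits cleared.
+ */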
+static u64 xgene_enet_work_msg(struct sk_buff *skb)
+{
+	struct iphdr *iph;
+	u8 l3hlen, l4hlen = 0;
+	u8 csum_enable = 0;
+	u8 proto = 0;
+	u8 ethhdr;
+	u64 hopinfo;
+
+	if (unlikely(skb->protocol != htons(ETH_P_IP)) &&
+	    unlikely(skb->protocol != htons(ETH_P_8021Q)))
+		goto out;
+
+	if (unlikely(!(skb->dev->features & NETIF_F_IP_CSUM)))
+		goto out;
+
+	iph = ip_hdr(skb);
+	if (unlikely(iph->frag_off & htons(IP_MF | IP_OFFSET)))
+		goto out;
+
+	if (likely(iph->protocol == IPPROTO_TCP)) {
+		l4hlen = tcp_hdrlen(skb) / 4;
+		csum_enable = 1;
+		proto = TSO_IPPROTO_TCP;
+	} else if (iph->protocol == IPPROTO_UDP) {
+		l4hlen = UDP_HDR_SIZE;
+		csum_enable = 1;
+		proto = TSO_IPPROTO_UDP;
+	}
+out:
+	l3hlen = ip_hdrlen(skb) >> 2;
+	ethhdr = xgene_enet_hdr_len(skb->data);
+	hopinfo = xgene_prepare_eth_work_msg(l4hlen, l3hlen, ethhdr,
+					     csum_enable, proto);
+
+	return hopinfo;
+}
+
+static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring,
+				     struct sk_buff *skb)
+{
+	struct device *dev = ndev_to_dev(tx_ring->ndev);
+	struct xgene_enet_raw_desc *raw_desc;
+	dma_addr_t dma_addr;
+	u16 tail = tx_ring->tail;
+	u64 hopinfo;
+
+	raw_desc = &tx_ring->raw_desc[tail];
+	memset(raw_desc, 0, sizeof(struct xgene_enet_raw_desc));
+
+	dma_addr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, dma_addr)) {
+		netdev_err(tx_ring->ndev, "DMA mapping error\n");
+		return -EINVAL;
+	}
+
+	tx_ring->desc.dataaddr = dma_addr;
+	tx_ring->desc.bufdatalen = skb->len;
+	tx_ring->desc.henqnum = tx_ring->dst_ring_num;
+	tx_ring->desc.userinfo = tail;
+
+	hopinfo = xgene_enet_work_msg(skb);
+	tx_ring->desc.hopinfo_lsb = hopinfo;
+
+	xgene_set_tx_desc(tx_ring, raw_desc);
+
+	/* Hardware expects descriptor in little endian format */
+	xgene_enet_cpu_to_le64(raw_desc, 4);
+	tx_ring->cp_ring->cp_skb[tail] = skb;
+
+	return 0;
+}
+
+static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
+					 struct net_device *ndev)
+{
+	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
+	struct xgene_enet_desc_ring *tx_ring = pdata->tx_ring;
+	struct xgene_enet_desc_ring *cp_ring = tx_ring->cp_ring;
+	u32 tx_level, cq_level;
+
+	tx_level = xgene_enet_ring_len(tx_ring);
+	cq_level = xgene_enet_ring_len(cp_ring);
+	if (unlikely(tx_level > pdata->tx_qcnt_hi ||
+		     cq_level > pdata->cp_qcnt_hi)) {
+		netif_stop_queue(ndev);
+		return NETDEV_TX_BUSY;
+	}
+
+	if (xgene_enet_setup_tx_desc(tx_ring, skb))
+		return NETDEV_TX_OK;
+
+	iowrite32(1, tx_ring->cmd);
+	skb_tx_timestamp(skb);
+	tx_ring->tail = (tx_ring->tail + 1) & (tx_ring->slots - 1);
+
+	return NETDEV_TX_OK;
+}
+
+void xgene_enet_skip_csum(struct sk_buff *skb)
+{
+	struct iphdr *iph = ip_hdr(skb);
+
+	if (!(iph->frag_off & htons(IP_MF | IP_OFFSET)) ||
+	    (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)) {
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+	}
+}
+
+static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring)
+{
+	struct net_device *ndev;
+	struct xgene_enet_pdata *pdata;
+	struct device *dev;
+	struct xgene_enet_desc_ring *buf_pool;
+	u32 datalen, skb_index;
+	struct sk_buff *skb;
+	struct xgene_enet_desc *desc;
+	int ret = 0;
+
+	ndev = rx_ring->ndev;
+	pdata = netdev_priv(ndev);
+	dev = ndev_to_dev(rx_ring->ndev);
+	buf_pool = rx_ring->buf_pool;
+
+	desc = &rx_ring->desc;
+	dma_unmap_single(dev, desc->dataaddr, XGENE_ENET_MAX_MTU,
+			 DMA_FROM_DEVICE);
+
+	skb_index = desc->userinfo;
+	skb = buf_pool->rx_skb[skb_index];
+
+	/* checking for error */
+	if (unlikely(desc->status > 2)) {
+		dev_kfree_skb_any(skb);
+		xgene_enet_parse_error(rx_ring, netdev_priv(rx_ring->ndev),
+				       desc->status);
+		pdata->stats.rx_stats.dropped++;
+		ret = -1;
+		goto out;
+	}
+
+	/* strip off CRC as HW isn't doing this */
+	datalen = desc->bufdatalen;
+	datalen -= 4;
+	prefetch(skb->data - NET_IP_ALIGN);
+	skb_put(skb, datalen);
+
+	skb_checksum_none_assert(skb);
+	skb->protocol = eth_type_trans(skb, ndev);
+	if (likely((ndev->features & NETIF_F_IP_CSUM) &&
+		   skb->protocol == htons(ETH_P_IP))) {
+		xgene_enet_skip_csum(skb);
+	}
+
+	pdata->stats.rx_stats.pkts++;
+	pdata->stats.rx_stats.bytes += datalen;
+	napi_gro_receive(&rx_ring->napi, skb);
+out:
+	if (--rx_ring->nbufpool == 0) {
+		ret = xgene_enet_refill_bufpool(buf_pool, NUM_BUFPOOL);
+		rx_ring->nbufpool = NUM_BUFPOOL;
+	}
+
+	return ret;
+}
+
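+/* Drain up to budget descriptors from a completion ring: a non-zero
+ * free-pool queue number marks a received frame, otherwise the
+ * descriptor is a Tx completion.
+ */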
+static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
+				   int budget)
+{
+	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
+	struct xgene_enet_raw_desc *raw_desc;
+	u16 head = ring->head;
+	u16 slots = ring->slots - 1;
+	int ret, count = 0;
+
+	do {
+		raw_desc = &ring->raw_desc[head];
+		if (unlikely(((u64 *)raw_desc)[EMPTY_SLOT_INDEX] == EMPTY_SLOT))
+			break;
+
+		/* Hardware stores descriptor in little endian format */
+		xgene_enet_le64_to_cpu(raw_desc, 4);
+		xgene_get_desc(ring, raw_desc);
+		if (ring->desc.fpqnum)
+			ret = xgene_enet_rx_frame(ring);
+		else
+			ret = xgene_enet_tx_completion(ring);
+		((u64 *)raw_desc)[EMPTY_SLOT_INDEX] = EMPTY_SLOT;
+
+		head = (head + 1) & slots;
+		count++;
+
+		if (ret)
+			break;
+	} while (--budget);
+
+	if (likely(count)) {
+		iowrite32(-count, ring->cmd);
+		ring->head = head;
+
+		if (netif_queue_stopped(ring->ndev)) {
+			if (xgene_enet_ring_len(ring) < pdata->cp_qcnt_low)
+				netif_wake_queue(ring->ndev);
+		}
+	}
+
+	return count;
+}
+
+static int xgene_enet_napi(struct napi_struct *napi, const int budget)
+{
+	struct xgene_enet_desc_ring *ring;
+	int processed;
+
+	ring = container_of(napi, struct xgene_enet_desc_ring, napi);
+	processed = xgene_enet_process_ring(ring, budget);
+
+	if (processed != budget) {
+		napi_complete(napi);
+		enable_irq(ring->irq);
+	}
+
+	return processed;
+}
+
+static void xgene_enet_timeout(struct net_device *ndev)
+{
+	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
+
+	xgene_gmac_reset(pdata);
+}
+
+static int xgene_enet_register_irq(struct net_device *ndev)
+{
+	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
+	struct device *dev = &pdata->pdev->dev;
+	int ret;
+
+	ret = devm_request_irq(dev, pdata->rx_ring->irq, xgene_enet_rx_irq,
+			      IRQF_SHARED, ndev->name, pdata->rx_ring);
+	if (ret) {
+		netdev_err(ndev, "rx%d interrupt request failed\n",
+			   pdata->rx_ring->irq);
+	}
+
+	return ret;
+}
+
+static void xgene_enet_free_irq(struct net_device *ndev)
+{
+	struct xgene_enet_pdata *pdata;
+	struct device *dev;
+
+	pdata = netdev_priv(ndev);
+	dev = &pdata->pdev->dev;
+	devm_free_irq(dev, pdata->rx_ring->irq, pdata->rx_ring);
+}
+
+static int xgene_enet_open(struct net_device *ndev)
+{
+	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
+	int ret;
+
+	xgene_gmac_tx_enable(pdata);
+	xgene_gmac_rx_enable(pdata);
+
+	ret = xgene_enet_register_irq(ndev);
+	if (ret)
+		return ret;
+	napi_enable(&pdata->rx_ring->napi);
+
+	if (pdata->phy_dev)
+		phy_start(pdata->phy_dev);
+
+	netif_start_queue(ndev);
+
+	return ret;
+}
+
+static int xgene_enet_close(struct net_device *ndev)
+{
+	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
+
+	netif_stop_queue(ndev);
+
+	if (pdata->phy_dev)
+		phy_stop(pdata->phy_dev);
+
+	napi_disable(&pdata->rx_ring->napi);
+	xgene_enet_free_irq(ndev);
+	xgene_enet_process_ring(pdata->rx_ring, -1);
+
+	xgene_gmac_tx_disable(pdata);
+	xgene_gmac_rx_disable(pdata);
+
+	return 0;
+}
+
+static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring)
+{
+	struct xgene_enet_pdata *pdata;
+	struct device *dev;
+
+	pdata = netdev_priv(ring->ndev);
+	dev = &pdata->pdev->dev;
+
+	xgene_enet_clear_ring(ring);
+	dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
+	devm_kfree(dev, ring);
+}
+
+static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata)
+{
+	struct device *dev = &pdata->pdev->dev;
+	struct xgene_enet_desc_ring *buf_pool;
+
+	if (pdata->tx_ring) {
+		xgene_enet_delete_ring(pdata->tx_ring);
+		pdata->tx_ring = NULL;
+	}
+
+	if (pdata->rx_ring) {
+		buf_pool = pdata->rx_ring->buf_pool;
+		xgene_enet_delete_bufpool(buf_pool);
+		xgene_enet_delete_ring(buf_pool);
+		devm_kfree(dev, buf_pool->rx_skb);
+
+		xgene_enet_delete_ring(pdata->rx_ring);
+		pdata->rx_ring = NULL;
+	}
+}
+
+static int xgene_enet_get_ring_size(struct device *dev,
+				    enum xgene_enet_ring_cfgsize cfgsize)
+{
+	int size = -EINVAL;
+
+	switch (cfgsize) {
+	case RING_CFGSIZE_512B:
+		size = 0x200;
+		break;
+	case RING_CFGSIZE_2KB:
+		size = 0x800;
+		break;
+	case RING_CFGSIZE_16KB:
+		size = 0x4000;
+		break;
+	case RING_CFGSIZE_64KB:
+		size = 0x10000;
+		break;
+	case RING_CFGSIZE_512KB:
+		size = 0x80000;
+		break;
+	default:
+		dev_err(dev, "Unsupported cfg ring size %d\n", cfgsize);
+		break;
+	}
+
+	return size;
+}
+
+static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
+			struct net_device *ndev, u32 ring_num,
+			enum xgene_enet_ring_cfgsize cfgsize, u32 ring_id)
+{
+	struct xgene_enet_desc_ring *ring;
+	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
+	struct device *dev = &pdata->pdev->dev;
+	int size;
+
+	ring = devm_kzalloc(dev, sizeof(struct xgene_enet_desc_ring),
+			    GFP_KERNEL);
+	if (!ring)
+		return NULL;
+
+	ring->ndev = ndev;
+	ring->num = ring_num;
+	ring->cfgsize = cfgsize;
+	ring->id = ring_id;
+
+	size = xgene_enet_get_ring_size(dev, cfgsize);
+	if (size < 0)
+		goto err;
+
+	ring->desc_addr = dma_zalloc_coherent(dev, size, &ring->dma,
+					      GFP_KERNEL);
+	if (!ring->desc_addr)
+		goto err;
+	ring->size = size;
+
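+	/* each ring owns a 64-byte window in the ring command region */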
+	ring->cmd_base = pdata->ring_cmd_addr + (ring->num << 6);
+	ring->cmd = ring->cmd_base + INC_DEC_CMD_ADDR;
+	pdata->rm = RM3;
+	ring = xgene_enet_setup_ring(ring);
+	netdev_dbg(ndev, "ring info: num=%d  size=%d  id=%d  slots=%d\n",
+		   ring->num, ring->size, ring->id, ring->slots);
+
+	return ring;
+err:
+	devm_kfree(dev, ring);
+
+	return NULL;
+}
+
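+/* A ring id encodes the ring owner in the upper bits and a per-owner
+ * buffer number in the low six bits.
+ */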
+static u16 xgene_enet_get_ring_id(enum xgene_ring_owner owner, u8 bufnum)
+{
+	return (owner << 6) | (bufnum & GENMASK(5, 0));
+}
+
+static int xgene_enet_create_desc_rings(struct net_device *ndev)
+{
+	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
+	struct device *dev = &pdata->pdev->dev;
+	struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
+	struct xgene_enet_desc_ring *buf_pool = NULL;
+	u8 cpu_bufnum = 0, eth_bufnum = 0;
+	u8 bp_bufnum = 0x20;
+	u16 ring_id, ring_num = 0;
+	int ret;
+
+	/* allocate rx descriptor ring */
+	ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++);
+	rx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
+					      RING_CFGSIZE_16KB, ring_id);
+	if (!rx_ring) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	/* allocate buffer pool for receiving packets */
+	ring_id = xgene_enet_get_ring_id(RING_OWNER_ETH0, bp_bufnum++);
+	buf_pool = xgene_enet_create_desc_ring(ndev, ring_num++,
+					       RING_CFGSIZE_2KB, ring_id);
+	if (!buf_pool) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	rx_ring->nbufpool = NUM_BUFPOOL;
+	rx_ring->buf_pool = buf_pool;
+	rx_ring->irq = pdata->rx_irq;
+	buf_pool->rx_skb = devm_kcalloc(dev, buf_pool->slots,
+				     sizeof(struct sk_buff *), GFP_KERNEL);
+	if (!buf_pool->rx_skb) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	buf_pool->dst_ring_num = xgene_enet_dst_ring_num(buf_pool);
+	pdata->rx_ring = rx_ring;
+
+	/* allocate tx descriptor ring */
+	ring_id = xgene_enet_get_ring_id(RING_OWNER_ETH0, eth_bufnum++);
+	tx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
+					      RING_CFGSIZE_16KB, ring_id);
+	if (!tx_ring) {
+		ret = -ENOMEM;
+		goto err;
+	}
+	pdata->tx_ring = tx_ring;
+
+	cp_ring = pdata->rx_ring;
+	cp_ring->cp_skb = devm_kcalloc(dev, tx_ring->slots,
+				     sizeof(struct sk_buff *), GFP_KERNEL);
+	if (!cp_ring->cp_skb) {
+		ret = -ENOMEM;
+		goto err;
+	}
+	pdata->tx_ring->cp_ring = cp_ring;
+	pdata->tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring);
+
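+	/* Queue-depth watermarks; the completion path in
+	 * xgene_enet_process_ring() wakes a stopped Tx queue once fewer
+	 * than cp_qcnt_low descriptors remain outstanding.
+	 */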
+	pdata->tx_qcnt_hi = pdata->tx_ring->slots / 2;
+	pdata->cp_qcnt_hi = pdata->rx_ring->slots / 2;
+	pdata->cp_qcnt_low = pdata->cp_qcnt_hi / 2;
+
+	return 0;
+
+err:
+	xgene_enet_delete_desc_rings(pdata);
+	return ret;
+}
+
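+/* Fold the MAC hardware counters and the Rx counters accumulated by
+ * the driver into the standard netdev stats.  The MAC Tx byte counter
+ * includes the 4-byte FCS, which is subtracted here.
+ */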
+static struct net_device_stats *xgene_enet_get_stats(struct net_device *ndev)
+{
+	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
+	struct net_device_stats *nst = &pdata->nstats;
+	struct xgene_enet_rx_stats *rx_stats;
+	struct xgene_enet_tx_stats *tx_stats;
+	u32 pkt_bytes, crc_bytes = 4;
+
+	tx_stats = &pdata->stats.tx_stats;
+	rx_stats = &pdata->stats.rx_stats;
+
+	spin_lock(&pdata->stats_lock);
+	xgene_gmac_get_tx_stats(pdata);
+
+	pkt_bytes = tx_stats->byte_cntr;
+	pkt_bytes -= tx_stats->pkt_cntr * crc_bytes;
+	nst->tx_packets += tx_stats->pkt_cntr;
+	nst->tx_bytes += pkt_bytes;
+
+	nst->tx_dropped += tx_stats->drop_frame_cntr;
+	nst->tx_errors += tx_stats->fcs_error_cntr +
+			  tx_stats->undersize_frame_cntr;
+
+	pkt_bytes = rx_stats->bytes;
+	nst->rx_packets = rx_stats->pkts;
+	nst->rx_bytes = pkt_bytes;
+
+	nst->rx_dropped = rx_stats->dropped;
+	nst->rx_length_errors = rx_stats->frm_len_err +
+				rx_stats->undersize_pkts;
+	nst->rx_over_errors = rx_stats->oversize_pkts;
+	nst->rx_crc_errors = rx_stats->crc_err;
+	nst->rx_fifo_errors = rx_stats->fifo_overrun;
+	nst->rx_errors = nst->rx_length_errors +
+			 rx_stats->crc_err +
+			 rx_stats->fifo_overrun +
+			 rx_stats->oversize_pkts +
+			 rx_stats->checksum_err +
+			 rx_stats->dropped;
+	spin_unlock(&pdata->stats_lock);
+
+	return nst;
+}
+
+static int xgene_enet_set_mac_address(struct net_device *ndev, void *addr)
+{
+	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
+	int ret;
+
+	ret = eth_mac_addr(ndev, addr);
+	if (ret)
+		return ret;
+	xgene_gmac_set_mac_addr(pdata);
+
+	return ret;
+}
+
+static const struct net_device_ops xgene_ndev_ops = {
+	.ndo_open = xgene_enet_open,
+	.ndo_stop = xgene_enet_close,
+	.ndo_start_xmit = xgene_enet_start_xmit,
+	.ndo_tx_timeout = xgene_enet_timeout,
+	.ndo_get_stats = xgene_enet_get_stats,
+	.ndo_change_mtu = eth_change_mtu,
+	.ndo_set_mac_address = xgene_enet_set_mac_address,
+};
+
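+/* Gather per-port resources from the device tree node: the ENET, ring
+ * CSR and ring command regions, the Rx interrupt, the MAC address, the
+ * PHY connection type and the port clock.
+ */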
+static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
+{
+	struct platform_device *pdev;
+	struct net_device *ndev;
+	struct device *dev;
+	struct resource *res;
+	void *base_addr;
+	const char *mac;
+	int ret;
+
+	pdev = pdata->pdev;
+	dev = &pdev->dev;
+	ndev = pdata->ndev;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "enet_csr");
+	if (!res) {
+		dev_err(dev, "Resource enet_csr not defined\n");
+		return -ENODEV;
+	}
+	pdata->base_addr = devm_ioremap_resource(dev, res);
+	if (IS_ERR(pdata->base_addr)) {
+		dev_err(dev, "Unable to retrieve ENET Port CSR region\n");
+		return PTR_ERR(pdata->base_addr);
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ring_csr");
+	if (!res) {
+		dev_err(dev, "Resource ring_csr not defined\n");
+		return -ENODEV;
+	}
+	pdata->ring_csr_addr = devm_ioremap_resource(dev, res);
+	if (IS_ERR(pdata->ring_csr_addr)) {
+		dev_err(dev, "Unable to retrieve ENET Ring CSR region\n");
+		return PTR_ERR(pdata->ring_csr_addr);
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ring_cmd");
+	if (!res) {
+		dev_err(dev, "Resource ring_cmd not defined\n");
+		return -ENODEV;
+	}
+	pdata->ring_cmd_addr = devm_ioremap_resource(dev, res);
+	if (IS_ERR(pdata->ring_cmd_addr)) {
+		dev_err(dev, "Unable to retrieve ENET Ring command region\n");
+		return PTR_ERR(pdata->ring_cmd_addr);
+	}
+
+	ret = platform_get_irq(pdev, 0);
+	if (ret <= 0) {
+		dev_err(dev, "Unable to get ENET Rx IRQ\n");
+		ret = ret ? : -ENXIO;
+		return ret;
+	}
+	pdata->rx_irq = ret;
+
+	mac = of_get_mac_address(dev->of_node);
+	if (mac)
+		memcpy(ndev->dev_addr, mac, ndev->addr_len);
+	else
+		eth_hw_addr_random(ndev);
+	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
+
+	pdata->phy_mode = of_get_phy_mode(pdev->dev.of_node);
+	if (pdata->phy_mode < 0) {
+		dev_err(dev, "Incorrect phy-connection-type in DTS\n");
+		return -EINVAL;
+	}
+
+	pdata->clk = devm_clk_get(dev, NULL);
+	if (IS_ERR(pdata->clk)) {
+		dev_err(dev, "can't get clock\n");
+		return PTR_ERR(pdata->clk);
+	}
+
+	base_addr = pdata->base_addr;
+	pdata->eth_csr_addr = base_addr + BLOCK_ETH_CSR_OFFSET;
+	pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET;
+	pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET;
+	pdata->mcx_mac_addr = base_addr + BLOCK_ETH_MAC_OFFSET;
+	pdata->mcx_stats_addr = base_addr + BLOCK_ETH_STATS_OFFSET;
+	pdata->mcx_mac_csr_addr = base_addr + BLOCK_ETH_MAC_CSR_OFFSET;
+	pdata->rx_buff_cnt = NUM_PKT_BUF;
+
+	return 0;
+}
+
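+/* Bring up the datapath: create the descriptor rings, fill the Rx
+ * buffer pool and program the CLE (classifier) bypass so received
+ * frames are steered straight to the Rx ring.
+ */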
+static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
+{
+	struct net_device *ndev = pdata->ndev;
+	struct xgene_enet_desc_ring *buf_pool;
+	u16 dst_ring_num;
+	int ret;
+
+	xgene_gmac_tx_disable(pdata);
+	xgene_gmac_rx_disable(pdata);
+
+	ret = xgene_enet_create_desc_rings(ndev);
+	if (ret) {
+		netdev_err(ndev, "Error in ring configuration\n");
+		return ret;
+	}
+
+	/* setup buffer pool */
+	buf_pool = pdata->rx_ring->buf_pool;
+	xgene_enet_init_bufpool(buf_pool);
+	ret = xgene_enet_refill_bufpool(buf_pool, pdata->rx_buff_cnt);
+	if (ret)
+		return ret;
+
+	dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring);
+	xgene_enet_cle_bypass(pdata, dst_ring_num, buf_pool->id);
+
+	return ret;
+}
+
+static int xgene_enet_probe(struct platform_device *pdev)
+{
+	struct net_device *ndev;
+	struct xgene_enet_pdata *pdata;
+	struct device *dev = &pdev->dev;
+	struct napi_struct *napi;
+	int ret;
+
+	ndev = alloc_etherdev(sizeof(struct xgene_enet_pdata));
+	if (!ndev)
+		return -ENOMEM;
+
+	pdata = netdev_priv(ndev);
+
+	pdata->pdev = pdev;
+	pdata->ndev = ndev;
+	SET_NETDEV_DEV(ndev, dev);
+	platform_set_drvdata(pdev, pdata);
+	ndev->netdev_ops = &xgene_ndev_ops;
+	ndev->features |= NETIF_F_IP_CSUM |
+			  NETIF_F_GSO |
+			  NETIF_F_GRO;
+
+	ret = xgene_enet_get_resources(pdata);
+	if (ret)
+		goto err;
+
+	xgene_enet_reset(pdata);
+	xgene_gmac_init(pdata, SPEED_1000);
+
+	spin_lock_init(&pdata->stats_lock);
+	ret = register_netdev(ndev);
+	if (ret) {
+		netdev_err(ndev, "Failed to register netdev\n");
+		goto err;
+	}
+
+	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+	if (ret) {
+		netdev_err(ndev, "No usable DMA configuration\n");
+		goto err_unregister;
+	}
+
+	ret = xgene_enet_init_hw(pdata);
+	if (ret)
+		goto err_unregister;
+
+	napi = &pdata->rx_ring->napi;
+	netif_napi_add(ndev, napi, xgene_enet_napi, NAPI_POLL_WEIGHT);
+
+	ret = xgene_enet_mdio_config(pdata);
+	if (ret)
+		goto err_unregister;
+
+	return 0;
+
+err_unregister:
+	unregister_netdev(ndev);
+err:
+	free_netdev(ndev);
+	return ret;
+}
+
+static int xgene_enet_remove(struct platform_device *pdev)
+{
+	struct xgene_enet_pdata *pdata;
+	struct net_device *ndev;
+
+	pdata = platform_get_drvdata(pdev);
+	ndev = pdata->ndev;
+
+	xgene_gmac_rx_disable(pdata);
+	xgene_gmac_tx_disable(pdata);
+
+	unregister_netdev(ndev);
+	netif_napi_del(&pdata->rx_ring->napi);
+	xgene_enet_mdio_remove(pdata);
+	xgene_enet_delete_desc_rings(pdata);
+	xgene_gport_shutdown(pdata);
+	free_netdev(ndev);
+
+	return 0;
+}
+
+static const struct of_device_id xgene_enet_match[] = {
+	{.compatible = "apm,xgene-enet",},
+	{},
+};
+
+MODULE_DEVICE_TABLE(of, xgene_enet_match);
+
+static struct platform_driver xgene_enet_driver = {
+	.driver = {
+		.name = "xgene-enet",
+		.owner = THIS_MODULE,
+		.of_match_table = xgene_enet_match,
+	},
+	.probe = xgene_enet_probe,
+	.remove = xgene_enet_remove,
+};
+
+module_platform_driver(xgene_enet_driver);
+
+MODULE_DESCRIPTION("APM X-Gene SoC Ethernet driver");
+MODULE_VERSION("1.0");
+MODULE_AUTHOR("Keyur Chudgar <kchudgar@apm.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
new file mode 100644
index 0000000..ffbe6ca
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
@@ -0,0 +1,131 @@ 
+/* Applied Micro X-Gene SoC Ethernet Driver
+ *
+ * Copyright (c) 2014, Applied Micro Circuits Corporation
+ * Authors: Iyappan Subramanian <isubramanian@apm.com>
+ *	    Ravi Patel <rapatel@apm.com>
+ *	    Keyur Chudgar <kchudgar@apm.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __XGENE_ENET_MAIN_H__
+#define __XGENE_ENET_MAIN_H__
+
+#include <linux/clk.h>
+#include <linux/of_platform.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+#include <linux/module.h>
+#include <net/ip.h>
+#include <linux/if_vlan.h>
+#include <linux/phy.h>
+#include "xgene_enet_hw.h"
+
+#define XGENE_ENET_MAX_MTU	1536
+#define SKB_BUFFER_SIZE		(XGENE_ENET_MAX_MTU - NET_IP_ALIGN)
+
+#define NUM_PKT_BUF	64
+#define NUM_BUFPOOL	32
+
+/* software context of a descriptor ring */
+struct xgene_enet_desc_ring {
+	struct net_device *ndev;
+	u16 id;
+	u16 num;
+	u16 head;
+	u16 tail;
+	u16 slots;
+	u16 irq;
+	u32 size;
+	u32 state[NUM_RING_CONFIG];
+	void __iomem *cmd_base;
+	void __iomem *cmd;
+	dma_addr_t dma;
+	u16 dst_ring_num;
+	u8 nbufpool;
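+	/* per-slot skb bookkeeping: Rx buffers posted to the buffer pool
+	 * and Tx skbs awaiting completion on the companion ring
+	 */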
+	struct sk_buff **rx_skb;
+	struct sk_buff **cp_skb;
+	enum xgene_enet_ring_cfgsize cfgsize;
+	struct xgene_enet_desc_ring *cp_ring;
+	struct xgene_enet_desc_ring *buf_pool;
+	struct napi_struct napi;
+	struct xgene_enet_desc desc;
+	union {
+		void *desc_addr;
+		struct xgene_enet_raw_desc *raw_desc;
+		struct xgene_enet_raw_desc16 *raw_desc16;
+	};
+};
+
+struct xgene_enet_rx_stats {
+	u32 bytes;
+	u32 pkts;
+	u32 crc_err;
+	u32 checksum_err;
+	u32 dropped;
+	u32 oversize_pkts;
+	u32 undersize_pkts;
+	u32 frm_len_err;
+	u32 fifo_overrun;
+};
+
+struct xgene_enet_tx_stats {
+	u32 byte_cntr;
+	u32 pkt_cntr;
+	u32 drop_frame_cntr;
+	u32 fcs_error_cntr;
+	u32 undersize_frame_cntr;
+};
+
+struct xgene_enet_stats {
+	struct xgene_enet_rx_stats rx_stats;
+	struct xgene_enet_tx_stats tx_stats;
+};
+
+/* ethernet private data */
+struct xgene_enet_pdata {
+	struct net_device *ndev;
+	struct mii_bus *mdio_bus;
+	struct phy_device *phy_dev;
+	int phy_link;
+	int phy_speed;
+	struct clk *clk;
+	struct platform_device *pdev;
+	struct xgene_enet_desc_ring *tx_ring;
+	struct xgene_enet_desc_ring *rx_ring;
+	struct net_device_stats nstats;
+	char *dev_name;
+	u32 rx_buff_cnt;
+	u32 tx_qcnt_hi;
+	u32 cp_qcnt_hi;
+	u32 cp_qcnt_low;
+	u32 rx_irq;
+	void __iomem *eth_csr_addr;
+	void __iomem *eth_ring_if_addr;
+	void __iomem *eth_diag_csr_addr;
+	void __iomem *mcx_mac_addr;
+	void __iomem *mcx_stats_addr;
+	void __iomem *mcx_mac_csr_addr;
+	void __iomem *base_addr;
+	void __iomem *ring_csr_addr;
+	void __iomem *ring_cmd_addr;
+	u32 phy_addr;
+	int phy_mode;
+	u32 speed;
+	u16 rm;
+	struct xgene_enet_stats stats;
+	spinlock_t stats_lock;
+};
+
+#endif /* __XGENE_ENET_MAIN_H__ */