
[net-next,v5,10/10] net: axienet: Introduce dmaengine support

Message ID 1691387509-2113129-11-git-send-email-radhey.shyam.pandey@amd.com (mailing list archive)
State Changes Requested
Delegated to: Netdev Maintainers
Series: net: axienet: Introduce dmaengine

Checks

Context Check Description
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for net-next
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 1332 this patch: 1330
netdev/cc_maintainers success CCed 9 of 9 maintainers
netdev/build_clang success Errors and warnings before: 1353 this patch: 1353
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 1355 this patch: 1353
netdev/checkpatch warning WARNING: Possible repeated word: 'Socket' WARNING: line length of 84 exceeds 80 columns WARNING: line length of 85 exceeds 80 columns WARNING: line length of 88 exceeds 80 columns WARNING: line length of 90 exceeds 80 columns WARNING: line length of 91 exceeds 80 columns WARNING: line length of 92 exceeds 80 columns
netdev/kdoc success Errors and warnings before: 2 this patch: 2
netdev/source_inline fail Was 0 now: 1

Commit Message

Pandey, Radhey Shyam Aug. 7, 2023, 5:51 a.m. UTC
Add dmaengine framework support to communicate with the Xilinx
DMAengine driver (AXIDMA).

The AXI Ethernet driver uses separate channels for transmit and receive.
Add support for these channels to handle TX and RX with skbs and
appropriate callbacks. Also add the AXI Ethernet core interrupt for
dmaengine framework support.

The dmaengine framework was extended for metadata API support. However,
it still needs further enhancements to make it well suited for Ethernet
use cases. Ethernet features such as ethtool set/get of DMA IP
properties and ndo_poll_controller (mentioned in the TODO) are not
supported and require follow-up discussions.

dmaengine support has a dependency on xilinx_dma, as it uses the
xilinx_vdma_channel_set_config() API to reset the DMA IP, which
internally resets the MAC prior to accessing MDIO.

Benchmark with netperf:

xilinx-zcu102-20232:~$ netperf -H 192.168.10.20 -t TCP_STREAM
MIGRATED TCP STREAM TEST from 0.0.0.0 (0.0.0.0) port 0 AF_INET
to 192.168.10.20 () port 0 AF_INET
Recv   Send    Send
Socket Socket  Message  Elapsed
Size   Size    Size     Time     Throughput
bytes  bytes   bytes    secs.    10^6bits/sec

131072  16384  16384    10.03     915.55

xilinx-zcu102-20232:~$ netperf -H 192.168.10.20 -t UDP_STREAM
MIGRATED UDP STREAM TEST from 0.0.0.0 (0.0.0.0) port 0 AF_INET
to 192.168.10.20 () port 0 AF_INET
Socket  Message  Elapsed      Messages
Size    Size     Time         Okay Errors   Throughput
bytes   bytes    secs            #      #   10^6bits/sec

212992   65507   10.00       18192      0     953.35
212992           10.00       18192            953.35

Signed-off-by: Radhey Shyam Pandey <radhey.shyam.pandey@amd.com>
---
Changes for v5:
- Switch to amd.com email
- Modified commit description. Remove lore link, mention reset API,
  add performance numbers.
- Fix kmem_cache resource leak on stop.
- Use dmaengine_terminate_sync instead of deprecated
  dmaengine_terminate_all API.

Changes for v4:
- Remove the AXIENET_USE_DMA.
- Add dev_err_probe for dma_request_chan error handling.
- Add kmem_cache_destroy for create in axienet_setup_dma_chan.
- Add XILINX_DMA dependency in the ethernet driver Kconfig file.
- Move setup_dma_channel to the init_dmaengine function.
- Remove unlikely from "if (unlikely(ret < 0))".
- Change "if (ret == 0)" to "if (!ret)".
- Rename DMA_MEM_TO_DEV to DMA_TO_DEVICE
- Remove else check for lp->use_dmaengine = 1; in the probe.

Changes in V3:
- New patch for dmaengine framework support.
---
 drivers/net/ethernet/xilinx/Kconfig           |   1 +
 drivers/net/ethernet/xilinx/xilinx_axienet.h  |   8 +
 .../net/ethernet/xilinx/xilinx_axienet_main.c | 321 +++++++++++++++++-
 3 files changed, 328 insertions(+), 2 deletions(-)

Comments

Jakub Kicinski Aug. 8, 2023, 10:48 p.m. UTC | #1
On Mon, 7 Aug 2023 11:21:49 +0530 Radhey Shyam Pandey wrote:
> +struct axi_skbuff {
> +	struct scatterlist sgl[MAX_SKB_FRAGS + 1];
> +	struct dma_async_tx_descriptor *desc;
> +	dma_addr_t dma_address;
> +	struct sk_buff *skb;
> +	struct list_head lh;
> +	int sg_len;
> +} __packed;

Why __packed?

> +static netdev_tx_t
> +axienet_start_xmit_dmaengine(struct sk_buff *skb, struct net_device *ndev)
> +{
> +	struct dma_async_tx_descriptor *dma_tx_desc = NULL;
> +	struct axienet_local *lp = netdev_priv(ndev);
> +	u32 app[DMA_NUM_APP_WORDS] = {0};
> +	struct axi_skbuff *axi_skb;
> +	u32 csum_start_off;
> +	u32 csum_index_off;
> +	int sg_len;
> +	int ret;
> +
> +	sg_len = skb_shinfo(skb)->nr_frags + 1;
> +	axi_skb = kmem_cache_zalloc(lp->skb_cache, GFP_KERNEL);
> +	if (!axi_skb)
> +		return NETDEV_TX_BUSY;

Drop on error, you're not stopping the queue correctly, just drop,
return OK and avoid bugs.

> +static inline int axienet_init_dmaengine(struct net_device *ndev)

Why inline? Please remove the spurious inline annotations.

> +{
> +	struct axienet_local *lp = netdev_priv(ndev);
> +	int i, ret;
> +
> +	lp->tx_chan = dma_request_chan(lp->dev, "tx_chan0");
> +	if (IS_ERR(lp->tx_chan)) {
> +		ret = PTR_ERR(lp->tx_chan);
> +		return dev_err_probe(lp->dev, ret, "No Ethernet DMA (TX) channel found\n");
> +	}
> +
> +	lp->rx_chan = dma_request_chan(lp->dev, "rx_chan0");
> +	if (IS_ERR(lp->rx_chan)) {
> +		ret = PTR_ERR(lp->rx_chan);
> +		dev_err_probe(lp->dev, ret, "No Ethernet DMA (RX) channel found\n");
> +		goto err_dma_request_rx;

name labels after the target, not the source. Makes it a ton easier 
to maintain sanity when changing the code later.

> +	}
> +
> +	lp->skb_cache = kmem_cache_create("ethernet", sizeof(struct axi_skbuff),
> +					  0, 0, NULL);

Why create a cache ?
Isn't it cleaner to create a fake ring buffer of sgl? Most packets will
not have MAX_SKB_FRAGS of memory. On a ring buffer you can use only as
many sg entries as the packet requires. Also no need to alloc/free.

> +	if (!lp->skb_cache) {
> +		ret =  -ENOMEM;

double space, please fix everywhere

> +		goto err_kmem;

> @@ -2140,6 +2432,31 @@ static int axienet_probe(struct platform_device *pdev)
>  		}
>  		netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll);
>  		netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll);
> +	} else {
> +		struct xilinx_vdma_config cfg;
> +		struct dma_chan *tx_chan;
> +
> +		lp->eth_irq = platform_get_irq_optional(pdev, 0);

This can't fail?

> +		tx_chan = dma_request_chan(lp->dev, "tx_chan0");
> +
> +		if (IS_ERR(tx_chan)) {

no empty lines between call and its error check, please fix thru out

> +			ret = PTR_ERR(tx_chan);
> +			dev_err_probe(lp->dev, ret, "No Ethernet DMA (TX) channel found\n");
> +			goto cleanup_clk;
> +		}
Pandey, Radhey Shyam Aug. 12, 2023, 3:27 p.m. UTC | #2
> -----Original Message-----
> From: Jakub Kicinski <kuba@kernel.org>
> Sent: Wednesday, August 9, 2023 4:19 AM
> To: Pandey, Radhey Shyam <radhey.shyam.pandey@amd.com>
> Cc: vkoul@kernel.org; robh+dt@kernel.org;
> krzysztof.kozlowski+dt@linaro.org; conor+dt@kernel.org; Simek, Michal
> <michal.simek@amd.com>; davem@davemloft.net; edumazet@google.com;
> pabeni@redhat.com; linux@armlinux.org.uk; dmaengine@vger.kernel.org;
> devicetree@vger.kernel.org; linux-arm-kernel@lists.infradead.org; linux-
> kernel@vger.kernel.org; netdev@vger.kernel.org; git (AMD-Xilinx)
> <git@amd.com>
> Subject: Re: [PATCH net-next v5 10/10] net: axienet: Introduce dmaengine
> support
> 
> On Mon, 7 Aug 2023 11:21:49 +0530 Radhey Shyam Pandey wrote:
> > +struct axi_skbuff {
> > +	struct scatterlist sgl[MAX_SKB_FRAGS + 1];
> > +	struct dma_async_tx_descriptor *desc;
> > +	dma_addr_t dma_address;
> > +	struct sk_buff *skb;
> > +	struct list_head lh;
> > +	int sg_len;
> > +} __packed;
> 
> Why __packed?

It was added with size optimization in mind, but I see it may have a
performance overhead. Will remove it in the next version.

> 
> > +static netdev_tx_t
> > +axienet_start_xmit_dmaengine(struct sk_buff *skb, struct net_device *ndev)
> > +{
> > +	struct dma_async_tx_descriptor *dma_tx_desc = NULL;
> > +	struct axienet_local *lp = netdev_priv(ndev);
> > +	u32 app[DMA_NUM_APP_WORDS] = {0};
> > +	struct axi_skbuff *axi_skb;
> > +	u32 csum_start_off;
> > +	u32 csum_index_off;
> > +	int sg_len;
> > +	int ret;
> > +
> > +	sg_len = skb_shinfo(skb)->nr_frags + 1;
> > +	axi_skb = kmem_cache_zalloc(lp->skb_cache, GFP_KERNEL);
> > +	if (!axi_skb)
> > +		return NETDEV_TX_BUSY;
> 
> Drop on error, you're not stopping the queue correctly, just drop, return OK
> and avoid bugs.

As I understand it, returning NETDEV_TX_OK means the driver took care of
the packet. So, in line with the non-dmaengine xmit
(axienet_start_xmit_legacy), should we stop the queue and return TX_BUSY?

> 
> > +static inline int axienet_init_dmaengine(struct net_device *ndev)
> 
> Why inline? Please remove the spurious inline annotations.

Ok will fix it in next version

> 
> > +{
> > +	struct axienet_local *lp = netdev_priv(ndev);
> > +	int i, ret;
> > +
> > +	lp->tx_chan = dma_request_chan(lp->dev, "tx_chan0");
> > +	if (IS_ERR(lp->tx_chan)) {
> > +		ret = PTR_ERR(lp->tx_chan);
> > +		return dev_err_probe(lp->dev, ret, "No Ethernet DMA (TX) channel found\n");
> > +	}
> > +
> > +	lp->rx_chan = dma_request_chan(lp->dev, "rx_chan0");
> > +	if (IS_ERR(lp->rx_chan)) {
> > +		ret = PTR_ERR(lp->rx_chan);
> > +		dev_err_probe(lp->dev, ret, "No Ethernet DMA (RX) channel found\n");
> > +		goto err_dma_request_rx;
> 
> name labels after the target, not the source. Makes it a ton easier to
> maintain sanity when changing the code later.

OK, will rename it to the target, i.e. goto err_dma_free_tx, and fix the
others as well.

> 
> > +	}
> > +
> > +	lp->skb_cache = kmem_cache_create("ethernet", sizeof(struct axi_skbuff),
> > +					  0, 0, NULL);
> > +					  0, 0, NULL);
> 
> Why create a cache ?
> Isn't it cleaner to create a fake ring buffer of sgl? Most packets will not have
> MAX_SKB_FRAGS of memory. On a ring buffer you can use only as many sg
> entries as the packet requires. Also no need to alloc/free.


The kmem_cache is used with the intent of using the slab cache interface
and reusing objects in the kernel. The slab cache maintains a cache of
objects: when we free an object, instead of deallocating it, the cache
takes it back, and the next time we need a new object the slab cache
hands us one from that pool.

If we maintain a custom circular ring buffer (struct circ_buf) we have
to create two such ring buffers, one for TX and the other for RX. For
multichannel this multiplies by the number of queues. We also have to
ensure proper occupancy checks and head/tail pointer updates.

With the kmem_cache pool we offload the queue maintenance ops to the
framework, with the benefit of optimized alloc/dealloc. Let me know if
it looks functionally fine and whether I can retain it for this baseline
dmaengine support version.
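
(For reference, a minimal sketch of the slab-cache lifecycle described
above, using the names from this patch and omitting error handling:)

	/* Created once: freed objects are recycled inside the cache */
	lp->skb_cache = kmem_cache_create("ethernet",
					  sizeof(struct axi_skbuff), 0, 0, NULL);

	/* Per packet: take a zeroed object from the cache ... */
	axi_skb = kmem_cache_zalloc(lp->skb_cache, GFP_KERNEL);

	/* ... and hand it back to the cache, not the page allocator */
	kmem_cache_free(lp->skb_cache, axi_skb);

	/* Torn down on stop */
	kmem_cache_destroy(lp->skb_cache);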

> 
> > +	if (!lp->skb_cache) {
> > +		ret =  -ENOMEM;
> 
> double space, please fix everywhere

Will fix it in next version.

> 
> > +		goto err_kmem;
> 
> > @@ -2140,6 +2432,31 @@ static int axienet_probe(struct platform_device *pdev)
> >  		}
> >  		netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll);
> >  		netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll);
> > +	} else {
> > +		struct xilinx_vdma_config cfg;
> > +		struct dma_chan *tx_chan;
> > +
> > +		lp->eth_irq = platform_get_irq_optional(pdev, 0);
> 
> This can't fail?

I will add a check for lp->eth_irq != -ENXIO and add its error handling.
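
(For illustration, one possible shape of that check in axienet_probe(),
treating -ENXIO as "optional IRQ absent" and any other negative value as
a real probe error - sketch only:)

	lp->eth_irq = platform_get_irq_optional(pdev, 0);
	/* -ENXIO just means the optional core IRQ is not wired up;
	 * other negative values are genuine errors.
	 */
	if (lp->eth_irq < 0 && lp->eth_irq != -ENXIO) {
		ret = lp->eth_irq;
		goto cleanup_clk;
	}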
> 
> > +		tx_chan = dma_request_chan(lp->dev, "tx_chan0");
> > +
> > +		if (IS_ERR(tx_chan)) {
> 
> no empty lines between call and its error check, please fix thru out

Ok will fix and cross-check for all other references.
> 
> > +			ret = PTR_ERR(tx_chan);
> > +			dev_err_probe(lp->dev, ret, "No Ethernet DMA (TX) channel found\n");
> > +			goto cleanup_clk;
> > +		}
> --
> pw-bot: cr
Jakub Kicinski Aug. 14, 2023, 3:29 p.m. UTC | #3
On Sat, 12 Aug 2023 15:27:19 +0000 Pandey, Radhey Shyam wrote:
> > Drop on error, you're not stopping the queue correctly, just drop, return OK
> > and avoid bugs.  
> 
> As I understand it, returning NETDEV_TX_OK means the driver took care of
> the packet. So, in line with the non-dmaengine xmit
> (axienet_start_xmit_legacy), should we stop the queue and return TX_BUSY?

You should only return BUSY if there is no space. All other errors
should lead to drops, and increment of tx_error. Otherwise problem
with handling a single packet may stall the NIC forever.
It is somewhat confusing that we return TX_OK in that case but it
is what it is.
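
(For illustration only, a minimal sketch of that drop path - the mapping
helper is hypothetical and stands in for the driver's real descriptor
setup:)

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int example_map_and_queue(struct net_device *ndev, struct sk_buff *skb);

static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *ndev)
{
	if (example_map_and_queue(ndev, skb) < 0) {
		ndev->stats.tx_errors++;	/* account the failure */
		dev_kfree_skb_any(skb);		/* drop, do not requeue */
		return NETDEV_TX_OK;		/* queue keeps running */
	}

	return NETDEV_TX_OK;
}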

> > Why create a cache ?
> > Isn't it cleaner to create a fake ring buffer of sgl? Most packets will not have
> > MAX_SKB_FRAGS of memory. On a ring buffer you can use only as many sg
> > entries as the packet requires. Also no need to alloc/free.  
> 
> The kmem_cache is used with the intent of using the slab cache interface
> and reusing objects in the kernel. The slab cache maintains a cache of
> objects: when we free an object, instead of deallocating it, the cache
> takes it back, and the next time we need a new object the slab cache
> hands us one from that pool.
> 
> If we maintain a custom circular ring buffer (struct circ_buf) we have
> to create two such ring buffers, one for TX and the other for RX. For
> multichannel this multiplies by the number of queues. We also have to
> ensure proper occupancy checks and head/tail pointer updates.
> 
> With the kmem_cache pool we offload the queue maintenance ops to the
> framework, with the benefit of optimized alloc/dealloc. Let me know if
> it looks functionally fine and whether I can retain it for this baseline
> dmaengine support version.

The kmemcache is not the worst possible option but note that the
objects you're allocating (with zeroing) are 512+ bytes. That's
pretty large, when most packets will not have full 16 fragments.
Ring buffer would allow to better match the allocation size to 
the packets. Not to mention that it can be done fully locklessly.
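
(A rough sketch of that alternative with made-up names: a fixed
per-queue array indexed by free-running head/tail counters, so nothing
is allocated per packet and only sg_len entries of each slot are used:)

struct tx_ring_entry {
	struct scatterlist sgl[MAX_SKB_FRAGS + 1];
	struct sk_buff *skb;
	int sg_len;
};

struct tx_ring {
	struct tx_ring_entry *entries;	/* ring_size slots, allocated at open */
	unsigned int ring_size;		/* power of two */
	unsigned int head;		/* producer index, bumped in xmit */
	unsigned int tail;		/* consumer index, bumped on completion */
};

/* Producer: claim the next free slot, or NULL when the ring is full
 * (at which point the driver would stop the queue and return TX_BUSY).
 */
static struct tx_ring_entry *tx_ring_get_slot(struct tx_ring *r)
{
	if (r->head - r->tail >= r->ring_size)
		return NULL;
	return &r->entries[r->head++ & (r->ring_size - 1)];
}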
Pandey, Radhey Shyam Aug. 23, 2023, 5:38 p.m. UTC | #4
> -----Original Message-----
> From: Jakub Kicinski <kuba@kernel.org>
> Sent: Monday, August 14, 2023 9:00 PM
> To: Pandey, Radhey Shyam <radhey.shyam.pandey@amd.com>
> Cc: vkoul@kernel.org; robh+dt@kernel.org;
> krzysztof.kozlowski+dt@linaro.org; conor+dt@kernel.org; Simek, Michal
> <michal.simek@amd.com>; davem@davemloft.net; edumazet@google.com;
> pabeni@redhat.com; linux@armlinux.org.uk; dmaengine@vger.kernel.org;
> devicetree@vger.kernel.org; linux-arm-kernel@lists.infradead.org; linux-
> kernel@vger.kernel.org; netdev@vger.kernel.org; git (AMD-Xilinx)
> <git@amd.com>
> Subject: Re: [PATCH net-next v5 10/10] net: axienet: Introduce dmaengine
> support
> 
> On Sat, 12 Aug 2023 15:27:19 +0000 Pandey, Radhey Shyam wrote:
> > > Drop on error, you're not stopping the queue correctly, just drop,
> > > return OK and avoid bugs.
> >
> > As I understand it, returning NETDEV_TX_OK means the driver took care of
> > the packet. So, in line with the non-dmaengine xmit
> > (axienet_start_xmit_legacy), should we stop the queue and return TX_BUSY?
> 
> You should only return BUSY if there is no space. All other errors
> should lead to drops, and increment of tx_error. Otherwise problem
> with handling a single packet may stall the NIC forever.
> It is somewhat confusing that we return TX_OK in that case but it
> is what it is.
> 
> > > Why create a cache ?
> > > Isn't it cleaner to create a fake ring buffer of sgl? Most packets will
> > > not have MAX_SKB_FRAGS of memory. On a ring buffer you can use only as
> > > many sg entries as the packet requires. Also no need to alloc/free.
> >
> > The kmem_cache is used with the intent of using the slab cache interface
> > and reusing objects in the kernel. The slab cache maintains a cache of
> > objects: when we free an object, instead of deallocating it, the cache
> > takes it back, and the next time we need a new object the slab cache
> > hands us one from that pool.
> >
> > If we maintain a custom circular ring buffer (struct circ_buf) we have
> > to create two such ring buffers, one for TX and the other for RX. For
> > multichannel this multiplies by the number of queues. We also have to
> > ensure proper occupancy checks and head/tail pointer updates.
> >
> > With the kmem_cache pool we offload the queue maintenance ops to the
> > framework, with the benefit of optimized alloc/dealloc. Let me know if
> > it looks functionally fine and whether I can retain it for this baseline
> > dmaengine support version.
> 
> The kmemcache is not the worst possible option but note that the
> objects you're allocating (with zeroing) are 512+ bytes. That's
> pretty large, when most packets will not have full 16 fragments.
> Ring buffer would allow to better match the allocation size to
> the packets. Not to mention that it can be done fully locklessly.

I modified the implementation to use a circular ring buffer for TX
and RX. It seems to be working in initial testing, and perf tests are
now running.

Just one question on when to submit v6: should I wait until the
dmaengine patches ([01/10]-[07/10]) are part of net-next, or can I
send it now?

Thanks,
Radhey
Jakub Kicinski Aug. 24, 2023, 1:10 a.m. UTC | #5
On Wed, 23 Aug 2023 17:38:58 +0000 Pandey, Radhey Shyam wrote:
> > The kmemcache is not the worst possible option but note that the
> > objects you're allocating (with zeroing) are 512+ bytes. That's
> > pretty large, when most packets will not have full 16 fragments.
> > Ring buffer would allow to better match the allocation size to
> > the packets. Not to mention that it can be done fully locklessly.  
> 
> I modified the implementation to use a circular ring buffer for TX
> and RX. It seems to be working in initial testing, and perf tests are
> now running.
> 
> Just one question on when to submit v6: should I wait until the
> dmaengine patches ([01/10]-[07/10]) are part of net-next, or can I
> send it now?

Assuming Linus cuts final this Sunday - after Sept 10th.

Patch

diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig
index 0014729b8865..35d96c633a33 100644
--- a/drivers/net/ethernet/xilinx/Kconfig
+++ b/drivers/net/ethernet/xilinx/Kconfig
@@ -26,6 +26,7 @@  config XILINX_EMACLITE
 config XILINX_AXI_EMAC
 	tristate "Xilinx 10/100/1000 AXI Ethernet support"
 	depends on HAS_IOMEM
+	depends on XILINX_DMA
 	select PHYLINK
 	help
 	  This driver supports the 10/100/1000 Ethernet from Xilinx for the
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index 3ead0bac597b..d8c6d0afa8a3 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -436,6 +436,10 @@  struct axidma_bd {
  * @coalesce_count_tx:	Store the irq coalesce on TX side.
  * @coalesce_usec_tx:	IRQ coalesce delay for TX
  * @use_dmaengine: flag to check dmaengine framework usage.
+ * @tx_chan:	TX DMA channel.
+ * @rx_chan:	RX DMA channel.
+ * @skb_cache:	Custom skb slab allocator
+ * @rx_chan_skb: List to store RX descriptors custom SKB pointer
  */
 struct axienet_local {
 	struct net_device *ndev;
@@ -501,6 +505,10 @@  struct axienet_local {
 	u32 coalesce_count_tx;
 	u32 coalesce_usec_tx;
 	u8  use_dmaengine;
+	struct dma_chan *tx_chan;
+	struct dma_chan *rx_chan;
+	struct kmem_cache *skb_cache;
+	struct list_head rx_chan_skb;
 };
 
 /**
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 36c77248a55e..1de5c4992ecf 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -37,6 +37,9 @@ 
 #include <linux/phy.h>
 #include <linux/mii.h>
 #include <linux/ethtool.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma/xilinx_dma.h>
 
 #include "xilinx_axienet.h"
 
@@ -46,6 +49,9 @@ 
 #define TX_BD_NUM_MIN			(MAX_SKB_FRAGS + 1)
 #define TX_BD_NUM_MAX			4096
 #define RX_BD_NUM_MAX			4096
+#define DMA_NUM_APP_WORDS		5
+#define LEN_APP				4
+#define RX_BUF_NUM_DEFAULT		128
 
 /* Must be shorter than length of ethtool_drvinfo.driver field to fit */
 #define DRIVER_NAME		"xaxienet"
@@ -54,6 +60,17 @@ 
 
 #define AXIENET_REGS_N		40
 
+struct axi_skbuff {
+	struct scatterlist sgl[MAX_SKB_FRAGS + 1];
+	struct dma_async_tx_descriptor *desc;
+	dma_addr_t dma_address;
+	struct sk_buff *skb;
+	struct list_head lh;
+	int sg_len;
+} __packed;
+
+static int axienet_rx_submit_desc(struct net_device *ndev);
+
 /* Match table for of_platform binding */
 static const struct of_device_id axienet_of_match[] = {
 	{ .compatible = "xlnx,axi-ethernet-1.00.a", },
@@ -726,6 +743,108 @@  static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
 	return 0;
 }
 
+/**
+ * axienet_dma_tx_cb - DMA engine callback for TX channel.
+ * @data:       Pointer to the axi_skbuff structure
+ * @result:     error reporting through dmaengine_result.
+ * This function is called by dmaengine driver for TX channel to notify
+ * that the transmit is done.
+ */
+static void axienet_dma_tx_cb(void *data, const struct dmaengine_result *result)
+{
+	struct axi_skbuff *axi_skb = data;
+
+	struct net_device *netdev = axi_skb->skb->dev;
+	struct axienet_local *lp = netdev_priv(netdev);
+
+	u64_stats_update_begin(&lp->tx_stat_sync);
+	u64_stats_add(&lp->tx_bytes, axi_skb->skb->len);
+	u64_stats_add(&lp->tx_packets, 1);
+	u64_stats_update_end(&lp->tx_stat_sync);
+
+	dma_unmap_sg(lp->dev, axi_skb->sgl, axi_skb->sg_len, DMA_TO_DEVICE);
+	dev_kfree_skb_any(axi_skb->skb);
+	kmem_cache_free(lp->skb_cache, axi_skb);
+}
+
+/**
+ * axienet_start_xmit_dmaengine - Starts the transmission.
+ * @skb:        sk_buff pointer that contains data to be Txed.
+ * @ndev:       Pointer to net_device structure.
+ *
+ * Return: NETDEV_TX_OK, on success
+ *          NETDEV_TX_BUSY, if any memory failure or SG error.
+ *
+ * This function is invoked from xmit to initiate transmission. The
+ * function sets the skbs , call back API, SG etc.
+ * Additionally if checksum offloading is supported,
+ * it populates AXI Stream Control fields with appropriate values.
+ */
+static netdev_tx_t
+axienet_start_xmit_dmaengine(struct sk_buff *skb, struct net_device *ndev)
+{
+	struct dma_async_tx_descriptor *dma_tx_desc = NULL;
+	struct axienet_local *lp = netdev_priv(ndev);
+	u32 app[DMA_NUM_APP_WORDS] = {0};
+	struct axi_skbuff *axi_skb;
+	u32 csum_start_off;
+	u32 csum_index_off;
+	int sg_len;
+	int ret;
+
+	sg_len = skb_shinfo(skb)->nr_frags + 1;
+	axi_skb = kmem_cache_zalloc(lp->skb_cache, GFP_KERNEL);
+	if (!axi_skb)
+		return NETDEV_TX_BUSY;
+
+	sg_init_table(axi_skb->sgl, sg_len);
+	ret = skb_to_sgvec(skb, axi_skb->sgl, 0, skb->len);
+	if (ret < 0)
+		goto xmit_error_skb_sgvec;
+
+	ret = dma_map_sg(lp->dev, axi_skb->sgl, sg_len, DMA_TO_DEVICE);
+	if (!ret)
+		goto xmit_error_skb_sgvec;
+
+	/*Fill up app fields for checksum */
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
+			/* Tx Full Checksum Offload Enabled */
+			app[0] |= 2;
+		} else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) {
+			csum_start_off = skb_transport_offset(skb);
+			csum_index_off = csum_start_off + skb->csum_offset;
+			/* Tx Partial Checksum Offload Enabled */
+			app[0] |= 1;
+			app[1] = (csum_start_off << 16) | csum_index_off;
+		}
+	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
+		app[0] |= 2; /* Tx Full Checksum Offload Enabled */
+	}
+
+	dma_tx_desc = lp->tx_chan->device->device_prep_slave_sg(lp->tx_chan, axi_skb->sgl,
+			sg_len, DMA_MEM_TO_DEV,
+			DMA_PREP_INTERRUPT, (void *)app);
+
+	if (!dma_tx_desc)
+		goto xmit_error_prep;
+
+	axi_skb->skb = skb;
+	axi_skb->sg_len = sg_len;
+	dma_tx_desc->callback_param =  axi_skb;
+	dma_tx_desc->callback_result = axienet_dma_tx_cb;
+	dmaengine_submit(dma_tx_desc);
+	dma_async_issue_pending(lp->tx_chan);
+
+	return NETDEV_TX_OK;
+
+xmit_error_prep:
+	dma_unmap_sg(lp->dev, axi_skb->sgl, sg_len, DMA_TO_DEVICE);
+xmit_error_skb_sgvec:
+	kmem_cache_free(lp->skb_cache, axi_skb);
+	return NETDEV_TX_BUSY;
+}
+
 /**
  * axienet_tx_poll - Invoked once a transmit is completed by the
  * Axi DMA Tx channel.
@@ -910,7 +1029,43 @@  axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	if (!lp->use_dmaengine)
 		return axienet_start_xmit_legacy(skb, ndev);
 	else
-		return NETDEV_TX_BUSY;
+		return axienet_start_xmit_dmaengine(skb, ndev);
+}
+
+/**
+ * axienet_dma_rx_cb - DMA engine callback for RX channel.
+ * @data:       Pointer to the axi_skbuff structure
+ * @result:     error reporting through dmaengine_result.
+ * This function is called by dmaengine driver for RX channel to notify
+ * that the packet is received.
+ */
+static void axienet_dma_rx_cb(void *data, const struct dmaengine_result *result)
+{
+	struct axi_skbuff *axi_skb = data;
+	struct sk_buff *skb = axi_skb->skb;
+	struct net_device *netdev = skb->dev;
+	struct axienet_local *lp = netdev_priv(netdev);
+	size_t meta_len, meta_max_len, rx_len;
+	u32 *app;
+
+	app  = dmaengine_desc_get_metadata_ptr(axi_skb->desc, &meta_len, &meta_max_len);
+	dma_unmap_single(lp->dev, axi_skb->dma_address, lp->max_frm_size,
+			 DMA_FROM_DEVICE);
+	/* TODO: Derive app word index programmatically */
+	rx_len = (app[LEN_APP] & 0xFFFF);
+	skb_put(skb, rx_len);
+	skb->protocol = eth_type_trans(skb, netdev);
+	skb->ip_summed = CHECKSUM_NONE;
+
+	netif_rx(skb);
+	list_del(&axi_skb->lh);
+	kmem_cache_free(lp->skb_cache, axi_skb);
+	u64_stats_update_begin(&lp->rx_stat_sync);
+	u64_stats_add(&lp->rx_packets, 1);
+	u64_stats_add(&lp->rx_bytes, rx_len);
+	u64_stats_update_end(&lp->rx_stat_sync);
+	axienet_rx_submit_desc(netdev);
+	dma_async_issue_pending(lp->rx_chan);
 }
 
 /**
@@ -1146,6 +1301,112 @@  static irqreturn_t axienet_eth_irq(int irq, void *_ndev)
 
 static void axienet_dma_err_handler(struct work_struct *work);
 
+/**
+ * axienet_rx_submit_desc - Submit the descriptors with required data
+ * like call backup API, skb buffer.. etc to dmaengine.
+ *
+ * @ndev:	net_device pointer
+ *
+ *Return: 0, on success.
+ *          non-zero error value on failure
+ */
+static int axienet_rx_submit_desc(struct net_device *ndev)
+{
+	struct dma_async_tx_descriptor *dma_rx_desc = NULL;
+	struct axienet_local *lp = netdev_priv(ndev);
+	struct axi_skbuff *axi_skb;
+	struct sk_buff *skb;
+	dma_addr_t addr;
+	int ret;
+
+	axi_skb = kmem_cache_alloc(lp->skb_cache, GFP_KERNEL);
+
+	if (!axi_skb)
+		return -ENOMEM;
+	skb = netdev_alloc_skb(ndev, lp->max_frm_size);
+	if (!skb) {
+		ret = -ENOMEM;
+		goto rx_bd_init_skb;
+	}
+
+	sg_init_table(axi_skb->sgl, 1);
+	addr = dma_map_single(lp->dev, skb->data, lp->max_frm_size, DMA_FROM_DEVICE);
+	sg_dma_address(axi_skb->sgl) = addr;
+	sg_dma_len(axi_skb->sgl) = lp->max_frm_size;
+	dma_rx_desc = dmaengine_prep_slave_sg(lp->rx_chan, axi_skb->sgl,
+					      1, DMA_DEV_TO_MEM,
+					      DMA_PREP_INTERRUPT);
+	if (!dma_rx_desc) {
+		ret = -EINVAL;
+		goto rx_bd_init_prep_sg;
+	}
+
+	axi_skb->skb = skb;
+	axi_skb->dma_address = sg_dma_address(axi_skb->sgl);
+	axi_skb->desc = dma_rx_desc;
+	dma_rx_desc->callback_param =  axi_skb;
+	dma_rx_desc->callback_result = axienet_dma_rx_cb;
+	list_add_tail(&axi_skb->lh, &lp->rx_chan_skb);
+	dmaengine_submit(dma_rx_desc);
+
+	return 0;
+
+rx_bd_init_prep_sg:
+	dma_unmap_single(lp->dev, addr, lp->max_frm_size, DMA_FROM_DEVICE);
+	dev_kfree_skb(skb);
+rx_bd_init_skb:
+	kmem_cache_free(lp->skb_cache, axi_skb);
+	return ret;
+}
+
+/**
+ * axienet_init_dmaengine - init the dmaengine code.
+ * @ndev:       Pointer to net_device structure
+ *
+ * Return: 0, on success.
+ *          non-zero error value on failure
+ *
+ * This is the dmaengine initialization code.
+ */
+static inline int axienet_init_dmaengine(struct net_device *ndev)
+{
+	struct axienet_local *lp = netdev_priv(ndev);
+	int i, ret;
+
+	lp->tx_chan = dma_request_chan(lp->dev, "tx_chan0");
+	if (IS_ERR(lp->tx_chan)) {
+		ret = PTR_ERR(lp->tx_chan);
+		return dev_err_probe(lp->dev, ret, "No Ethernet DMA (TX) channel found\n");
+	}
+
+	lp->rx_chan = dma_request_chan(lp->dev, "rx_chan0");
+	if (IS_ERR(lp->rx_chan)) {
+		ret = PTR_ERR(lp->rx_chan);
+		dev_err_probe(lp->dev, ret, "No Ethernet DMA (RX) channel found\n");
+		goto err_dma_request_rx;
+	}
+
+	lp->skb_cache = kmem_cache_create("ethernet", sizeof(struct axi_skbuff),
+					  0, 0, NULL);
+	if (!lp->skb_cache) {
+		ret =  -ENOMEM;
+		goto err_kmem;
+	}
+
+	INIT_LIST_HEAD(&lp->rx_chan_skb);
+	/* TODO: Instead of BD_NUM_DEFAULT use runtime support*/
+	for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
+		axienet_rx_submit_desc(ndev);
+	dma_async_issue_pending(lp->rx_chan);
+
+	return 0;
+err_kmem:
+	dma_release_channel(lp->rx_chan);
+err_dma_request_rx:
+	dma_release_channel(lp->tx_chan);
+	return ret;
+}
+
 /**
  * axienet_init_legacy_dma - init the dma legacy code.
  * @ndev:       Pointer to net_device structure
@@ -1237,7 +1498,24 @@  static int axienet_open(struct net_device *ndev)
 
 	phylink_start(lp->phylink);
 
-	if (!lp->use_dmaengine) {
+	if (lp->use_dmaengine) {
+		/* Enable interrupts for Axi Ethernet core (if defined) */
+		if (lp->eth_irq > 0) {
+			ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
+					  ndev->name, ndev);
+			if (ret)
+				goto error_code;
+		}
+
+		ret = axienet_init_dmaengine(ndev);
+
+		if (ret < 0) {
+			if (lp->eth_irq > 0)
+				free_irq(lp->eth_irq, ndev);
+			goto error_code;
+		}
+
+	} else {
 		ret = axienet_init_legacy_dma(ndev);
 		if (ret)
 			goto error_code;
@@ -1265,6 +1543,7 @@  static int axienet_open(struct net_device *ndev)
 static int axienet_stop(struct net_device *ndev)
 {
 	struct axienet_local *lp = netdev_priv(ndev);
+	struct axi_skbuff *askb, *skb;
 
 	dev_dbg(&ndev->dev, "axienet_close()\n");
 
@@ -1285,6 +1564,19 @@  static int axienet_stop(struct net_device *ndev)
 		free_irq(lp->tx_irq, ndev);
 		free_irq(lp->rx_irq, ndev);
 		axienet_dma_bd_release(ndev);
+	} else {
+		dmaengine_terminate_sync(lp->tx_chan);
+		dmaengine_synchronize(lp->tx_chan);
+		dmaengine_terminate_sync(lp->rx_chan);
+		dmaengine_synchronize(lp->rx_chan);
+
+		list_for_each_entry_safe(askb, skb, &lp->rx_chan_skb, lh) {
+			list_del(&askb->lh);
+			kmem_cache_free(lp->skb_cache, askb);
+		}
+		dma_release_channel(lp->rx_chan);
+		dma_release_channel(lp->tx_chan);
+		kmem_cache_destroy(lp->skb_cache);
 	}
 
 	axienet_iow(lp, XAE_IE_OFFSET, 0);
@@ -2140,6 +2432,31 @@  static int axienet_probe(struct platform_device *pdev)
 		}
 		netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll);
 		netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll);
+	} else {
+		struct xilinx_vdma_config cfg;
+		struct dma_chan *tx_chan;
+
+		lp->eth_irq = platform_get_irq_optional(pdev, 0);
+		tx_chan = dma_request_chan(lp->dev, "tx_chan0");
+
+		if (IS_ERR(tx_chan)) {
+			ret = PTR_ERR(tx_chan);
+			dev_err_probe(lp->dev, ret, "No Ethernet DMA (TX) channel found\n");
+			goto cleanup_clk;
+		}
+
+		cfg.reset = 1;
+		/* As name says VDMA but it has support for DMA channel reset*/
+		ret = xilinx_vdma_channel_set_config(tx_chan, &cfg);
+
+		if (ret < 0) {
+			dev_err(&pdev->dev, "Reset channel failed\n");
+			dma_release_channel(tx_chan);
+			goto cleanup_clk;
+		}
+
+		dma_release_channel(tx_chan);
+		lp->use_dmaengine = 1;
 	}
 
 	/* Check for Ethernet core IRQ (optional) */