diff mbox series

[2/2] usb: chipidea: udc: create bounce buffer for problem sglist entries if possible

Message ID 20240912045150.915573-2-xu.yang_2@nxp.com (mailing list archive)
State New
Headers show
Series [1/2] usb: chipidea: udc: limit usb request length to max 16KB | expand

Commit Message

Xu Yang Sept. 12, 2024, 4:51 a.m. UTC
The chipidea controller doesn't fully support sglist. For example, it
cannot combine data that spans multiple dTDs into a single bus packet,
so it only works for very limited cases.

The limitations are as follows (see the sketch below):
1. the end address of the first sg buffer must be 4KB aligned.
2. the start and end addresses of every middle sg buffer must be 4KB aligned.
3. the start address of the last sg buffer must be 4KB aligned.
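
For illustration only, a minimal sketch of these rules as a per-entry
check, mirroring what sglist_get_invalid_entry() does in the patch.
CI_HDRC_PAGE_SIZE is the driver's 4KB page-size constant; the helper
name below is made up for this sketch and is not part of the patch:

/* Sketch of the alignment rules above; not part of the patch. */
static bool ci_sg_entry_aligned(struct scatterlist *s, bool first, bool last)
{
	/* middle and last entries must start on a 4KB boundary */
	if (!first && s->offset)
		return false;

	/* first and middle entries must end on a 4KB boundary */
	if (!last && !IS_ALIGNED(s->offset + s->length, CI_HDRC_PAGE_SIZE))
		return false;

	return true;
}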

However, not every use case violates these limitations. To make the
controller compatible with most cases, this patch bounces the problem
sglist entries, which are located by sglist_get_invalid_entry(). A
bounce buffer (with its size rounded up to the page size) is then
allocated to replace the remaining problem sg entries. Data is copied
between the problem sg entries and the bounce buffer according to the
transfer direction, as sketched below, and the bounce buffer is freed
when the request completes.
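
The direction-dependent copy can be summarized by the following sketch.
The names problem_sgs, n_problem, bounce_buf, rest and received are
illustrative only; the real code lives in sglist_do_bounce() and
sglist_do_debounce():

if (hwep->dir == TX)
	/* IN transfer: stage the gadget's data in the bounce buffer
	 * before the request is DMA-mapped.
	 */
	sg_copy_to_buffer(problem_sgs, n_problem, bounce_buf, rest);

/* ... the transfer runs ... */

if (hwep->dir == RX)
	/* OUT transfer: copy what the host sent from the bounce buffer
	 * back into the original sg entries after completion.
	 */
	sg_copy_from_buffer(problem_sgs, n_problem, bounce_buf, received);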

Signed-off-by: Xu Yang <xu.yang_2@nxp.com>
---
 drivers/usb/chipidea/udc.c | 147 +++++++++++++++++++++++++++++++++++++
 drivers/usb/chipidea/udc.h |   2 +
 2 files changed, 149 insertions(+)

Comments

Peter Chen Sept. 13, 2024, 1:34 a.m. UTC | #1
On 24-09-12 12:51:50, Xu Yang wrote:
> The chipidea controller doesn't fully support sglist. For example, it
> cannot combine data that spans multiple dTDs into a single bus packet,
> so it only works for very limited cases.
> 
> The limitations are as follows:
> 1. the end address of the first sg buffer must be 4KB aligned.
> 2. the start and end addresses of every middle sg buffer must be 4KB aligned.
> 3. the start address of the last sg buffer must be 4KB aligned.
> 
> However, not every use case violates these limitations. To make the
> controller compatible with most cases, this patch bounces the problem
> sglist entries, which are located by sglist_get_invalid_entry(). A
> bounce buffer (with its size rounded up to the page size) is then
> allocated to replace the remaining problem sg entries. Data is copied
> between the problem sg entries and the bounce buffer according to the
> transfer direction, and the bounce buffer is freed when the request
> completes.
> 
> Signed-off-by: Xu Yang <xu.yang_2@nxp.com>

Acked-by: Peter Chen <peter.chen@kernel.com>

> ---
>  drivers/usb/chipidea/udc.c | 147 +++++++++++++++++++++++++++++++++++++
>  drivers/usb/chipidea/udc.h |   2 +
>  2 files changed, 149 insertions(+)
> 
> diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
> index 5d9369d01065..dc4dae2c31b4 100644
> --- a/drivers/usb/chipidea/udc.c
> +++ b/drivers/usb/chipidea/udc.c
> @@ -10,6 +10,7 @@
>  #include <linux/delay.h>
>  #include <linux/device.h>
>  #include <linux/dmapool.h>
> +#include <linux/dma-direct.h>
>  #include <linux/err.h>
>  #include <linux/irqreturn.h>
>  #include <linux/kernel.h>
> @@ -540,6 +541,126 @@ static int prepare_td_for_sg(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
>  	return ret;
>  }
>  
> +/*
> + * Verify if the scatterlist is valid by iterating each sg entry.
> + * Return invalid sg entry index which is less than num_sgs.
> + */
> +static int sglist_get_invalid_entry(struct device *dma_dev, u8 dir,
> +			struct usb_request *req)
> +{
> +	int i;
> +	struct scatterlist *s = req->sg;
> +
> +	if (req->num_sgs == 1)
> +		return 1;
> +
> +	dir = dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
> +
> +	for (i = 0; i < req->num_sgs; i++, s = sg_next(s)) {
> +		/* Only small sg (generally last sg) may be bounced. If
> +		 * that happens. we can't ensure the addr is page-aligned
> +		 * after dma map.
> +		 */
> +		if (dma_kmalloc_needs_bounce(dma_dev, s->length, dir))
> +			break;
> +
> +		/* Make sure each sg start address (except first sg) is
> +		 * page-aligned and end address (except last sg) is also
> +		 * page-aligned.
> +		 */
> +		if (i == 0) {
> +			if (!IS_ALIGNED(s->offset + s->length,
> +						CI_HDRC_PAGE_SIZE))
> +				break;
> +		} else {
> +			if (s->offset)
> +				break;
> +			if (!sg_is_last(s) && !IS_ALIGNED(s->length,
> +						CI_HDRC_PAGE_SIZE))
> +				break;
> +		}
> +	}
> +
> +	return i;
> +}
> +
> +static int sglist_do_bounce(struct ci_hw_req *hwreq, int index,
> +			bool copy, unsigned int *bounced)
> +{
> +	void *buf;
> +	int i, ret, nents, num_sgs;
> +	unsigned int rest, rounded;
> +	struct scatterlist *sg, *src, *dst;
> +
> +	nents = index + 1;
> +	ret = sg_alloc_table(&hwreq->sgt, nents, GFP_KERNEL);
> +	if (ret)
> +		return ret;
> +
> +	sg = src = hwreq->req.sg;
> +	num_sgs = hwreq->req.num_sgs;
> +	rest = hwreq->req.length;
> +	dst = hwreq->sgt.sgl;
> +
> +	for (i = 0; i < index; i++) {
> +		memcpy(dst, src, sizeof(*src));
> +		rest -= src->length;
> +		src = sg_next(src);
> +		dst = sg_next(dst);
> +	}
> +
> +	/* create one bounce buffer */
> +	rounded = round_up(rest, CI_HDRC_PAGE_SIZE);
> +	buf = kmalloc(rounded, GFP_KERNEL);
> +	if (!buf) {
> +		sg_free_table(&hwreq->sgt);
> +		return -ENOMEM;
> +	}
> +
> +	sg_set_buf(dst, buf, rounded);
> +
> +	hwreq->req.sg = hwreq->sgt.sgl;
> +	hwreq->req.num_sgs = nents;
> +	hwreq->sgt.sgl = sg;
> +	hwreq->sgt.nents = num_sgs;
> +
> +	if (copy)
> +		sg_copy_to_buffer(src, num_sgs - index, buf, rest);
> +
> +	*bounced = rest;
> +
> +	return 0;
> +}
> +
> +static void sglist_do_debounce(struct ci_hw_req *hwreq, bool copy)
> +{
> +	void *buf;
> +	int i, nents, num_sgs;
> +	struct scatterlist *sg, *src, *dst;
> +
> +	sg = hwreq->req.sg;
> +	num_sgs = hwreq->req.num_sgs;
> +	src = sg_last(sg, num_sgs);
> +	buf = sg_virt(src);
> +
> +	if (copy) {
> +		dst = hwreq->sgt.sgl;
> +		for (i = 0; i < num_sgs - 1; i++)
> +			dst = sg_next(dst);
> +
> +		nents = hwreq->sgt.nents - num_sgs + 1;
> +		sg_copy_from_buffer(dst, nents, buf, sg_dma_len(src));
> +	}
> +
> +	hwreq->req.sg = hwreq->sgt.sgl;
> +	hwreq->req.num_sgs = hwreq->sgt.nents;
> +	hwreq->sgt.sgl = sg;
> +	hwreq->sgt.nents = num_sgs;
> +
> +	kfree(buf);
> +	sg_free_table(&hwreq->sgt);
> +}
> +
>  /**
>   * _hardware_enqueue: configures a request at hardware level
>   * @hwep:   endpoint
> @@ -552,6 +673,8 @@ static int _hardware_enqueue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
>  	struct ci_hdrc *ci = hwep->ci;
>  	int ret = 0;
>  	struct td_node *firstnode, *lastnode;
> +	unsigned int bounced_size;
> +	struct scatterlist *sg;
>  
>  	/* don't queue twice */
>  	if (hwreq->req.status == -EALREADY)
> @@ -559,11 +682,28 @@ static int _hardware_enqueue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
>  
>  	hwreq->req.status = -EALREADY;
>  
> +	if (hwreq->req.num_sgs && hwreq->req.length) {
> +		ret = sglist_get_invalid_entry(ci->dev->parent, hwep->dir,
> +					&hwreq->req);
> +		if (ret < hwreq->req.num_sgs) {
> +			ret = sglist_do_bounce(hwreq, ret, hwep->dir == TX,
> +					&bounced_size);
> +			if (ret)
> +				return ret;
> +		}
> +	}
> +
>  	ret = usb_gadget_map_request_by_dev(ci->dev->parent,
>  					    &hwreq->req, hwep->dir);
>  	if (ret)
>  		return ret;
>  
> +	if (hwreq->sgt.sgl) {
> +		/* We've mapped a bigger buffer, now recover the actual size */
> +		sg = sg_last(hwreq->req.sg, hwreq->req.num_sgs);
> +		sg_dma_len(sg) = min(sg_dma_len(sg), bounced_size);
> +	}
> +
>  	if (hwreq->req.num_mapped_sgs)
>  		ret = prepare_td_for_sg(hwep, hwreq);
>  	else
> @@ -742,6 +882,10 @@ static int _hardware_dequeue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
>  	usb_gadget_unmap_request_by_dev(hwep->ci->dev->parent,
>  					&hwreq->req, hwep->dir);
>  
> +	/* sglist bounced */
> +	if (hwreq->sgt.sgl)
> +		sglist_do_debounce(hwreq, hwep->dir == RX);
> +
>  	hwreq->req.actual += actual;
>  
>  	if (hwreq->req.status)
> @@ -1588,6 +1732,9 @@ static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
>  
>  	usb_gadget_unmap_request(&hwep->ci->gadget, req, hwep->dir);
>  
> +	if (hwreq->sgt.sgl)
> +		sglist_do_debounce(hwreq, false);
> +
>  	req->status = -ECONNRESET;
>  
>  	if (hwreq->req.complete != NULL) {
> diff --git a/drivers/usb/chipidea/udc.h b/drivers/usb/chipidea/udc.h
> index 5193df1e18c7..c8a47389a46b 100644
> --- a/drivers/usb/chipidea/udc.h
> +++ b/drivers/usb/chipidea/udc.h
> @@ -69,11 +69,13 @@ struct td_node {
>   * @req: request structure for gadget drivers
>   * @queue: link to QH list
>   * @tds: link to TD list
> + * @sgt: hold original sglist when bounce sglist
>   */
>  struct ci_hw_req {
>  	struct usb_request	req;
>  	struct list_head	queue;
>  	struct list_head	tds;
> +	struct sg_table		sgt;
>  };
>  
>  #ifdef CONFIG_USB_CHIPIDEA_UDC
> -- 
> 2.34.1
>
Dan Carpenter Oct. 9, 2024, 2:47 p.m. UTC | #2
Hi Xu,

kernel test robot noticed the following build warnings:

url:    https://github.com/intel-lab-lkp/linux/commits/Xu-Yang/usb-chipidea-udc-create-bounce-buffer-for-problem-sglist-entries-if-possible/20240912-125251
base:   https://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb.git usb-testing
patch link:    https://lore.kernel.org/r/20240912045150.915573-2-xu.yang_2%40nxp.com
patch subject: [PATCH 2/2] usb: chipidea: udc: create bounce buffer for problem sglist entries if possible
config: i386-randconfig-141-20240914 (https://download.01.org/0day-ci/archive/20240914/202409141707.TOsGfePE-lkp@intel.com/config)
compiler: gcc-12 (Debian 12.2.0-14) 12.2.0

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Reported-by: Dan Carpenter <dan.carpenter@linaro.org>
| Closes: https://lore.kernel.org/r/202409141707.TOsGfePE-lkp@intel.com/

New smatch warnings:
drivers/usb/chipidea/udc.c:704 _hardware_enqueue() error: uninitialized symbol 'bounced_size'.

vim +/bounced_size +704 drivers/usb/chipidea/udc.c

8e22978c57087a drivers/usb/chipidea/udc.c       Alexander Shishkin 2013-06-24  671  static int _hardware_enqueue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
aa69a8093ff985 drivers/usb/gadget/ci13xxx_udc.c David Lopo         2008-11-17  672  {
8e22978c57087a drivers/usb/chipidea/udc.c       Alexander Shishkin 2013-06-24  673  	struct ci_hdrc *ci = hwep->ci;
0e6ca1998e4c80 drivers/usb/gadget/ci13xxx_udc.c Pavankumar Kondeti 2011-02-18  674  	int ret = 0;
cc9e6c495b0a37 drivers/usb/chipidea/udc.c       Michael Grzeschik  2013-06-13  675  	struct td_node *firstnode, *lastnode;
91db40933ca942 drivers/usb/chipidea/udc.c       Xu Yang            2024-09-12  676  	unsigned int bounced_size;
91db40933ca942 drivers/usb/chipidea/udc.c       Xu Yang            2024-09-12  677  	struct scatterlist *sg;
aa69a8093ff985 drivers/usb/gadget/ci13xxx_udc.c David Lopo         2008-11-17  678  
aa69a8093ff985 drivers/usb/gadget/ci13xxx_udc.c David Lopo         2008-11-17  679  	/* don't queue twice */
2dbc5c4c831418 drivers/usb/chipidea/udc.c       Alexander Shishkin 2013-06-13  680  	if (hwreq->req.status == -EALREADY)
aa69a8093ff985 drivers/usb/gadget/ci13xxx_udc.c David Lopo         2008-11-17  681  		return -EALREADY;
aa69a8093ff985 drivers/usb/gadget/ci13xxx_udc.c David Lopo         2008-11-17  682  
2dbc5c4c831418 drivers/usb/chipidea/udc.c       Alexander Shishkin 2013-06-13  683  	hwreq->req.status = -EALREADY;
aa69a8093ff985 drivers/usb/gadget/ci13xxx_udc.c David Lopo         2008-11-17  684  
91db40933ca942 drivers/usb/chipidea/udc.c       Xu Yang            2024-09-12  685  	if (hwreq->req.num_sgs && hwreq->req.length) {
91db40933ca942 drivers/usb/chipidea/udc.c       Xu Yang            2024-09-12  686  		ret = sglist_get_invalid_entry(ci->dev->parent, hwep->dir,
91db40933ca942 drivers/usb/chipidea/udc.c       Xu Yang            2024-09-12  687  					&hwreq->req);
91db40933ca942 drivers/usb/chipidea/udc.c       Xu Yang            2024-09-12  688  		if (ret < hwreq->req.num_sgs) {
91db40933ca942 drivers/usb/chipidea/udc.c       Xu Yang            2024-09-12  689  			ret = sglist_do_bounce(hwreq, ret, hwep->dir == TX,
91db40933ca942 drivers/usb/chipidea/udc.c       Xu Yang            2024-09-12  690  					&bounced_size);
91db40933ca942 drivers/usb/chipidea/udc.c       Xu Yang            2024-09-12  691  			if (ret)
91db40933ca942 drivers/usb/chipidea/udc.c       Xu Yang            2024-09-12  692  				return ret;
91db40933ca942 drivers/usb/chipidea/udc.c       Xu Yang            2024-09-12  693  		}
91db40933ca942 drivers/usb/chipidea/udc.c       Xu Yang            2024-09-12  694  	}

bounced_size not initialized on else path

91db40933ca942 drivers/usb/chipidea/udc.c       Xu Yang            2024-09-12  695  
aeb78cda51005f drivers/usb/chipidea/udc.c       Arnd Bergmann      2017-03-13  696  	ret = usb_gadget_map_request_by_dev(ci->dev->parent,
aeb78cda51005f drivers/usb/chipidea/udc.c       Arnd Bergmann      2017-03-13  697  					    &hwreq->req, hwep->dir);
5e0aa49ec61e88 drivers/usb/chipidea/udc.c       Alexander Shishkin 2012-05-11  698  	if (ret)
5e0aa49ec61e88 drivers/usb/chipidea/udc.c       Alexander Shishkin 2012-05-11  699  		return ret;
5e0aa49ec61e88 drivers/usb/chipidea/udc.c       Alexander Shishkin 2012-05-11  700  
91db40933ca942 drivers/usb/chipidea/udc.c       Xu Yang            2024-09-12  701  	if (hwreq->sgt.sgl) {
91db40933ca942 drivers/usb/chipidea/udc.c       Xu Yang            2024-09-12  702  		/* We've mapped a bigger buffer, now recover the actual size */
91db40933ca942 drivers/usb/chipidea/udc.c       Xu Yang            2024-09-12  703  		sg = sg_last(hwreq->req.sg, hwreq->req.num_sgs);
91db40933ca942 drivers/usb/chipidea/udc.c       Xu Yang            2024-09-12 @704  		sg_dma_len(sg) = min(sg_dma_len(sg), bounced_size);
                                                                                                                                     ^^^^^^^^^^^^

91db40933ca942 drivers/usb/chipidea/udc.c       Xu Yang            2024-09-12  705  	}
91db40933ca942 drivers/usb/chipidea/udc.c       Xu Yang            2024-09-12  706  
e48aa1eb443f80 drivers/usb/chipidea/udc.c       Peter Chen         2020-02-21  707  	if (hwreq->req.num_mapped_sgs)
e48aa1eb443f80 drivers/usb/chipidea/udc.c       Peter Chen         2020-02-21  708  		ret = prepare_td_for_sg(hwep, hwreq);
e48aa1eb443f80 drivers/usb/chipidea/udc.c       Peter Chen         2020-02-21  709  	else
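
For completeness, one possible way to address the warning, shown only as
a sketch and not necessarily the fix the author will pick, is to
initialize the variable and key the size fix-up on a local flag that is
set only when a bounce actually happened in this call:

	unsigned int bounced_size = 0;
	bool bounced = false;

	/* ... */

	if (hwreq->req.num_sgs && hwreq->req.length) {
		ret = sglist_get_invalid_entry(ci->dev->parent, hwep->dir,
					&hwreq->req);
		if (ret < hwreq->req.num_sgs) {
			ret = sglist_do_bounce(hwreq, ret, hwep->dir == TX,
					&bounced_size);
			if (ret)
				return ret;
			bounced = true;
		}
	}

	/* ... map the request ... */

	if (bounced) {
		/* We've mapped a bigger buffer, now recover the actual size */
		sg = sg_last(hwreq->req.sg, hwreq->req.num_sgs);
		sg_dma_len(sg) = min(sg_dma_len(sg), bounced_size);
	}

Keying on a local flag also removes the implicit dependence on
hwreq->sgt.sgl, which smatch cannot correlate with the bounce path.
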
diff mbox series

Patch

diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 5d9369d01065..dc4dae2c31b4 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -10,6 +10,7 @@ 
 #include <linux/delay.h>
 #include <linux/device.h>
 #include <linux/dmapool.h>
+#include <linux/dma-direct.h>
 #include <linux/err.h>
 #include <linux/irqreturn.h>
 #include <linux/kernel.h>
@@ -540,6 +541,126 @@  static int prepare_td_for_sg(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
 	return ret;
 }
 
+/*
+ * Verify if the scatterlist is valid by iterating each sg entry.
+ * Return invalid sg entry index which is less than num_sgs.
+ */
+static int sglist_get_invalid_entry(struct device *dma_dev, u8 dir,
+			struct usb_request *req)
+{
+	int i;
+	struct scatterlist *s = req->sg;
+
+	if (req->num_sgs == 1)
+		return 1;
+
+	dir = dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+
+	for (i = 0; i < req->num_sgs; i++, s = sg_next(s)) {
+		/* Only small sg (generally last sg) may be bounced. If
+		 * that happens. we can't ensure the addr is page-aligned
+		 * after dma map.
+		 */
+		if (dma_kmalloc_needs_bounce(dma_dev, s->length, dir))
+			break;
+
+		/* Make sure each sg start address (except first sg) is
+		 * page-aligned and end address (except last sg) is also
+		 * page-aligned.
+		 */
+		if (i == 0) {
+			if (!IS_ALIGNED(s->offset + s->length,
+						CI_HDRC_PAGE_SIZE))
+				break;
+		} else {
+			if (s->offset)
+				break;
+			if (!sg_is_last(s) && !IS_ALIGNED(s->length,
+						CI_HDRC_PAGE_SIZE))
+				break;
+		}
+	}
+
+	return i;
+}
+
+static int sglist_do_bounce(struct ci_hw_req *hwreq, int index,
+			bool copy, unsigned int *bounced)
+{
+	void *buf;
+	int i, ret, nents, num_sgs;
+	unsigned int rest, rounded;
+	struct scatterlist *sg, *src, *dst;
+
+	nents = index + 1;
+	ret = sg_alloc_table(&hwreq->sgt, nents, GFP_KERNEL);
+	if (ret)
+		return ret;
+
+	sg = src = hwreq->req.sg;
+	num_sgs = hwreq->req.num_sgs;
+	rest = hwreq->req.length;
+	dst = hwreq->sgt.sgl;
+
+	for (i = 0; i < index; i++) {
+		memcpy(dst, src, sizeof(*src));
+		rest -= src->length;
+		src = sg_next(src);
+		dst = sg_next(dst);
+	}
+
+	/* create one bounce buffer */
+	rounded = round_up(rest, CI_HDRC_PAGE_SIZE);
+	buf = kmalloc(rounded, GFP_KERNEL);
+	if (!buf) {
+		sg_free_table(&hwreq->sgt);
+		return -ENOMEM;
+	}
+
+	sg_set_buf(dst, buf, rounded);
+
+	hwreq->req.sg = hwreq->sgt.sgl;
+	hwreq->req.num_sgs = nents;
+	hwreq->sgt.sgl = sg;
+	hwreq->sgt.nents = num_sgs;
+
+	if (copy)
+		sg_copy_to_buffer(src, num_sgs - index, buf, rest);
+
+	*bounced = rest;
+
+	return 0;
+}
+
+static void sglist_do_debounce(struct ci_hw_req *hwreq, bool copy)
+{
+	void *buf;
+	int i, nents, num_sgs;
+	struct scatterlist *sg, *src, *dst;
+
+	sg = hwreq->req.sg;
+	num_sgs = hwreq->req.num_sgs;
+	src = sg_last(sg, num_sgs);
+	buf = sg_virt(src);
+
+	if (copy) {
+		dst = hwreq->sgt.sgl;
+		for (i = 0; i < num_sgs - 1; i++)
+			dst = sg_next(dst);
+
+		nents = hwreq->sgt.nents - num_sgs + 1;
+		sg_copy_from_buffer(dst, nents, buf, sg_dma_len(src));
+	}
+
+	hwreq->req.sg = hwreq->sgt.sgl;
+	hwreq->req.num_sgs = hwreq->sgt.nents;
+	hwreq->sgt.sgl = sg;
+	hwreq->sgt.nents = num_sgs;
+
+	kfree(buf);
+	sg_free_table(&hwreq->sgt);
+}
+
 /**
  * _hardware_enqueue: configures a request at hardware level
  * @hwep:   endpoint
@@ -552,6 +673,8 @@  static int _hardware_enqueue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
 	struct ci_hdrc *ci = hwep->ci;
 	int ret = 0;
 	struct td_node *firstnode, *lastnode;
+	unsigned int bounced_size;
+	struct scatterlist *sg;
 
 	/* don't queue twice */
 	if (hwreq->req.status == -EALREADY)
@@ -559,11 +682,28 @@  static int _hardware_enqueue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
 
 	hwreq->req.status = -EALREADY;
 
+	if (hwreq->req.num_sgs && hwreq->req.length) {
+		ret = sglist_get_invalid_entry(ci->dev->parent, hwep->dir,
+					&hwreq->req);
+		if (ret < hwreq->req.num_sgs) {
+			ret = sglist_do_bounce(hwreq, ret, hwep->dir == TX,
+					&bounced_size);
+			if (ret)
+				return ret;
+		}
+	}
+
 	ret = usb_gadget_map_request_by_dev(ci->dev->parent,
 					    &hwreq->req, hwep->dir);
 	if (ret)
 		return ret;
 
+	if (hwreq->sgt.sgl) {
+		/* We've mapped a bigger buffer, now recover the actual size */
+		sg = sg_last(hwreq->req.sg, hwreq->req.num_sgs);
+		sg_dma_len(sg) = min(sg_dma_len(sg), bounced_size);
+	}
+
 	if (hwreq->req.num_mapped_sgs)
 		ret = prepare_td_for_sg(hwep, hwreq);
 	else
@@ -742,6 +882,10 @@  static int _hardware_dequeue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
 	usb_gadget_unmap_request_by_dev(hwep->ci->dev->parent,
 					&hwreq->req, hwep->dir);
 
+	/* sglist bounced */
+	if (hwreq->sgt.sgl)
+		sglist_do_debounce(hwreq, hwep->dir == RX);
+
 	hwreq->req.actual += actual;
 
 	if (hwreq->req.status)
@@ -1588,6 +1732,9 @@  static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
 
 	usb_gadget_unmap_request(&hwep->ci->gadget, req, hwep->dir);
 
+	if (hwreq->sgt.sgl)
+		sglist_do_debounce(hwreq, false);
+
 	req->status = -ECONNRESET;
 
 	if (hwreq->req.complete != NULL) {
diff --git a/drivers/usb/chipidea/udc.h b/drivers/usb/chipidea/udc.h
index 5193df1e18c7..c8a47389a46b 100644
--- a/drivers/usb/chipidea/udc.h
+++ b/drivers/usb/chipidea/udc.h
@@ -69,11 +69,13 @@  struct td_node {
  * @req: request structure for gadget drivers
  * @queue: link to QH list
  * @tds: link to TD list
+ * @sgt: hold original sglist when bounce sglist
  */
 struct ci_hw_req {
 	struct usb_request	req;
 	struct list_head	queue;
 	struct list_head	tds;
+	struct sg_table		sgt;
 };
 
 #ifdef CONFIG_USB_CHIPIDEA_UDC