@@ -400,31 +400,18 @@ static inline u8 _usb_addr(struct ci_hw_ep *ep)
return ((ep->dir == TX) ? USB_ENDPOINT_DIR_MASK : 0) | ep->num;
}
-/**
- * _hardware_enqueue: configures a request at hardware level
- * @hwep: endpoint
- * @hwreq: request
- *
- * This function returns an error code
- */
-static int _hardware_enqueue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
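+/**
+ * prepare_td_for_non_sg: build the td list for a linearly mapped request
+ * @hwep: endpoint
+ * @hwreq: request
+ *
+ * The buffer is split into chunks of at most TD_PAGE_COUNT pages; a
+ * zero-length td is appended when the zero flag requires a short-packet
+ * terminator.
+ */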
+static int prepare_td_for_non_sg(struct ci_hw_ep *hwep,
+ struct ci_hw_req *hwreq)
{
- struct ci_hdrc *ci = hwep->ci;
- int ret = 0;
unsigned rest = hwreq->req.length;
int pages = TD_PAGE_COUNT;
- struct td_node *firstnode, *lastnode;
-
- /* don't queue twice */
- if (hwreq->req.status == -EALREADY)
- return -EALREADY;
-
- hwreq->req.status = -EALREADY;
+	int ret = 0;

- ret = usb_gadget_map_request_by_dev(ci->dev->parent,
- &hwreq->req, hwep->dir);
- if (ret)
- return ret;
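+	/* a zero-length request still needs one td */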
+ if (rest == 0) {
+ ret = add_td_to_list(hwep, hwreq, 0);
+ if (ret < 0)
+ return ret;
+	}

/*
* The first buffer could be not page aligned.
@@ -433,18 +420,13 @@ static int _hardware_enqueue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
if (hwreq->req.dma % PAGE_SIZE)
pages--;
- if (rest == 0) {
- ret = add_td_to_list(hwep, hwreq, 0);
- if (ret < 0)
- goto done;
- }
-
while (rest > 0) {
unsigned count = min(hwreq->req.length - hwreq->req.actual,
- (unsigned)(pages * CI_HDRC_PAGE_SIZE));
+ (unsigned)(pages * CI_HDRC_PAGE_SIZE));
+
ret = add_td_to_list(hwep, hwreq, count);
if (ret < 0)
- goto done;
+ return ret;
rest -= count;
}
@@ -453,9 +435,138 @@ static int _hardware_enqueue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
&& (hwreq->req.length % hwep->ep.maxpacket == 0)) {
ret = add_td_to_list(hwep, hwreq, 0);
if (ret < 0)
- goto done;
+ return ret;
}
+ return ret;
+}
+
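+/**
+ * add_td_to_list_sg: add one td covering part of a scatterlist entry
+ * @hwep: endpoint
+ * @hwreq: request
+ * @s: current scatterlist entry
+ * @length: bytes covered by this td
+ *
+ * The td is linked at the tail of the request's td list; req.actual is
+ * advanced and serves as the running offset within @s.
+ */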
+static int add_td_to_list_sg(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq,
+ struct scatterlist *s, unsigned length)
+{
+ int i;
+ u32 temp;
+ struct td_node *lastnode, *node = kzalloc(sizeof(struct td_node),
+ GFP_ATOMIC);
+
+ if (node == NULL)
+ return -ENOMEM;
+
+ node->ptr = dma_pool_zalloc(hwep->td_pool, GFP_ATOMIC, &node->dma);
+ if (node->ptr == NULL) {
+ kfree(node);
+ return -ENOMEM;
+ }
+
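+	/* encode the transfer size in the token and mark the td active */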
+ node->ptr->token = cpu_to_le32(length << __ffs(TD_TOTAL_BYTES));
+ node->ptr->token &= cpu_to_le32(TD_TOTAL_BYTES);
+ node->ptr->token |= cpu_to_le32(TD_STATUS_ACTIVE);
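+	/* isochronous TX: override the packets-per-frame count (MULT) in the td */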
+ if (hwep->type == USB_ENDPOINT_XFER_ISOC && hwep->dir == TX) {
+ u32 mul = hwreq->req.length / hwep->ep.maxpacket;
+
+ if (hwreq->req.length == 0
+ || hwreq->req.length % hwep->ep.maxpacket)
+ mul++;
+ node->ptr->token |= cpu_to_le32(mul << __ffs(TD_MULTO));
+ }
+
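+	/* buffer pointers advance in CI_HDRC_PAGE_SIZE strides from the entry offset */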
+ temp = (u32) (sg_dma_address(s) + hwreq->req.actual);
+ for (i = 0; i < TD_PAGE_COUNT; i++)
+ node->ptr->page[i] = cpu_to_le32(temp + i * CI_HDRC_PAGE_SIZE);
+
+ hwreq->req.actual += length;
+
+ if (!list_empty(&hwreq->tds)) {
+ /* get the last entry */
+ lastnode = list_entry(hwreq->tds.prev,
+ struct td_node, td);
+ lastnode->ptr->next = cpu_to_le32(node->dma);
+ }
+
+ INIT_LIST_HEAD(&node->td);
+ list_add_tail(&node->td, &hwreq->tds);
+
+ return 0;
+}
+
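+/**
+ * prepare_td_per_sg: build tds for a single scatterlist entry
+ * @hwep: endpoint
+ * @hwreq: request
+ * @s: scatterlist entry to cover
+ *
+ * Each td covers at most TD_PAGE_COUNT * CI_HDRC_PAGE_SIZE bytes of @s.
+ */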
+static int prepare_td_per_sg(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq,
+ struct scatterlist *s)
+{
+	unsigned int rest = sg_dma_len(s);
+ int ret = 0;
+
+ hwreq->req.actual = 0;
+ while (rest > 0) {
+		unsigned int count = min(rest,
+			(unsigned int)(TD_PAGE_COUNT * CI_HDRC_PAGE_SIZE));
+
+ ret = add_td_to_list_sg(hwep, hwreq, s, count);
+ if (ret < 0)
+ return ret;
+
+ rest -= count;
+ }
+
+ return ret;
+}
+
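+/**
+ * prepare_td_for_sg: build the td list for a scatter-gather request
+ * @hwep: endpoint
+ * @hwreq: request
+ *
+ * Every mapped entry must be page aligned; zero-length requests and
+ * requests with the zero flag set are rejected.
+ */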
+static int prepare_td_for_sg(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
+{
+ struct usb_request *req = &hwreq->req;
+ struct scatterlist *s = req->sg;
+ int ret;
+
+ if (!s || req->zero || req->length == 0) {
+ dev_err(hwep->ci->dev, "not supported operation for sg\n");
+ return -EINVAL;
+ }
+
+ do {
+ if (sg_dma_address(s) % PAGE_SIZE) {
+ dev_err(hwep->ci->dev, "non-page aligned sg\n");
+ return -EINVAL;
+ }
+
+ ret = prepare_td_per_sg(hwep, hwreq, s);
+ if (ret)
+ return ret;
+ } while ((s = sg_next(s)));
+
+ return ret;
+}
+
+/**
+ * _hardware_enqueue: configures a request at hardware level
+ * @hwep: endpoint
+ * @hwreq: request
+ *
+ * Return: 0 on success, a negative error code otherwise
+ */
+static int _hardware_enqueue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
+{
+ struct ci_hdrc *ci = hwep->ci;
+ int ret = 0;
+ struct td_node *firstnode, *lastnode;
+
+ /* don't queue twice */
+ if (hwreq->req.status == -EALREADY)
+ return -EALREADY;
+
+ hwreq->req.status = -EALREADY;
+
+ ret = usb_gadget_map_request_by_dev(ci->dev->parent,
+ &hwreq->req, hwep->dir);
+ if (ret)
+ return ret;
+
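+	/* usb_gadget_map_request_by_dev() sets num_mapped_sgs for sg requests */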
+ if (hwreq->req.num_mapped_sgs)
+ ret = prepare_td_for_sg(hwep, hwreq);
+ else
+ ret = prepare_td_for_non_sg(hwep, hwreq);
+
+ if (ret)
+ return ret;
+
firstnode = list_first_entry(&hwreq->tds, struct td_node, td);
lastnode = list_entry(hwreq->tds.prev,
@@ -1935,6 +2046,7 @@ static int udc_start(struct ci_hdrc *ci)
ci->gadget.max_speed = USB_SPEED_HIGH;
ci->gadget.name = ci->platdata->name;
ci->gadget.otg_caps = otg_caps;
+ ci->gadget.sg_supported = 1;
if (ci->platdata->flags & CI_HDRC_REQUIRES_ALIGNED_DMA)
ci->gadget.quirk_avoids_skb_reserve = 1;