--- a/drivers/usb/chipidea/ci.h
+++ b/drivers/usb/chipidea/ci.h
@@ -25,6 +25,7 @@
#define TD_PAGE_COUNT 5
#define CI_HDRC_PAGE_SIZE 4096ul /* page size for TD's */
#define ENDPT_MAX 32
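+/* largest transfer one TD can describe: 5 pages * 4096 bytes = 20 KiB */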
+#define CI_MAX_BUF_SIZE	(TD_PAGE_COUNT * CI_HDRC_PAGE_SIZE)

/******************************************************************************
* REGISTERS
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -338,7 +338,7 @@ static int hw_usb_reset(struct ci_hdrc *ci)
*****************************************************************************/
static int add_td_to_list(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq,
- unsigned length)
+ unsigned int length, struct scatterlist *s)
{
int i;
u32 temp;
@@ -366,7 +366,13 @@ static int add_td_to_list(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq,
node->ptr->token |= cpu_to_le32(mul << __ffs(TD_MULTO));
	}

-	temp = (u32) (hwreq->req.dma + hwreq->req.actual);
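+	/*
+	 * An SG request addresses the TD from the current segment's DMA
+	 * address; td_remaining_size records the buffer room left so more
+	 * segments can be packed into this TD later.
+	 */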
+ if (s) {
+ temp = (u32) (sg_dma_address(s) + hwreq->req.actual);
+ node->td_remaining_size = CI_MAX_BUF_SIZE - length;
+ } else {
+ temp = (u32) (hwreq->req.dma + hwreq->req.actual);
+ }
+
if (length) {
node->ptr->page[0] = cpu_to_le32(temp);
for (i = 1; i < TD_PAGE_COUNT; i++) {
@@ -400,6 +406,122 @@ static inline u8 _usb_addr(struct ci_hw_ep *ep)
return ((ep->dir == TX) ? USB_ENDPOINT_DIR_MASK : 0) | ep->num;
}
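+/*
+ * Linear (non-SG) request path, moved out of _hardware_enqueue():
+ * split the buffer into TDs of at most TD_PAGE_COUNT pages each.
+ */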
+static int prepare_td_for_non_sg(struct ci_hw_ep *hwep,
+ struct ci_hw_req *hwreq)
+{
+ unsigned int rest = hwreq->req.length;
+ int pages = TD_PAGE_COUNT;
+ int ret = 0;
+
+ if (rest == 0) {
+ ret = add_td_to_list(hwep, hwreq, 0, NULL);
+ if (ret < 0)
+ return ret;
+ }
+
+	/*
+	 * The first buffer may not be page aligned.
+	 * In that case the transfer spans into one extra TD.
+	 */
+ if (hwreq->req.dma % PAGE_SIZE)
+ pages--;
+
+ while (rest > 0) {
+ unsigned int count = min(hwreq->req.length - hwreq->req.actual,
+ (unsigned int)(pages * CI_HDRC_PAGE_SIZE));
+
+ ret = add_td_to_list(hwep, hwreq, count, NULL);
+ if (ret < 0)
+ return ret;
+
+ rest -= count;
+ }
+
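+	/*
+	 * A TX request that fills an exact number of packets needs one
+	 * extra zero-length TD when req.zero asks for a terminating ZLP.
+	 */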
+ if (hwreq->req.zero && hwreq->req.length && hwep->dir == TX
+ && (hwreq->req.length % hwep->ep.maxpacket == 0)) {
+ ret = add_td_to_list(hwep, hwreq, 0, NULL);
+ if (ret < 0)
+ return ret;
+ }
+
+ return ret;
+}
+
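+/*
+ * Build TDs for a single scatterlist segment, at most CI_MAX_BUF_SIZE
+ * bytes each; req.actual serves as the running offset into the segment.
+ */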
+static int prepare_td_per_sg(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq,
+ struct scatterlist *s)
+{
+ unsigned int rest = sg_dma_len(s);
+ int ret = 0;
+
+ hwreq->req.actual = 0;
+ while (rest > 0) {
+ unsigned int count = min_t(unsigned int, rest,
+ CI_MAX_BUF_SIZE);
+
+ ret = add_td_to_list(hwep, hwreq, count, s);
+ if (ret < 0)
+ return ret;
+
+ rest -= count;
+ }
+
+ return ret;
+}
+
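+/*
+ * Append a page-aligned segment to a TD that still has free page slots:
+ * grow the TD's byte count and fill page[] starting at the first unused
+ * slot, e.g. with 12K of 20K left the segment lands at page[(20K-12K)/4K],
+ * i.e. page[2].
+ */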
+static void ci_add_buffer_entry(struct td_node *node, struct scatterlist *s)
+{
+ int empty_td_slot_index = (CI_MAX_BUF_SIZE - node->td_remaining_size)
+ / CI_HDRC_PAGE_SIZE;
+ int i;
+
+ node->ptr->token +=
+ cpu_to_le32(sg_dma_len(s) << __ffs(TD_TOTAL_BYTES));
+
+ for (i = empty_td_slot_index; i < TD_PAGE_COUNT; i++) {
+ u32 page = (u32) sg_dma_address(s) +
+ (i - empty_td_slot_index) * CI_HDRC_PAGE_SIZE;
+
+ page &= ~TD_RESERVED_MASK;
+ node->ptr->page[i] = cpu_to_le32(page);
+ }
+}
+
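+/*
+ * Walk the mapped scatterlist: merge a segment into the tail TD when it
+ * fits, otherwise start a fresh TD chain for it. Zero-length requests,
+ * the zero flag and non-page-aligned segments are rejected here.
+ */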
+static int prepare_td_for_sg(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
+{
+ struct usb_request *req = &hwreq->req;
+ struct scatterlist *s = req->sg;
+ int ret = 0, i = 0;
+ struct td_node *node = NULL;
+
+ if (!s || req->zero || req->length == 0) {
+ dev_err(hwep->ci->dev, "not supported operation for sg\n");
+ return -EINVAL;
+ }
+
+ while (i++ < req->num_mapped_sgs) {
+ if (sg_dma_address(s) % PAGE_SIZE) {
+ dev_err(hwep->ci->dev, "not page aligned sg buffer\n");
+ return -EINVAL;
+ }
+
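+		/* the tail TD still has room: pack this segment into it */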
+ if (node && (node->td_remaining_size >= sg_dma_len(s))) {
+ ci_add_buffer_entry(node, s);
+ node->td_remaining_size -= sg_dma_len(s);
+ } else {
+ ret = prepare_td_per_sg(hwep, hwreq, s);
+ if (ret)
+ return ret;
+
+ node = list_entry(hwreq->tds.prev,
+ struct td_node, td);
+ }
+
+ s = sg_next(s);
+ }
+
+ return ret;
+}
+
/**
* _hardware_enqueue: configures a request at hardware level
* @hwep: endpoint
@@ -411,8 +533,6 @@ static int _hardware_enqueue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
{
struct ci_hdrc *ci = hwep->ci;
int ret = 0;
- unsigned rest = hwreq->req.length;
- int pages = TD_PAGE_COUNT;
	struct td_node *firstnode, *lastnode;

	/* don't queue twice */
@@ -426,35 +546,13 @@ static int _hardware_enqueue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
if (ret)
		return ret;

-	/*
- * The first buffer could be not page aligned.
- * In that case we have to span into one extra td.
- */
- if (hwreq->req.dma % PAGE_SIZE)
- pages--;
-
- if (rest == 0) {
- ret = add_td_to_list(hwep, hwreq, 0);
- if (ret < 0)
- goto done;
- }
-
- while (rest > 0) {
- unsigned count = min(hwreq->req.length - hwreq->req.actual,
- (unsigned)(pages * CI_HDRC_PAGE_SIZE));
- ret = add_td_to_list(hwep, hwreq, count);
- if (ret < 0)
- goto done;
-
- rest -= count;
- }
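+	/* SG-mapped requests build TDs per segment; others use the linear path */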
+ if (hwreq->req.num_mapped_sgs)
+ ret = prepare_td_for_sg(hwep, hwreq);
+ else
+		ret = prepare_td_for_non_sg(hwep, hwreq);

-	if (hwreq->req.zero && hwreq->req.length && hwep->dir == TX
- && (hwreq->req.length % hwep->ep.maxpacket == 0)) {
- ret = add_td_to_list(hwep, hwreq, 0);
- if (ret < 0)
- goto done;
- }
+ if (ret)
+		return ret;

	firstnode = list_first_entry(&hwreq->tds, struct td_node, td);
@@ -1941,6 +2039,7 @@ static int udc_start(struct ci_hdrc *ci)
ci->gadget.max_speed = USB_SPEED_HIGH;
ci->gadget.name = ci->platdata->name;
ci->gadget.otg_caps = otg_caps;
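+	/* advertise SG support so gadget drivers may queue sg requests */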
+	ci->gadget.sg_supported = 1;

	if (ci->platdata->flags & CI_HDRC_REQUIRES_ALIGNED_DMA)
ci->gadget.quirk_avoids_skb_reserve = 1;
--- a/drivers/usb/chipidea/udc.h
+++ b/drivers/usb/chipidea/udc.h
@@ -61,6 +61,7 @@ struct td_node {
struct list_head td;
dma_addr_t dma;
struct ci_hw_td *ptr;
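+	/* buffer space still unused in this TD, in bytes (SG mode only) */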
+ int td_remaining_size;
};

/**