@@ -102,11 +102,51 @@ static bool link_trb_toggles_cycle(union usbssp_trb *trb)
return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
}
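+
+/*
+ * A request may span several TDs; it can be given back only after
+ * all of its TDs have been processed.
+ */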
+static bool last_td_in_request(struct usbssp_td *td)
+{
+ struct usbssp_request *req_priv = td->priv_request;
+
+ return req_priv->num_tds_done == req_priv->num_tds;
+}
+
static void inc_td_cnt(struct usbssp_request *priv_req)
{
priv_req->num_tds_done++;
}
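+
+/*
+ * Turn a TRB into a no-op so the DC skips it. Link TRBs keep their type
+ * and are only unchained, so the ring segments stay connected.
+ */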
+static void trb_to_noop(union usbssp_trb *trb, u32 noop_type)
+{
+ if (trb_is_link(trb)) {
+ /* unchain chained link TRBs */
+ trb->link.control &= cpu_to_le32(~TRB_CHAIN);
+ } else {
+ trb->generic.field[0] = 0;
+ trb->generic.field[1] = 0;
+ trb->generic.field[2] = 0;
+ /* Preserve only the cycle bit of this TRB */
+ trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
+ trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(noop_type));
+ }
+}
+
+/*
+ * Updates trb to point to the next TRB in the ring, and updates seg if the next
+ * TRB is in a new segment. This does not skip over link TRBs, and it does not
+ * affect the ring dequeue or enqueue pointers.
+ */
+static void next_trb(struct usbssp_udc *usbssp_data,
+ struct usbssp_ring *ring,
+ struct usbssp_segment **seg,
+ union usbssp_trb **trb)
+{
+ if (trb_is_link(*trb)) {
+ *seg = (*seg)->next;
+ *trb = ((*seg)->trbs);
+ } else {
+ (*trb)++;
+ }
+}
+
/*
* See Cycle bit rules. SW is the consumer for the event ring only.
* Don't make a ring full of link TRBs. That would be dumb and this would loop.
@@ -347,6 +387,157 @@ struct usbssp_ring *usbssp_triad_to_transfer_ring(struct usbssp_udc *usbssp_data
return NULL;
}
+
+/*
+ * Get the hw dequeue pointer DC stopped on, either directly from the
+ * endpoint context, or if streams are in use from the stream context.
+ * The low four bits of the returned hw_dequeue hold the cycle state and,
+ * when streams are in use, the stream context type.
+ */
+u64 usbssp_get_hw_deq(struct usbssp_udc *usbssp_data,
+ struct usbssp_device *dev,
+ unsigned int ep_index,
+ unsigned int stream_id)
+{
+ struct usbssp_ep_ctx *ep_ctx;
+ struct usbssp_stream_ctx *st_ctx;
+ struct usbssp_ep *ep;
+
+ ep = &dev->eps[ep_index];
+
+ if (ep->ep_state & EP_HAS_STREAMS) {
+ st_ctx = &ep->stream_info->stream_ctx_array[stream_id];
+ return le64_to_cpu(st_ctx->stream_ring);
+ }
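+ /* Without streams, the dequeue pointer lives in the endpoint context */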
+ ep_ctx = usbssp_get_ep_ctx(usbssp_data, dev->out_ctx, ep_index);
+ return le64_to_cpu(ep_ctx->deq);
+}
+
+/*
+ * Move the DC endpoint ring dequeue pointer past cur_td.
+ * Record the new state of the DC endpoint ring dequeue segment,
+ * dequeue pointer, and new consumer cycle state in state.
+ * Update our internal representation of the ring's dequeue pointer.
+ *
+ * We do this in three jumps:
+ * - First we update our new ring state to be the same as when the DC stopped.
+ * - Then we traverse the ring to find the segment that contains
+ * the last TRB in the TD. We toggle the DC new cycle state when we pass
+ * any link TRBs with the toggle cycle bit set.
+ * - Finally we move the dequeue state one TRB further, toggling the cycle bit
+ * if we've moved it past a link TRB with the toggle cycle bit set.
+ */
+void usbssp_find_new_dequeue_state(struct usbssp_udc *usbssp_data,
+ unsigned int ep_index,
+ unsigned int stream_id,
+ struct usbssp_td *cur_td,
+ struct usbssp_dequeue_state *state)
+{
+ struct usbssp_device *dev_priv = &usbssp_data->devs;
+ struct usbssp_ep *ep_priv = &dev_priv->eps[ep_index];
+ struct usbssp_ring *ep_ring;
+ struct usbssp_segment *new_seg;
+ union usbssp_trb *new_deq;
+ dma_addr_t addr;
+ u64 hw_dequeue;
+ bool cycle_found = false;
+ bool td_last_trb_found = false;
+
+ ep_ring = usbssp_triad_to_transfer_ring(usbssp_data,
+ ep_index, stream_id);
+ if (!ep_ring) {
+ dev_warn(usbssp_data->dev, "WARN can't find new dequeue state "
+ "for invalid stream ID %u.\n",
+ stream_id);
+ return;
+ }
+
+ /* Dig out the cycle state saved by the DC during the stop ep cmd */
+ usbssp_dbg_trace(usbssp_data, trace_usbssp_dbg_cancel_request,
+ "Finding endpoint context");
+
+ hw_dequeue = usbssp_get_hw_deq(usbssp_data, dev_priv,
+ ep_index, stream_id);
+ new_seg = ep_ring->deq_seg;
+ new_deq = ep_ring->dequeue;
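+ /* Bit 0 of hw_dequeue is the DC's consumer cycle state */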
+ state->new_cycle_state = hw_dequeue & 0x1;
+ state->stream_id = stream_id;
+
+ /*
+ * We want to find the pointer, segment and cycle state of the new TRB
+ * (the one after the current TD's last_trb). We know the cycle state at
+ * hw_dequeue, so walk the ring until both hw_dequeue and last_trb are
+ * found.
+ */
+ do {
+ if (!cycle_found && usbssp_trb_virt_to_dma(new_seg, new_deq)
+ == (dma_addr_t)(hw_dequeue & ~0xf)) {
+ cycle_found = true;
+ if (td_last_trb_found)
+ break;
+ }
+
+ if (new_deq == cur_td->last_trb)
+ td_last_trb_found = true;
+
+ if (cycle_found && trb_is_link(new_deq) &&
+ link_trb_toggles_cycle(new_deq))
+ state->new_cycle_state ^= 0x1;
+
+ next_trb(usbssp_data, ep_ring, &new_seg, &new_deq);
+
+ /* Search wrapped around, bail out */
+ if (new_deq == ep_priv->ring->dequeue) {
+ dev_err(usbssp_data->dev,
+ "Error: Failed finding new dequeue state\n");
+ state->new_deq_seg = NULL;
+ state->new_deq_ptr = NULL;
+ return;
+ }
+
+ } while (!cycle_found || !td_last_trb_found);
+
+ state->new_deq_seg = new_seg;
+ state->new_deq_ptr = new_deq;
+
+ /* Don't update the ring cycle state for the producer (us). */
+ usbssp_dbg_trace(usbssp_data, trace_usbssp_dbg_cancel_request,
+ "Cycle state = 0x%x", state->new_cycle_state);
+
+ usbssp_dbg_trace(usbssp_data, trace_usbssp_dbg_cancel_request,
+ "New dequeue segment = %p (virtual)",
+ state->new_deq_seg);
+ addr = usbssp_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
+ usbssp_dbg_trace(usbssp_data, trace_usbssp_dbg_cancel_request,
+ "New dequeue pointer = 0x%llx (DMA)",
+ (unsigned long long) addr);
+}
+
+/*
+ * flip_cycle means flip the cycle bit of all but the first and last TRB.
+ * (The last TRB actually points to the ring enqueue pointer, which is not part
+ * of this TD.) This is used to remove partially enqueued isoc TDs from a ring.
+ */
+static void td_to_noop(struct usbssp_udc *usbssp_data,
+ struct usbssp_ring *ep_ring,
+ struct usbssp_td *td, bool flip_cycle)
+{
+ struct usbssp_segment *seg = td->start_seg;
+ union usbssp_trb *trb = td->first_trb;
+
+ while (1) {
+ trb_to_noop(trb, TRB_TR_NOOP);
+
+ /* flip cycle if asked to */
+ if (flip_cycle && trb != td->first_trb && trb != td->last_trb)
+ trb->generic.field[3] ^= cpu_to_le32(TRB_CYCLE);
+
+ if (trb == td->last_trb)
+ break;
+
+ next_trb(usbssp_data, ep_ring, &seg, &trb);
+ }
+}
+
/* Must be called with usbssp_data->lock held in interrupt context
* or usbssp_data->irq_thread_lock from thread context (deferred interrupt).
*/
@@ -365,6 +556,139 @@ void usbssp_giveback_request_in_irq(struct usbssp_udc *usbssp_data,
usbssp_gadget_giveback(req_priv->dep, req_priv, status);
}
+
+void usbssp_unmap_td_bounce_buffer(struct usbssp_udc *usbssp_data,
+ struct usbssp_ring *ring,
+ struct usbssp_td *td)
+{
+ /* TODO: ??? */
+}
+
+void usbssp_remove_request(struct usbssp_udc *usbssp_data,
+ struct usbssp_request *req_priv, int ep_index)
+{
+ int i = 0;
+ struct usbssp_ring *ep_ring;
+ struct usbssp_ep *ep;
+ struct usbssp_td *cur_td = NULL;
+ struct usbssp_ep_ctx *ep_ctx;
+ struct usbssp_device *priv_dev;
+ u64 hw_deq;
+ struct usbssp_dequeue_state deq_state;
+
+ memset(&deq_state, 0, sizeof(deq_state));
+ ep = &usbssp_data->devs.eps[ep_index];
+
+ priv_dev = &usbssp_data->devs;
+ ep_ctx = usbssp_get_ep_ctx(usbssp_data, priv_dev->out_ctx, ep_index);
+ trace_usbssp_remove_request(ep_ctx);
+
+ i = req_priv->num_tds_done;
+
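+ /* Walk only the TDs that the DC has not yet completed */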
+ for (; i < req_priv->num_tds; i++) {
+ cur_td = &req_priv->td[i];
+ usbssp_dbg_trace(usbssp_data, trace_usbssp_dbg_cancel_request,
+ "Removing canceled TD starting at 0x%llx (dma).",
+ (unsigned long long)usbssp_trb_virt_to_dma(
+ cur_td->start_seg,
+ cur_td->first_trb));
+
+ ep_ring = usbssp_request_to_transfer_ring(usbssp_data,
+ cur_td->priv_request);
+
+ if (!ep_ring) {
+ /*
+ * This shouldn't happen unless a driver is mucking
+ * with the stream ID after submission. This will
+ * leave the TD on the hardware ring, and the hardware
+ * will try to execute it, and may access a buffer
+ * that has already been freed. In the best case, the
+ * hardware will execute it, and the event handler will
+ * ignore the completion event for that TD, since it was
+ * removed from the td_list for that endpoint. In
+ * short, don't muck with the stream ID after
+ * submission.
+ */
+ dev_warn(usbssp_data->dev, "WARN Cancelled USB Request %p"
+ " has invalid stream ID %u.\n",
+ cur_td->priv_request,
+ cur_td->priv_request->request.stream_id);
+ goto remove_finished_td;
+ }
+
+ if (!(ep->ep_state & USBSSP_EP_ENABLED) ||
+ ep->ep_state & USBSSP_EP_DISABLE_PENDING) {
+ goto remove_finished_td;
+ }
+
+ /*
+ * If we stopped on the TD we need to cancel, then we have to
+ * move the DC endpoint ring dequeue pointer past this TD.
+ */
+ hw_deq = usbssp_get_hw_deq(usbssp_data, priv_dev, ep_index,
+ cur_td->priv_request->request.stream_id);
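+ /* Strip cycle state and stream context type bits to get the DMA address */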
+ hw_deq &= ~0xf;
+
+ if (usbssp_trb_in_td(usbssp_data, cur_td->start_seg,
+ cur_td->first_trb, cur_td->last_trb, hw_deq, false)) {
+ usbssp_find_new_dequeue_state(usbssp_data, ep_index,
+ cur_td->priv_request->request.stream_id,
+ cur_td, &deq_state);
+ } else {
+ td_to_noop(usbssp_data, ep_ring, cur_td, false);
+ }
+
+remove_finished_td:
+ /*
+ * The event handler won't see a completion for this TD anymore,
+ * so remove it from the endpoint ring's TD list.
+ */
+ list_del_init(&cur_td->td_list);
+ }
+
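+ /* The Stop Endpoint command has been handled; clear the pending flag */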
+ ep->ep_state &= ~EP_STOP_CMD_PENDING;
+
+ if (!(ep->ep_state & USBSSP_EP_DISABLE_PENDING) &&
+ ep->ep_state & USBSSP_EP_ENABLED) {
+ /* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
+ if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
+ usbssp_queue_new_dequeue_state(usbssp_data, ep_index,
+ &deq_state);
+ usbssp_ring_cmd_db(usbssp_data);
+ } else {
+ /*
+ * Otherwise ring the doorbell(s) to restart queued
+ * transfers
+ */
+ ring_doorbell_for_active_rings(usbssp_data, ep_index);
+ }
+ }
+
+ /* Complete the cancellation of the USB request. */
+ i = req_priv->num_tds_done;
+ for (; i < req_priv->num_tds; i++) {
+ cur_td = &req_priv->td[i];
+
+ /*
+ * Clean up the cancelled USB Request. It doesn't matter what we
+ * pass for status, since the core will just overwrite it.
+ */
+ ep_ring = usbssp_request_to_transfer_ring(usbssp_data,
+ cur_td->priv_request);
+
+ usbssp_unmap_td_bounce_buffer(usbssp_data, ep_ring, cur_td);
+
+ inc_td_cnt(cur_td->priv_request);
+ if (last_td_in_request(cur_td)) {
+ usbssp_giveback_request_in_irq(usbssp_data,
+ cur_td, -ECONNRESET);
+ }
+ }
+}
+
/*
* When we get a command completion for a Stop Endpoint Command, we need to
* stop timer and clear EP_STOP_CMD_PENDING flag.
@@ -385,7 +709,6 @@ static void usbssp_handle_cmd_stop_ep(struct usbssp_udc *usbssp_data,
"CMD stop endpoint completion for ep index: %d - %s\n",
ep_index, ep->name);
-
priv_dev = &usbssp_data->devs;
ep_ctx = usbssp_get_ep_ctx(usbssp_data, priv_dev->out_ctx, ep_index);
trace_usbssp_handle_cmd_stop_ep(ep_ctx);
@@ -2273,7 +2596,8 @@ void usbssp_queue_new_dequeue_state(struct usbssp_udc *usbssp_data,
(unsigned long long)deq_state->new_deq_seg->dma,
deq_state->new_deq_ptr,
(unsigned long long)usbssp_trb_virt_to_dma(
- deq_state->new_deq_seg, deq_state->new_deq_ptr),
+ deq_state->new_deq_seg,
+ deq_state->new_deq_ptr),
deq_state->new_cycle_state);
addr = usbssp_trb_virt_to_dma(deq_state->new_deq_seg,
@@ -2284,6 +2608,7 @@ void usbssp_queue_new_dequeue_state(struct usbssp_udc *usbssp_data,
deq_state->new_deq_seg, deq_state->new_deq_ptr);
return;
}
+
ep_priv = &usbssp_data->devs.eps[ep_index];
if ((ep_priv->ep_state & SET_DEQ_PENDING)) {
dev_warn(usbssp_data->dev, "WARN Cannot submit Set TR Deq Ptr\n");
@@ -2304,10 +2629,12 @@ void usbssp_queue_new_dequeue_state(struct usbssp_udc *usbssp_data,
ep_priv->queued_deq_ptr = deq_state->new_deq_ptr;
if (deq_state->stream_id)
trb_sct = SCT_FOR_TRB(SCT_PRI_TR);
+
ret = queue_command(usbssp_data, cmd,
lower_32_bits(addr) | trb_sct | deq_state->new_cycle_state,
upper_32_bits(addr), trb_stream_id,
trb_slot_id | trb_ep_index | type, false);
+
if (ret < 0) {
usbssp_free_command(usbssp_data, cmd);
return;
@@ -545,8 +545,53 @@ int usbssp_enqueue(struct usbssp_ep *dep, struct usbssp_request *req_priv)
*/
int usbssp_dequeue(struct usbssp_ep *ep_priv, struct usbssp_request *req_priv)
{
- /*TODO: this function must be implemented*/
- return 0;
+ int ret = 0, i;
+ struct usbssp_udc *usbssp_data;
+ unsigned int ep_index;
+ struct usbssp_ring *ep_ring;
+ struct usbssp_device *priv_dev;
+ struct usbssp_ep_ctx *ep_ctx;
+
+ usbssp_data = ep_priv->usbssp_data;
+ trace_usbssp_request_dequeue(&req_priv->request);
+
+ priv_dev = &usbssp_data->devs;
+ ep_index = usbssp_get_endpoint_index(req_priv->dep->endpoint.desc);
+ ep_priv = &usbssp_data->devs.eps[ep_index];
+ ep_ring = usbssp_request_to_transfer_ring(usbssp_data, req_priv);
+
+ if (!ep_ring)
+ goto err_giveback;
+
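+ /* Index of the first TD that the DC has not completed */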
+ i = req_priv->num_tds_done;
+
+ if (i < req_priv->num_tds)
+ usbssp_dbg_trace(usbssp_data, trace_usbssp_dbg_cancel_request,
+ "Cancel request %p, dev %s, ep 0x%x, "
+ "starting at offset 0x%llx",
+ &req_priv->request, usbssp_data->gadget.name,
+ req_priv->dep->endpoint.desc->bEndpointAddress,
+ (unsigned long long) usbssp_trb_virt_to_dma(
+ req_priv->td[i].start_seg,
+ req_priv->td[i].first_trb));
+
+ /*
+ * Queue a Stop Endpoint command, but only if the endpoint
+ * is in the EP_STATE_RUNNING state.
+ */
+ ep_ctx = usbssp_get_ep_ctx(usbssp_data, priv_dev->out_ctx, ep_index);
+ if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_RUNNING) {
+ ret = usbssp_cmd_stop_ep(usbssp_data, &usbssp_data->gadget,
+ ep_priv);
+ if (ret)
+ return ret;
+ }
+
+ usbssp_remove_request(usbssp_data, req_priv, ep_index);
+ return ret;
+
+err_giveback:
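+ /* No valid transfer ring: give the request back as -ESHUTDOWN */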
+ usbssp_giveback_request_in_irq(usbssp_data, req_priv->td, -ESHUTDOWN);
+ return ret;
}
int usbssp_halt_endpoint(struct usbssp_udc *usbssp_data, struct usbssp_ep *dep,
@@ -1780,6 +1780,14 @@ int usbssp_queue_halt_endpoint(struct usbssp_udc *usbssp_data,
unsigned int ep_index);
int usbssp_queue_reset_device(struct usbssp_udc *usbssp_data,
struct usbssp_command *cmd);
+void usbssp_find_new_dequeue_state(struct usbssp_udc *usbssp_data,
+ unsigned int ep_index,
+ unsigned int stream_id,
+ struct usbssp_td *cur_td,
+ struct usbssp_dequeue_state *state);
+void usbssp_queue_new_dequeue_state(struct usbssp_udc *usbssp_data,
+ unsigned int ep_index,
+ struct usbssp_dequeue_state *deq_state);
void usbssp_handle_command_timeout(struct work_struct *work);
void usbssp_cleanup_command_queue(struct usbssp_udc *usbssp_data);
@@ -2313,4 +2321,7 @@ __le32 __iomem *usbssp_get_port_io_addr(struct usbssp_udc *usbssp_data);
void usbssp_giveback_request_in_irq(struct usbssp_udc *usbssp_data,
struct usbssp_td *cur_td, int status);
+void usbssp_remove_request(struct usbssp_udc *usbssp_data,
+ struct usbssp_request *req_priv, int ep_index);
+
#endif /* __LINUX_USBSSP_GADGET_H */
Patch adds functionality that allows removing a request from the
endpoint ring. This may cause the DC to stop USB transfers,
potentially stopping in the middle of a TRB buffer. The DC should
pick up where it left off in the TD, unless a Set Transfer Ring
Dequeue Pointer command is issued.

Signed-off-by: Pawel Laszczak <pawell@cadence.com>
---
 drivers/usb/usbssp/gadget-ring.c | 331 ++++++++++++++++++++++++++++++-
 drivers/usb/usbssp/gadget.c      |  49 ++++-
 drivers/usb/usbssp/gadget.h      |  11 +
 3 files changed, 387 insertions(+), 4 deletions(-)
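
For context, a minimal sketch (not part of the patch) of how this code
path is reached from a function driver; example_cancel is a hypothetical
helper, while usb_ep_dequeue() is the standard gadget API call that the
UDC core routes to usbssp_dequeue() above:

	#include <linux/usb/gadget.h>

	/* Hypothetical function-driver helper: cancel a queued request. */
	static void example_cancel(struct usb_ep *ep, struct usb_request *req)
	{
		int ret;

		/*
		 * usb_ep_dequeue() ends up in usbssp_dequeue(): a Stop
		 * Endpoint command is issued if the endpoint is running,
		 * the cancelled TDs are turned into no-ops or skipped via
		 * a Set TR Dequeue Pointer command, and the request is
		 * given back with -ECONNRESET.
		 */
		ret = usb_ep_dequeue(ep, req);
		if (ret)
			pr_warn("request dequeue failed: %d\n", ret);
	}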