drivers/usb/host/xhci-ring.c: lines matching +full:generic +full:-xhci (cleaned excerpts)

// SPDX-License-Identifier: GPL-2.0
/*
 * xHCI host controller driver
 *
 * ... until you reach a non-link TRB.
 */

#include <linux/dma-mapping.h>
#include "xhci.h"
#include "xhci-trace.h"

static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
                         u32 field1, u32 field2, u32 field3, u32 field4,
                         bool command_must_succeed);

/* in xhci_trb_virt_to_dma() */
    if (!seg || !trb || trb < seg->trbs)
        return 0;

    segment_offset = trb - seg->trbs;

    return seg->dma + (segment_offset * sizeof(*trb));

/* in trb_is_noop() */
    return TRB_TYPE_NOOP_LE32(trb->generic.field[3]);

/* in trb_is_link() */
    return TRB_TYPE_LINK_LE32(trb->link.control);

/* in last_trb_on_seg() */
    return trb == &seg->trbs[TRBS_PER_SEGMENT - 1];

/* in last_trb_on_ring() */
    return last_trb_on_seg(seg, trb) && (seg->next == ring->first_seg);

/* in link_trb_toggles_cycle() */
    return le32_to_cpu(trb->link.control) & LINK_TOGGLE;

/* in last_td_in_urb() */
    struct urb_priv *urb_priv = td->urb->hcpriv;

    return urb_priv->num_tds_done == urb_priv->num_tds;

/* in unhandled_event_trb() */
    return ((le32_to_cpu(ring->dequeue->event_cmd.flags) & TRB_CYCLE) ==
            ring->cycle_state);

/* in inc_td_cnt() */
    struct urb_priv *urb_priv = urb->hcpriv;

    urb_priv->num_tds_done++;

/* in trb_to_noop() */
    if (trb_is_link(trb)) {
        trb->link.control &= cpu_to_le32(~TRB_CHAIN);
    } else {
        trb->generic.field[0] = 0;
        trb->generic.field[1] = 0;
        trb->generic.field[2] = 0;
        /* Preserve only the cycle bit of this TRB */
        trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
        trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(noop_type));
    }
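
/*
 * [Editor's sketch, not part of xhci-ring.c] trb_to_noop() above keeps only
 * the cycle bit of field[3], so TRB ownership is unchanged, and then
 * overwrites the TRB type. The toy TRB below mimics that masking with plain
 * uint32_t fields; TOY_TRB_CYCLE, TOY_TRB_TYPE_SHIFT and TOY_TRB_TR_NOOP are
 * invented stand-ins for the real macros in xhci.h (the type field is
 * assumed, per the xHCI spec, to sit at bits 15:10 with No Op = 8).
 */
#include <stdint.h>
#include <stdio.h>

#define TOY_TRB_CYCLE      (1u << 0)            /* stand-in for TRB_CYCLE */
#define TOY_TRB_TYPE_SHIFT 10                   /* assumed field position */
#define TOY_TRB_TYPE(t)    ((uint32_t)(t) << TOY_TRB_TYPE_SHIFT)
#define TOY_TRB_TR_NOOP    8                    /* assumed No Op TRB type */

struct toy_trb { uint32_t field[4]; };

static void toy_trb_to_noop(struct toy_trb *trb)
{
    trb->field[0] = 0;
    trb->field[1] = 0;
    trb->field[2] = 0;
    /* keep only the cycle bit, then set the no-op type */
    trb->field[3] &= TOY_TRB_CYCLE;
    trb->field[3] |= TOY_TRB_TYPE(TOY_TRB_TR_NOOP);
}

int main(void)
{
    struct toy_trb trb = { { 0xdead, 0xbeef, 0x10, 0xabcd } };

    toy_trb_to_noop(&trb);
    printf("field[3] = 0x%x (cycle preserved: %u)\n",
           (unsigned)trb.field[3], (unsigned)(trb.field[3] & TOY_TRB_CYCLE));
    return 0;
}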
/* in next_trb() */
        *seg = (*seg)->next;
        *trb = ((*seg)->trbs);

void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
    unsigned int link_trb_count = 0;

    /* event ring doesn't have link trbs, check for last trb */
    if (ring->type == TYPE_EVENT) {
        if (!last_trb_on_seg(ring->deq_seg, ring->dequeue)) {
            ring->dequeue++;
            goto out;
        }

        if (last_trb_on_ring(ring, ring->deq_seg, ring->dequeue))
            ring->cycle_state ^= 1;

        ring->deq_seg = ring->deq_seg->next;
        ring->dequeue = ring->deq_seg->trbs;
        goto out;
    }

    /* All other rings have link trbs */
    if (!trb_is_link(ring->dequeue)) {
        if (last_trb_on_seg(ring->deq_seg, ring->dequeue))
            xhci_warn(xhci, "Missing link TRB at end of segment\n");
        else
            ring->dequeue++;
    }

    while (trb_is_link(ring->dequeue)) {
        ring->deq_seg = ring->deq_seg->next;
        ring->dequeue = ring->deq_seg->trbs;

        if (link_trb_count++ > ring->num_segs) {
            xhci_warn(xhci, "Ring is an endless link TRB loop\n");
            break;
        }
    }
out:
    trace_xhci_inc_deq(ring);
}
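
/*
 * [Editor's sketch, not part of xhci-ring.c] inc_deq() above advances the
 * software dequeue pointer one TRB and hops across link TRBs into the next
 * segment, bailing out if the ring appears to be nothing but links. The toy
 * model below uses a fixed two-segment array instead of the driver's linked
 * segments; all names are invented.
 */
#include <stdbool.h>
#include <stdio.h>

#define SEGS 2
#define TRBS 4  /* the last TRB of each segment acts as the link TRB */

struct toy_ring {
    int seg, idx;               /* software dequeue position */
    bool is_link[SEGS][TRBS];
};

/* Advance dequeue one TRB, then skip any link TRBs (cf. inc_deq()). */
static void toy_inc_deq(struct toy_ring *r)
{
    int hops = 0;

    if (!r->is_link[r->seg][r->idx])
        r->idx++;

    while (r->is_link[r->seg][r->idx]) {
        r->seg = (r->seg + 1) % SEGS;
        r->idx = 0;
        if (++hops > SEGS) {    /* endless link-TRB loop guard */
            fprintf(stderr, "ring is an endless link TRB loop\n");
            return;
        }
    }
}

int main(void)
{
    struct toy_ring r = { .seg = 0, .idx = 2 };

    r.is_link[0][TRBS - 1] = true;
    r.is_link[1][TRBS - 1] = true;

    toy_inc_deq(&r);            /* 0/2 -> 0/3 is a link -> hop to 1/0 */
    printf("dequeue at seg %d idx %d\n", r.seg, r.idx);
    return 0;
}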
/*
 * ... all 0.95 xHCI hardware can't handle the chain bit being cleared on a
 * link TRB.
 */
static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
                    bool more_trbs_coming)
{
    chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;

    if (last_trb_on_seg(ring->enq_seg, ring->enqueue)) {
        xhci_err(xhci, "Tried to move enqueue past ring segment\n");
        return;
    }

    next = ++(ring->enqueue);

    while (trb_is_link(next)) {

        if (!xhci_link_chain_quirk(xhci, ring->type)) {
            next->link.control &= cpu_to_le32(~TRB_CHAIN);
            next->link.control |= cpu_to_le32(chain);
        }

        next->link.control ^= cpu_to_le32(TRB_CYCLE);

        /* Toggle the cycle bit after the last ring segment. */
        if (link_trb_toggles_cycle(next))
            ring->cycle_state ^= 1;

        ring->enq_seg = ring->enq_seg->next;
        ring->enqueue = ring->enq_seg->trbs;
        next = ring->enqueue;

        if (link_trb_count++ > ring->num_segs) {
            xhci_warn(xhci, "%s: Ring link TRB loop\n", __func__);
            break;
        }
    }
}

/* in xhci_num_trbs_free() */
    struct xhci_segment *enq_seg = ring->enq_seg;
    union xhci_trb *enq = ring->enqueue;

    /* Ring might be empty even if enq != deq if enq is left on a link trb */
    while (trb_is_link(enq)) {
        enq_seg = enq_seg->next;
        enq = enq_seg->trbs;
    }

    /* Empty ring, common case, don't walk the segments */
    if (enq == ring->dequeue)
        return ring->num_segs * (TRBS_PER_SEGMENT - 1);

    do {
        if (ring->deq_seg == enq_seg && ring->dequeue >= enq)
            return free + (ring->dequeue - enq);
        last_on_seg = &enq_seg->trbs[TRBS_PER_SEGMENT - 1];
        free += last_on_seg - enq;
        enq_seg = enq_seg->next;
        enq = enq_seg->trbs;
    } while (i++ < ring->num_segs);
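
/*
 * [Editor's sketch, not part of xhci-ring.c] xhci_num_trbs_free() above
 * counts usable TRBs from enqueue to dequeue, where each segment contributes
 * at most TRBS_PER_SEGMENT - 1 entries because the last one is the link TRB.
 * The pure-arithmetic model below works on (segment, index) pairs instead of
 * pointers and skips the "enqueue parked on a link TRB" normalisation that
 * the real function performs first; all names are invented.
 */
#include <stdio.h>

#define SEGS         3
#define TRBS_PER_SEG 16 /* last entry of each segment is the link TRB */

static int toy_trbs_free(int enq_seg, int enq, int deq_seg, int deq)
{
    int free = 0, seg = enq_seg, pos = enq;
    int i;

    if (seg == deq_seg && pos == deq)       /* empty ring, common case */
        return SEGS * (TRBS_PER_SEG - 1);

    for (i = 0; i <= SEGS; i++) {
        if (seg == deq_seg && deq >= pos)   /* dequeue is ahead of us */
            return free + (deq - pos);
        free += (TRBS_PER_SEG - 1) - pos;   /* rest of segment, minus link */
        seg = (seg + 1) % SEGS;
        pos = 0;
    }
    return 0;                               /* inconsistent ring state */
}

int main(void)
{
    printf("%d\n", toy_trbs_free(0, 4, 0, 10));  /* same segment: 6 */
    printf("%d\n", toy_trbs_free(2, 14, 0, 3));  /* wraps around: 1 + 3 = 4 */
    return 0;
}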
static unsigned int xhci_ring_expansion_needed(struct xhci_hcd *xhci, struct xhci_ring *ring,
                                               unsigned int num_trbs)
{
    enq_used = ring->enqueue - ring->enq_seg->trbs;

    /* how many trbs will be queued past the enqueue segment? */
    trbs_past_seg = enq_used + num_trbs - (TRBS_PER_SEGMENT - 1);

    if (trbs_past_seg < 0)
        return 0;

    /* Empty ring special case, enqueue stuck on link trb while dequeue advanced */
    if (trb_is_link(ring->enqueue) && ring->enq_seg->next->trbs == ring->dequeue)
        return 0;

    new_segs = 1 + (trbs_past_seg / (TRBS_PER_SEGMENT - 1));
    seg = ring->enq_seg;

    while (new_segs > 0) {
        seg = seg->next;
        if (seg == ring->deq_seg) {
            xhci_dbg(xhci, "Adding %d trbs requires expanding ring by %d segments\n",
                     num_trbs, new_segs);
            return new_segs;
        }
        new_segs--;
    }

    return 0;
}

void xhci_ring_cmd_db(struct xhci_hcd *xhci)
{
    if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING))
        return;

    xhci_dbg(xhci, "// Ding dong!\n");

    writel(DB_VALUE_HOST, &xhci->dba->doorbell[0]);
    /* Flush PCI posted writes */
    readl(&xhci->dba->doorbell[0]);
}

static bool xhci_mod_cmd_timer(struct xhci_hcd *xhci)
{
    return mod_delayed_work(system_wq, &xhci->cmd_timer,
                            msecs_to_jiffies(xhci->current_cmd->timeout_ms));
}

static struct xhci_command *xhci_next_queued_cmd(struct xhci_hcd *xhci)
{
    return list_first_entry_or_null(&xhci->cmd_list, struct xhci_command,
                                    cmd_list);
}

/*
 * Turn all commands on command ring with status set to "aborted" to no-op trbs.
 *
 * This must be called with command ring stopped and xhci->lock held.
 */
static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
                                         struct xhci_command *cur_cmd)
{
    struct xhci_command *i_cmd;

    /* Turn all aborted commands in list to no-ops, then restart */
    list_for_each_entry(i_cmd, &xhci->cmd_list, cmd_list) {

        if (i_cmd->status != COMP_COMMAND_ABORTED)
            continue;

        i_cmd->status = COMP_COMMAND_RING_STOPPED;

        xhci_dbg(xhci, "Turn aborted command %p to no-op\n",
                 i_cmd->command_trb);

        trb_to_noop(i_cmd->command_trb, TRB_CMD_NOOP);

        /*
         * caller waiting for completion is called when command
         * completion event is received for these no-op commands
         */
    }

    xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;

    /* ring command ring doorbell to restart the command ring */
    if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) &&
        !(xhci->xhc_state & XHCI_STATE_DYING)) {
        xhci->current_cmd = cur_cmd;

        xhci_mod_cmd_timer(xhci);
        xhci_ring_cmd_db(xhci);
    }
}
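
/*
 * [Editor's sketch, not part of xhci-ring.c] As the comment above says, every
 * queued command whose status was set to "aborted" gets rewritten into a
 * no-op before the ring is restarted. The model below uses a plain array in
 * place of the kernel's cmd_list; all names are invented.
 */
#include <stdio.h>

enum toy_status { TOY_PENDING, TOY_ABORTED, TOY_RING_STOPPED };
enum toy_type   { TOY_CMD_REAL, TOY_CMD_NOOP };

struct toy_cmd {
    enum toy_status status;
    enum toy_type   type;
};

/* Turn every aborted command into a no-op, as the stopped-ring handler does. */
static void toy_handle_stopped_ring(struct toy_cmd *cmds, int n)
{
    for (int i = 0; i < n; i++) {
        if (cmds[i].status != TOY_ABORTED)
            continue;
        cmds[i].status = TOY_RING_STOPPED;
        cmds[i].type = TOY_CMD_NOOP;    /* hardware will now skip it */
    }
    /* ...the real handler then restarts the ring and re-arms its timer */
}

int main(void)
{
    struct toy_cmd cmds[3] = {
        { TOY_PENDING, TOY_CMD_REAL },
        { TOY_ABORTED, TOY_CMD_REAL },
        { TOY_PENDING, TOY_CMD_REAL },
    };

    toy_handle_stopped_ring(cmds, 3);
    printf("cmd1: type=%d status=%d\n", cmds[1].type, cmds[1].status);
    return 0;
}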
/* Must be called with xhci->lock held, releases and acquires lock back */
static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags)
{
    struct xhci_segment *new_seg = xhci->cmd_ring->deq_seg;
    union xhci_trb *new_deq = xhci->cmd_ring->dequeue;

    xhci_dbg(xhci, "Abort command ring\n");

    reinit_completion(&xhci->cmd_ring_stop_completion);

    xhci_write_64(xhci, crcr | CMD_RING_ABORT, &xhci->op_regs->cmd_ring);

    /* Section 4.6.1.2 of xHCI 1.0 spec says software should also time the
     * completion of the Command Abort operation. If CRR is not negated in 5
     * seconds then driver handles it as if host died (-ENODEV).
     * In the future we should distinguish between -ENODEV and -ETIMEDOUT
     * and try to recover a -ETIMEDOUT with a host controller reset.
     */
    ret = xhci_handshake_check_state(xhci, &xhci->op_regs->cmd_ring,
                                     CMD_RING_RUNNING, 0, 5 * 1000 * 1000,
                                     XHCI_STATE_REMOVING);
    if (ret < 0) {
        xhci_err(xhci, "Abort failed to stop command ring: %d\n", ret);
        xhci_halt(xhci);
        xhci_hc_died(xhci);
        return ret;
    }

    spin_unlock_irqrestore(&xhci->lock, flags);
    ret = wait_for_completion_timeout(&xhci->cmd_ring_stop_completion,
                                      msecs_to_jiffies(2000));
    spin_lock_irqsave(&xhci->lock, flags);
    if (!ret) {
        xhci_dbg(xhci, "No stop event for abort, ring start fail?\n");
        xhci_cleanup_command_queue(xhci);
    } else {
        xhci_handle_stopped_cmd_ring(xhci, xhci_next_queued_cmd(xhci));
    }
    return 0;
}
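
/*
 * [Editor's sketch, not part of xhci-ring.c] Per the comment above, the
 * driver writes CMD_RING_ABORT and then polls the command ring register for
 * up to five seconds before declaring the host dead. Below is that generic
 * poll-with-deadline pattern in user-space form; toy_readl(), TOY_CRR and
 * the self-clearing register are invented stand-ins for MMIO access.
 */
#include <stdio.h>
#include <time.h>

#define TOY_CRR (1u << 3)   /* pretend "Command Ring Running" bit */

static unsigned int toy_cmd_ring_reg = TOY_CRR;

static unsigned int toy_readl(void)
{
    unsigned int val = toy_cmd_ring_reg;

    toy_cmd_ring_reg = 0;   /* simulate the controller stopping the ring */
    return val;
}

/* Poll until (reg & mask) == want or the deadline passes. */
static int toy_handshake(unsigned int mask, unsigned int want, int timeout_ms)
{
    struct timespec start, now;
    long ms;

    clock_gettime(CLOCK_MONOTONIC, &start);
    for (;;) {
        if ((toy_readl() & mask) == want)
            return 0;
        clock_gettime(CLOCK_MONOTONIC, &now);
        ms = (now.tv_sec - start.tv_sec) * 1000 +
             (now.tv_nsec - start.tv_nsec) / 1000000;
        if (ms > timeout_ms)
            return -1;      /* caller treats this as a dead host */
    }
}

int main(void)
{
    printf("handshake: %d\n", toy_handshake(TOY_CRR, 0, 5000));
    return 0;
}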
void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
                           unsigned int slot_id,
                           unsigned int ep_index,
                           unsigned int stream_id)
{
    __le32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
    struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
    unsigned int ep_state = ep->ep_state;

static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
                                           unsigned int slot_id,
                                           unsigned int ep_index)
{
    struct xhci_virt_ep *ep;

    ep = &xhci->devs[slot_id]->eps[ep_index];

    /* A ring has pending URBs if its TD list is not empty */
    if (!(ep->ep_state & EP_HAS_STREAMS)) {
        if (ep->ring && !(list_empty(&ep->ring->td_list)))
            xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
        return;
    }

    for (stream_id = 1; stream_id < ep->stream_info->num_streams;
         stream_id++) {
        struct xhci_stream_info *stream_info = ep->stream_info;

        if (!list_empty(&stream_info->stream_rings[stream_id]->td_list))
            xhci_ring_ep_doorbell(xhci, slot_id, ep_index,
                                  stream_id);
    }
}

void xhci_ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
                                         unsigned int slot_id,
                                         unsigned int ep_index)
{
    ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}

static struct xhci_virt_ep *xhci_get_virt_ep(struct xhci_hcd *xhci,
                                             unsigned int slot_id,
                                             unsigned int ep_index)
{
    if (slot_id == 0 || slot_id >= MAX_HC_SLOTS) {
        xhci_warn(xhci, "Invalid slot_id %u\n", slot_id);
        return NULL;
    }
    if (ep_index >= EP_CTX_PER_DEV) {
        xhci_warn(xhci, "Invalid endpoint index %u\n", ep_index);
        return NULL;
    }
    if (!xhci->devs[slot_id]) {
        xhci_warn(xhci, "No xhci virt device for slot_id %u\n", slot_id);
        return NULL;
    }

    return &xhci->devs[slot_id]->eps[ep_index];
}

static struct xhci_ring *xhci_virt_ep_to_ring(struct xhci_hcd *xhci,
                                              struct xhci_virt_ep *ep,
                                              unsigned int stream_id)
{
    /* common case, no streams */
    if (!(ep->ep_state & EP_HAS_STREAMS))
        return ep->ring;

    if (!ep->stream_info)
        return NULL;

    if (stream_id == 0 || stream_id >= ep->stream_info->num_streams) {
        xhci_warn(xhci, "Invalid stream_id %u request for slot_id %u ep_index %u\n",
                  stream_id, ep->vdev->slot_id, ep->ep_index);
        return NULL;
    }

    return ep->stream_info->stream_rings[stream_id];
}

struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
                                              unsigned int slot_id,
                                              unsigned int ep_index,
                                              unsigned int stream_id)
{
    struct xhci_virt_ep *ep;

    ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
    if (!ep)
        return NULL;

    return xhci_virt_ep_to_ring(xhci, ep, stream_id);
}

static u64 xhci_get_hw_deq(struct xhci_hcd *xhci, struct xhci_virt_device *vdev,
                           unsigned int ep_index, unsigned int stream_id)
{
    struct xhci_ep_ctx *ep_ctx;
    struct xhci_stream_ctx *st_ctx;
    struct xhci_virt_ep *ep;

    ep = &vdev->eps[ep_index];

    if (ep->ep_state & EP_HAS_STREAMS) {
        st_ctx = &ep->stream_info->stream_ctx_array[stream_id];
        return le64_to_cpu(st_ctx->stream_ring);
    }
    ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, ep_index);
    return le64_to_cpu(ep_ctx->deq);
}

static int xhci_move_dequeue_past_td(struct xhci_hcd *xhci,
                                     unsigned int slot_id, unsigned int ep_index,
                                     unsigned int stream_id, struct xhci_td *td)
{
    struct xhci_virt_device *dev = xhci->devs[slot_id];
    struct xhci_virt_ep *ep = &dev->eps[ep_index];

    ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
                                          ep_index, stream_id);
    if (!ep_ring) {
        xhci_warn(xhci, "WARN can't find new dequeue, invalid stream ID %u\n",
                  stream_id);
        return -ENODEV;
    }

    hw_dequeue = xhci_get_hw_deq(xhci, dev, ep_index, stream_id);
    new_seg = ep_ring->deq_seg;
    new_deq = ep_ring->dequeue;

        if (new_deq == td->end_trb)
            td_last_trb_found = true;

        /* Search wrapped around, bail out */
        if (new_deq == ep->ring->dequeue) {
            xhci_err(xhci, "Error: Failed finding new dequeue state\n");
            return -EINVAL;
        }

    if (addr == 0) {
        xhci_warn(xhci, "Can't find dma of new dequeue ptr\n");
        xhci_warn(xhci, "deq seg = %p, deq ptr = %p\n", new_seg, new_deq);
        return -EINVAL;
    }

    if ((ep->ep_state & SET_DEQ_PENDING)) {
        xhci_warn(xhci, "Set TR Deq already pending, don't submit for 0x%pad\n",
                  &addr);
        return -EBUSY;
    }

    /* This function gets called from contexts where it cannot sleep */
    cmd = xhci_alloc_command(xhci, false, GFP_ATOMIC);
    if (!cmd) {
        xhci_warn(xhci, "Can't alloc Set TR Deq cmd 0x%pad\n", &addr);
        return -ENOMEM;
    }

    ret = queue_command(xhci, cmd,

    if (ret < 0) {
        xhci_free_command(xhci, cmd);
        return ret;
    }

    ep->queued_deq_seg = new_seg;
    ep->queued_deq_ptr = new_deq;

    xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                   "Set TR Deq ptr 0x%llx, cycle %u\n", addr, new_cycle);

    ep->ep_state |= SET_DEQ_PENDING;
    xhci_ring_cmd_db(xhci);
    return 0;
}
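
/*
 * [Editor's sketch, not part of xhci-ring.c] xhci_move_dequeue_past_td()
 * above walks from the hardware dequeue position until it has stepped past
 * the cancelled TD's last TRB, flipping its cycle-state bookkeeping whenever
 * it crosses a link TRB that toggles the cycle. The toy walk below models
 * the ring as one circular array; all names are invented, and the real
 * function also matches the hardware dequeue pointer along the way.
 */
#include <stdbool.h>
#include <stdio.h>

#define N 8

struct toy_trb {
    bool link;
    bool toggles_cycle;
};

/* Walk from 'deq' until just past 'end_of_td', tracking the cycle state. */
static int toy_walk_past_td(const struct toy_trb *ring, int deq,
                            int end_of_td, int *cycle)
{
    bool td_done = false;

    do {
        if (ring[deq].link && ring[deq].toggles_cycle)
            *cycle ^= 1;
        if (deq == end_of_td)
            td_done = true;
        deq = (deq + 1) % N;
    } while (!td_done);

    return deq;     /* new dequeue: first TRB after the TD */
}

int main(void)
{
    struct toy_trb ring[N] = { { false, false } };
    int cycle = 1;

    ring[N - 1].link = true;
    ring[N - 1].toggles_cycle = true;

    int deq = toy_walk_past_td(ring, 6, 7, &cycle);
    printf("new deq %d, cycle %d\n", deq, cycle);    /* prints: 0, 0 */
    return 0;
}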
/* in td_to_noop() */
    struct xhci_segment *seg = td->start_seg;
    union xhci_trb *trb = td->start_trb;

        /* flip cycle if asked to */
        if (flip_cycle && trb != td->start_trb && trb != td->end_trb)
            trb->generic.field[3] ^= cpu_to_le32(TRB_CYCLE);

        if (trb == td->end_trb)
            break;

static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
                                     struct xhci_td *cur_td, int status)
{
    struct urb *urb = cur_td->urb;
    struct urb_priv *urb_priv = urb->hcpriv;
    struct usb_hcd *hcd = bus_to_hcd(urb->dev->bus);

    if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
        xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
        if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
            if (xhci->quirks & XHCI_AMD_PLL_FIX)
                usb_amd_quirk_pll_enable();
        }
    }

static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci,
                                        struct xhci_ring *ring, struct xhci_td *td)
{
    struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
    struct xhci_segment *seg = td->bounce_seg;
    struct urb *urb = td->urb;
    size_t len;

    if (usb_urb_dir_out(urb)) {
        dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
                         DMA_TO_DEVICE);
        return;
    }

    dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
                     DMA_FROM_DEVICE);
    /* for in transfers we need to copy the data from bounce to sg */
    if (urb->num_sgs) {
        len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf,
                                   seg->bounce_len, seg->bounce_offs);
        if (len != seg->bounce_len)
            xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n",
                      len, seg->bounce_len);
    } else {
        memcpy(urb->transfer_buffer + seg->bounce_offs, seg->bounce_buf,
               seg->bounce_len);
    }
    seg->bounce_len = 0;
    seg->bounce_offs = 0;
}
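
/*
 * [Editor's sketch, not part of xhci-ring.c] For IN transfers that used a
 * bounce buffer, xhci_unmap_td_bounce_buffer() above copies the received
 * bytes back into the URB's buffer (via sg_pcopy_from_buffer() or memcpy())
 * and warns when the lengths disagree. The standalone helper below shows the
 * memcpy() path with an explicit bounds check; all names are invented.
 */
#include <stdio.h>
#include <string.h>

/* Copy a bounce buffer back into the caller's buffer after an IN transfer. */
static size_t toy_bounce_copy_back(char *dst, size_t dst_len,
                                   const char *bounce, size_t bounce_len,
                                   size_t offs)
{
    size_t n = bounce_len;

    if (offs >= dst_len)
        return 0;               /* nothing sane to copy */
    if (offs + n > dst_len)
        n = dst_len - offs;     /* never overrun the URB buffer */
    memcpy(dst + offs, bounce, n);
    return n;
}

int main(void)
{
    char urb_buf[16] = { 0 };
    const char bounce[4] = { 'a', 'b', 'c', 'd' };

    size_t n = toy_bounce_copy_back(urb_buf, sizeof(urb_buf), bounce, 4, 8);
    printf("copied %zu bytes: %.4s\n", n, urb_buf + 8);
    return 0;
}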
static void xhci_td_cleanup(struct xhci_hcd *xhci, struct xhci_td *td,
                            struct xhci_ring *ep_ring, int status)
{
    struct urb *urb;

    urb = td->urb;

    /* if a bounce buffer was used to align this td then unmap it */
    xhci_unmap_td_bounce_buffer(xhci, ep_ring, td);

    /*
     * If the TD completed abnormally with a short transfer length,
     * urb->actual_length will be a very big number (since it's unsigned).
     */
    if (urb->actual_length > urb->transfer_buffer_length) {
        xhci_warn(xhci, "URB req %u and actual %u transfer length mismatch\n",
                  urb->transfer_buffer_length, urb->actual_length);
        urb->actual_length = 0;
    }

    if (!list_empty(&td->td_list))
        list_del_init(&td->td_list);

    if (!list_empty(&td->cancelled_td_list))
        list_del_init(&td->cancelled_td_list);

    inc_td_cnt(urb);
    if (last_td_in_urb(td)) {
        if ((urb->actual_length != urb->transfer_buffer_length &&
             (urb->transfer_flags & URB_SHORT_NOT_OK)) ||
            (status != 0 && !usb_endpoint_xfer_isoc(&urb->ep->desc)))
            xhci_dbg(xhci, "Giveback URB %p, len = %d, expected = %d, status = %d\n",
                     urb, urb->actual_length,
                     urb->transfer_buffer_length, status);

        if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
            status = 0;
        xhci_giveback_urb_in_irq(xhci, td, status);
    }
}

static void xhci_dequeue_td(struct xhci_hcd *xhci, struct xhci_td *td, struct xhci_ring *ring,
                            u32 status)
{
    ring->dequeue = td->end_trb;
    ring->deq_seg = td->end_seg;
    inc_deq(xhci, ring);

    xhci_td_cleanup(xhci, td, ring, status);
}
/* in xhci_giveback_invalidated_tds() */
    list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list,
                             cancelled_td_list) {

        ring = xhci_urb_to_transfer_ring(ep->xhci, td->urb);

        if (td->cancel_status == TD_CLEARED) {
            xhci_dbg(ep->xhci, "%s: Giveback cancelled URB %p TD\n",
                     __func__, td->urb);
            xhci_td_cleanup(ep->xhci, td, ring, td->status);
        } else {
            xhci_dbg(ep->xhci, "%s: Keep cancelled URB %p TD as cancel_status is %d\n",
                     __func__, td->urb, td->cancel_status);
        }
        if (ep->xhci->xhc_state & XHCI_STATE_DYING)
            return;
    }

static int xhci_reset_halted_ep(struct xhci_hcd *xhci, unsigned int slot_id,
                                unsigned int ep_index, enum xhci_ep_reset_type reset_type)
{
    command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
    if (!command) {
        ret = -ENOMEM;
        goto done;
    }

    xhci_dbg(xhci, "%s-reset ep %u, slot %u\n",
             (reset_type == EP_HARD_RESET) ? "Hard" : "Soft",
             ep_index, slot_id);

    ret = xhci_queue_reset_ep(xhci, command, slot_id, ep_index, reset_type);
done:
    if (ret)
        xhci_err(xhci, "ERROR queuing reset endpoint for slot %d ep_index %d, %d\n",
                 slot_id, ep_index, ret);
    return ret;
}

static int xhci_handle_halted_endpoint(struct xhci_hcd *xhci,
                                       struct xhci_virt_ep *ep,
                                       struct xhci_td *td,
                                       enum xhci_ep_reset_type reset_type)
{
    unsigned int slot_id = ep->vdev->slot_id;
    int err;

    /*
     * Avoid resetting endpoint if link is inactive. Can cause host hang.
     * Device will be reset soon to recover the link so don't do anything
     */
    if (ep->vdev->flags & VDEV_PORT_ERROR)
        return -ENODEV;

    /* add td to cancelled list and let reset ep handler take care of it */
    if (reset_type == EP_HARD_RESET) {
        ep->ep_state |= EP_HARD_CLEAR_TOGGLE;
        if (td && list_empty(&td->cancelled_td_list)) {
            list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
            td->cancel_status = TD_HALTED;
        }
    }

    if (ep->ep_state & EP_HALTED) {
        xhci_dbg(xhci, "Reset ep command for ep_index %d already pending\n",
                 ep->ep_index);
        return 0;
    }

    err = xhci_reset_halted_ep(xhci, slot_id, ep->ep_index, reset_type);
    if (err)
        return err;

    ep->ep_state |= EP_HALTED;

    xhci_ring_cmd_db(xhci);

    return 0;
}

/*
 * We have the xHCI lock, so nothing can modify this list until we drop it.
 * We're also in the event handler, so we can't get re-interrupted if another
 * Stop Endpoint command completes.
 */
static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep)
{
    struct xhci_hcd *xhci;
    unsigned int slot_id = ep->vdev->slot_id;

    /*
     * This is not going to work if the hardware is changing its dequeue
     * pointers as we look at them. Completion handler will call us later.
     */
    if (ep->ep_state & SET_DEQ_PENDING)
        return 0;

    xhci = ep->xhci;

    list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, cancelled_td_list) {
        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                       "Removing canceled TD starting at 0x%llx (dma) in stream %u URB %p",
                       (unsigned long long)xhci_trb_virt_to_dma(
                               td->start_seg, td->start_trb),
                       td->urb->stream_id, td->urb);
        list_del_init(&td->td_list);
        ring = xhci_urb_to_transfer_ring(xhci, td->urb);
        if (!ring) {
            xhci_warn(xhci, "WARN Cancelled URB %p has invalid stream ID %u.\n",
                      td->urb, td->urb->stream_id);
            continue;
        }

        hw_deq = xhci_get_hw_deq(xhci, ep->vdev, ep->ep_index,
                                 td->urb->stream_id);
        hw_deq &= ~0xf;

        if (td->cancel_status == TD_HALTED || trb_in_td(xhci, td, hw_deq, false)) {
            switch (td->cancel_status) {
            case TD_CLEARED: /* TD is already no-op */
            case TD_CLEARING_CACHE: /* set TR deq command already queued */
                break;
            case TD_DIRTY: /* TD is cached, clear it */
            case TD_HALTED:
            case TD_CLEARING_CACHE_DEFERRED:
                if (cached_td) {
                    if (cached_td->urb->stream_id != td->urb->stream_id) {
                        /* Multiple streams case, defer move dq */
                        xhci_dbg(xhci,

                                 td->urb->stream_id, td->urb);
                        td->cancel_status = TD_CLEARING_CACHE_DEFERRED;
                        break;
                    }

                    /* Should never happen, but clear the TD if it does */
                    xhci_warn(xhci,
                              "Found multiple active URBs %p and %p in stream %u?\n",
                              td->urb, cached_td->urb,
                              td->urb->stream_id);
                    td_to_noop(cached_td, false);
                    cached_td->cancel_status = TD_CLEARED;
                }

                td_to_noop(td, false);
                td->cancel_status = TD_CLEARING_CACHE;
                cached_td = td;
                break;
            }
        } else {
            td_to_noop(td, false);
            td->cancel_status = TD_CLEARED;
        }
    }

    /* If there's no need to move the dequeue pointer then we're done */
    if (!cached_td)
        return 0;

    err = xhci_move_dequeue_past_td(xhci, slot_id, ep->ep_index,
                                    cached_td->urb->stream_id,
                                    cached_td);
    if (err) {
        /* Failed to move past cached td, just set cached TDs to no-op */
        list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, cancelled_td_list) {

            if (td->cancel_status != TD_CLEARING_CACHE &&
                td->cancel_status != TD_CLEARING_CACHE_DEFERRED)
                continue;
            xhci_warn(xhci, "Failed to clear cancelled cached URB %p, mark clear anyway\n",
                      td->urb);
            td_to_noop(td, false);
            td->cancel_status = TD_CLEARED;
        }
    }
    return 0;
}
/* ... Call under xhci->lock on a stopped endpoint. */

/* Only call for non-running rings without streams. */
/* in find_halted_td() */
    if (!list_empty(&ep->ring->td_list)) { /* Not streams compatible */
        hw_deq = xhci_get_hw_deq(ep->xhci, ep->vdev, ep->ep_index, 0);
        hw_deq &= ~0xf;

        td = list_first_entry(&ep->ring->td_list, struct xhci_td, td_list);
        if (trb_in_td(ep->xhci, td, hw_deq, false))
            return td;
    }
    return NULL;
/*
 * ...
 * 2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain
 *    bit cleared) so that the HW will skip over them.
 */
static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
                                    union xhci_trb *trb, u32 comp_code)
{
    if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) {
        if (!xhci->devs[slot_id])
            xhci_warn(xhci, "Stop endpoint command completion for disabled slot %u\n",
                      slot_id);
        return;
    }

    ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
    ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
    if (!ep)
        return;

    ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index);

    /*
     * Proper error code is unknown here, it would be -EPIPE if device side
     * of endpoint halted (aka STALL), and -EPROTO if not (transaction error).
     * We use -EPROTO, if device is stalled it should return a stall error on
     * next transfer, which then will return -EPIPE, and device side stall is
     * noted and cleared by class driver.
     */
        switch (GET_EP_CTX_STATE(ep_ctx)) {
        case EP_STATE_HALTED:
            xhci_dbg(xhci, "Stop ep completion raced with stall, reset ep\n");
            if (ep->ep_state & EP_HAS_STREAMS) {
                reset_type = EP_SOFT_RESET;
            } else {
                reset_type = EP_HARD_RESET;
                td = find_halted_td(ep);
                if (td)
                    td->status = -EPROTO;
            }
            err = xhci_handle_halted_endpoint(xhci, ep, td, reset_type);
            if (err)
                break;
            ep->ep_state &= ~EP_STOP_CMD_PENDING;
            return;
        case EP_STATE_STOPPED:
            /* Per xHCI 4.6.9, Stop Endpoint command on a Stopped
             * endpoint is a Context State Error, and the endpoint
             * stays Stopped. */
            if (ep->ep_state & EP_HALTED)
                break;

            if (time_is_before_jiffies(ep->stop_time + msecs_to_jiffies(100)))
                break;
            fallthrough;
        case EP_STATE_RUNNING:
            xhci_dbg(xhci, "Stop ep completion ctx error, ctx_state %d\n",
                     GET_EP_CTX_STATE(ep_ctx));

            command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
            if (!command) {
                ep->ep_state &= ~EP_STOP_CMD_PENDING;
                return;
            }
            xhci_queue_stop_endpoint(xhci, command, slot_id, ep_index, 0);
            xhci_ring_cmd_db(xhci);
            return;
        default:
            break;
        }

    ep->ep_state &= ~EP_STOP_CMD_PENDING;

    /* Otherwise ring the doorbell(s) to restart queued transfers */
    ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}
static void xhci_kill_ring_urbs(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
    struct xhci_td *cur_td;
    struct xhci_td *tmp;

    list_for_each_entry_safe(cur_td, tmp, &ring->td_list, td_list) {
        list_del_init(&cur_td->td_list);

        if (!list_empty(&cur_td->cancelled_td_list))
            list_del_init(&cur_td->cancelled_td_list);

        xhci_unmap_td_bounce_buffer(xhci, ring, cur_td);

        inc_td_cnt(cur_td->urb);
        if (last_td_in_urb(cur_td))
            xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
    }
}

static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci,
                                    int slot_id, int ep_index)
{
    struct xhci_td *cur_td;
    struct xhci_td *tmp;
    struct xhci_virt_ep *ep;
    struct xhci_ring *ring;

    ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
    if (!ep)
        return;

    if ((ep->ep_state & EP_HAS_STREAMS) ||
        (ep->ep_state & EP_GETTING_NO_STREAMS)) {
        int stream_id;

        for (stream_id = 1; stream_id < ep->stream_info->num_streams;
             stream_id++) {
            ring = ep->stream_info->stream_rings[stream_id];
            if (!ring)
                continue;

            xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                           "Killing URBs for slot ID %u, ep index %u, stream %u",
                           slot_id, ep_index, stream_id);
            xhci_kill_ring_urbs(xhci, ring);
        }
    } else {
        ring = ep->ring;
        if (!ring)
            return;
        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                       "Killing URBs for slot ID %u, ep index %u",
                       slot_id, ep_index);
        xhci_kill_ring_urbs(xhci, ring);
    }

    list_for_each_entry_safe(cur_td, tmp, &ep->cancelled_td_list,
                             cancelled_td_list) {
        list_del_init(&cur_td->cancelled_td_list);
        inc_td_cnt(cur_td->urb);

        if (last_td_in_urb(cur_td))
            xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
    }
}
/*
 * Call with xhci->lock held.
 * lock is released and re-acquired while giving back urb.
 */
void xhci_hc_died(struct xhci_hcd *xhci)
{
    int i, j;

    if (xhci->xhc_state & XHCI_STATE_DYING)
        return;

    xhci_err(xhci, "xHCI host controller not responding, assume dead\n");
    xhci->xhc_state |= XHCI_STATE_DYING;

    xhci_cleanup_command_queue(xhci);

    /* return any pending urbs, remove may be waiting for them */
    for (i = 0; i <= HCS_MAX_SLOTS(xhci->hcs_params1); i++) {
        if (!xhci->devs[i])
            continue;
        for (j = 0; j < 31; j++)
            xhci_kill_endpoint_urbs(xhci, i, j);
    }

    /* inform usb core hc died if PCI remove isn't already handling it */
    if (!(xhci->xhc_state & XHCI_STATE_REMOVING))
        usb_hc_died(xhci_to_hcd(xhci));
}
static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
                                               struct xhci_virt_device *dev,
                                               struct xhci_ring *ep_ring,
                                               unsigned int ep_index)
{
    union xhci_trb *dequeue_temp;

    dequeue_temp = ep_ring->dequeue;

    /* If we get two back-to-back stalls, and the first stalled transfer
     * ends just before a link TRB, the dequeue pointer will be left on
     * the link TRB by the code in the while loop. So we have to update
     * the dequeue pointer one segment further, or we'll jump off
     * the segment into la-la-land.
     */
    if (trb_is_link(ep_ring->dequeue)) {
        ep_ring->deq_seg = ep_ring->deq_seg->next;
        ep_ring->dequeue = ep_ring->deq_seg->trbs;
    }

    while (ep_ring->dequeue != dev->eps[ep_index].queued_deq_ptr) {
        /* We have more usable TRBs */
        ep_ring->dequeue++;
        if (trb_is_link(ep_ring->dequeue)) {
            if (ep_ring->dequeue ==
                dev->eps[ep_index].queued_deq_ptr)
                break;
            ep_ring->deq_seg = ep_ring->deq_seg->next;
            ep_ring->dequeue = ep_ring->deq_seg->trbs;
        }
        if (ep_ring->dequeue == dequeue_temp) {
            xhci_dbg(xhci, "Unable to find new dequeue pointer\n");
            break;
        }
    }
}
static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
                                    union xhci_trb *trb, u32 cmd_comp_code)
{
    ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
    stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
    ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
    if (!ep)
        return;

    ep_ring = xhci_virt_ep_to_ring(xhci, ep, stream_id);
    if (!ep_ring) {
        xhci_warn(xhci, "WARN Set TR deq ptr command for freed stream ID %u\n",
                  stream_id);
        goto cleanup;
    }

    ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index);
    slot_ctx = xhci_get_slot_ctx(xhci, ep->vdev->out_ctx);

    if (ep->ep_state & EP_HAS_STREAMS) {
        stream_ctx = &ep->stream_info->stream_ctx_array[stream_id];
        trace_xhci_handle_cmd_set_deq_stream(ep->stream_info, stream_id);
    }

    if (cmd_comp_code != COMP_SUCCESS) {
        switch (cmd_comp_code) {
        case COMP_TRB_ERROR:
            xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because of stream ID configuration\n");
            break;
        case COMP_CONTEXT_STATE_ERROR:
            xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due to incorrect slot or ep state.\n");

            slot_state = le32_to_cpu(slot_ctx->dev_state);

            xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                           "Slot state = %u, EP state = %u",
                           slot_state, ep_state);
            break;
        case COMP_SLOT_NOT_ENABLED_ERROR:
            xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because slot %u was not enabled.\n",
                      slot_id);
            break;
        default:
            xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown completion code of %u.\n",
                      cmd_comp_code);
            break;
        }
    } else {
        /* 4.6.10 deq ptr is written to the stream ctx for streams */
        if (ep->ep_state & EP_HAS_STREAMS) {
            deq = le64_to_cpu(stream_ctx->stream_ring) & SCTX_DEQ_MASK;

            /*
             * Cadence xHCI controllers store some endpoint state
             * information within Rsvd0 fields of Stream Endpoint
             * context. This field is not cleared during Set TR
             * Dequeue Pointer command, which causes XDMA to skip
             * over the transfer ring and leads to data loss on the
             * stream pipe. To fix this, the driver must clear the
             * Rsvd0 field.
             */
            if (xhci->quirks & XHCI_CDNS_SCTX_QUIRK) {
                stream_ctx->reserved[0] = 0;
                stream_ctx->reserved[1] = 0;
            }
        } else {
            deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK;
        }
        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                       "Successful Set TR Deq Ptr cmd, deq = @%08llx", deq);
        if (xhci_trb_virt_to_dma(ep->queued_deq_seg,
                                 ep->queued_deq_ptr) == deq) {
            /* Update the ring's dequeue segment and dequeue pointer
             * to reflect the new position.
             */
            update_ring_for_set_deq_completion(xhci, ep->vdev,
                                               ep_ring, ep_index);
        } else {
            xhci_warn(xhci, "Mismatch between completed Set TR Deq Ptr command & xHCI internal state.\n");
            xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n",
                      ep->queued_deq_seg, ep->queued_deq_ptr);
        }
    }

    /* HW cached TDs cleared from cache, give them back */
    list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list,
                             cancelled_td_list) {
        ep_ring = xhci_urb_to_transfer_ring(ep->xhci, td->urb);
        if (td->cancel_status == TD_CLEARING_CACHE) {
            td->cancel_status = TD_CLEARED;
            xhci_dbg(ep->xhci, "%s: Giveback cancelled URB %p TD\n",
                     __func__, td->urb);
            xhci_td_cleanup(ep->xhci, td, ep_ring, td->status);
        } else {
            xhci_dbg(ep->xhci, "%s: Keep cancelled URB %p TD as cancel_status is %d\n",
                     __func__, td->urb, td->cancel_status);
        }
    }
cleanup:
    ep->ep_state &= ~SET_DEQ_PENDING;
    ep->queued_deq_seg = NULL;
    ep->queued_deq_ptr = NULL;

    /* Check for deferred or newly cancelled TDs */
    if (!list_empty(&ep->cancelled_td_list)) {
        xhci_dbg(ep->xhci, "%s: Pending TDs to clear, continuing with invalidation\n",
                 __func__);
        xhci_invalidate_cancelled_tds(ep);

        ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
    } else {
        /* Restart any rings with pending URBs */
        xhci_dbg(ep->xhci, "%s: All TDs cleared, ring doorbell\n", __func__);
        ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
    }
}
static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
                                     union xhci_trb *trb, u32 cmd_comp_code)
{
    ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
    ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
    if (!ep)
        return;

    ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index);

    /* This command will only fail if the endpoint wasn't halted,
     * but we don't care.
     */
    xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
                   "Ignoring reset ep completion code of %u", cmd_comp_code);

    /* Clear our internal halted state */
    ep->ep_state &= ~EP_HALTED;

    /* if this was a soft reset, then restart */
    if ((le32_to_cpu(trb->generic.field[3])) & TRB_TSP)
        ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}
/* in xhci_handle_cmd_enable_slot() */
    if (cmd_comp_code == COMP_SUCCESS)
        command->slot_id = slot_id;
    else
        command->slot_id = 0;

static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id)
{
    struct xhci_virt_device *virt_dev;
    struct xhci_slot_ctx *slot_ctx;

    virt_dev = xhci->devs[slot_id];
    if (!virt_dev)
        return;

    slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);

    if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
        /* Delete default control endpoint resources */
        xhci_free_device_endpoint_resources(xhci, virt_dev, true);
}

static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id)
{
    struct xhci_virt_device *virt_dev;
    struct xhci_input_control_ctx *ctrl_ctx;
    unsigned int ep_index;
    u32 add_flags;

    virt_dev = xhci->devs[slot_id];
    if (!virt_dev)
        return;

    ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
    if (!ctrl_ctx) {
        xhci_warn(xhci, "Could not get input context, bad type.\n");
        return;
    }

    add_flags = le32_to_cpu(ctrl_ctx->add_flags);

    /* Input ctx add_flags are the endpoint index plus one */
    ep_index = xhci_last_valid_endpoint(add_flags) - 1;

    ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->out_ctx, ep_index);
}

static void xhci_handle_cmd_addr_dev(struct xhci_hcd *xhci, int slot_id)
{
    struct xhci_virt_device *vdev;
    struct xhci_slot_ctx *slot_ctx;

    vdev = xhci->devs[slot_id];
    if (!vdev)
        return;

    slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
}

static void xhci_handle_cmd_reset_dev(struct xhci_hcd *xhci, int slot_id)
{
    struct xhci_virt_device *vdev;
    struct xhci_slot_ctx *slot_ctx;

    vdev = xhci->devs[slot_id];
    if (!vdev) {
        xhci_warn(xhci, "Reset device command completion for disabled slot %u\n",
                  slot_id);
        return;
    }

    slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);

    xhci_dbg(xhci, "Completed reset device command.\n");
}

static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci,
                                       struct xhci_event_cmd *event)
{
    if (!(xhci->quirks & XHCI_NEC_HOST)) {
        xhci_warn(xhci, "WARN NEC_GET_FW command on non-NEC host\n");
        return;
    }
    xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
                   "NEC firmware version %2x.%02x",
                   NEC_FW_MAJOR(le32_to_cpu(event->status)),
                   NEC_FW_MINOR(le32_to_cpu(event->status)));
}

/* in xhci_complete_del_and_free_cmd() */
    list_del(&cmd->cmd_list);

    if (cmd->completion) {
        cmd->status = comp_code;
        cmd->comp_param = comp_param;
        complete(cmd->completion);
    } else {
        kfree(cmd);
    }

void xhci_cleanup_command_queue(struct xhci_hcd *xhci)
{
    struct xhci_command *cur_cmd, *tmp_cmd;

    xhci->current_cmd = NULL;
    list_for_each_entry_safe(cur_cmd, tmp_cmd, &xhci->cmd_list, cmd_list)
        xhci_complete_del_and_free_cmd(cur_cmd, COMP_COMMAND_ABORTED, 0);
}
void xhci_handle_command_timeout(struct work_struct *work)
{
    struct xhci_hcd *xhci;
    unsigned long flags;
    u64 hw_ring_state;
    u32 cmd_field3;
    u32 usbsts;

    xhci = container_of(to_delayed_work(work), struct xhci_hcd, cmd_timer);

    spin_lock_irqsave(&xhci->lock, flags);

    /*
     * If timeout work is pending, or current_cmd is NULL, it means we
     * raced with command completion. Command is handled so just return.
     */
    if (!xhci->current_cmd || delayed_work_pending(&xhci->cmd_timer)) {
        spin_unlock_irqrestore(&xhci->lock, flags);
        return;
    }

    cmd_field3 = le32_to_cpu(xhci->current_cmd->command_trb->generic.field[3]);
    usbsts = readl(&xhci->op_regs->status);
    xhci_dbg(xhci, "Command timeout, USBSTS:%s\n", xhci_decode_usbsts(str, usbsts));

    /* Bail out and tear down xhci if a stop endpoint command failed */
    if (TRB_FIELD_TO_TYPE(cmd_field3) == TRB_STOP_RING) {
        struct xhci_virt_ep *ep;

        xhci_warn(xhci, "xHCI host not responding to stop endpoint command\n");

        ep = xhci_get_virt_ep(xhci, TRB_TO_SLOT_ID(cmd_field3),
                              TRB_TO_EP_INDEX(cmd_field3));
        if (ep)
            ep->ep_state &= ~EP_STOP_CMD_PENDING;

        xhci_halt(xhci);
        xhci_hc_died(xhci);
        goto time_out_completed;
    }

    /* mark this command to be cancelled */
    xhci->current_cmd->status = COMP_COMMAND_ABORTED;

    /* Make sure command ring is running before aborting it */
    hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
    if (hw_ring_state == ~(u64)0) {
        xhci_hc_died(xhci);
        goto time_out_completed;
    }

    if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) &&
        (hw_ring_state & CMD_RING_RUNNING)) {
        /* Prevent new doorbell, and start command abort */
        xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
        xhci_dbg(xhci, "Command timeout\n");
        xhci_abort_cmd_ring(xhci, flags);
        goto time_out_completed;
    }

    /* host removed. Bail out */
    if (xhci->xhc_state & XHCI_STATE_REMOVING) {
        xhci_dbg(xhci, "host removed, ring start fail?\n");
        xhci_cleanup_command_queue(xhci);
        goto time_out_completed;
    }

    /* command timeout on stopped ring, ring can't be aborted */
    xhci_dbg(xhci, "Command timeout on stopped ring\n");
    xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd);

time_out_completed:
    spin_unlock_irqrestore(&xhci->lock, flags);
}
static void handle_cmd_completion(struct xhci_hcd *xhci,
                                  struct xhci_event_cmd *event)
{
    unsigned int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
    u32 status = le32_to_cpu(event->status);

    if (slot_id >= MAX_HC_SLOTS) {
        xhci_warn(xhci, "Invalid slot_id %u\n", slot_id);
        return;
    }

    cmd_dma = le64_to_cpu(event->cmd_trb);
    cmd_trb = xhci->cmd_ring->dequeue;

    trace_xhci_handle_command(xhci->cmd_ring, &cmd_trb->generic, cmd_dma);

    cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status));

    /* If CMD ring stopped we own the trbs between enqueue and dequeue */
    if (cmd_comp_code == COMP_COMMAND_RING_STOPPED) {
        complete_all(&xhci->cmd_ring_stop_completion);
        return;
    }

    cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
                                           cmd_trb);
    if (!cmd_dequeue_dma || cmd_dma != (u64)cmd_dequeue_dma) {
        xhci_warn(xhci,
                  "ERROR mismatched command completion event\n");
        return;
    }

    cmd = list_first_entry(&xhci->cmd_list, struct xhci_command, cmd_list);

    cancel_delayed_work(&xhci->cmd_timer);

    if (cmd->command_trb != xhci->cmd_ring->dequeue) {
        xhci_err(xhci,
                 "Command completion event does not match command\n");
        return;
    }

    if (cmd_comp_code == COMP_COMMAND_ABORTED) {
        xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
        if (cmd->status == COMP_COMMAND_ABORTED) {
            if (xhci->current_cmd == cmd)
                xhci->current_cmd = NULL;
        }
    }

    cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3]));
    switch (cmd_type) {
    case TRB_ENABLE_SLOT:
        xhci_handle_cmd_enable_slot(slot_id, cmd, cmd_comp_code);
        break;
    case TRB_DISABLE_SLOT:
        xhci_handle_cmd_disable_slot(xhci, slot_id);
        break;
    case TRB_CONFIG_EP:
        if (!cmd->completion)
            xhci_handle_cmd_config_ep(xhci, slot_id);
        break;
    case TRB_ADDR_DEV:
        xhci_handle_cmd_addr_dev(xhci, slot_id);
        break;
    case TRB_STOP_RING:
        WARN_ON(slot_id != TRB_TO_SLOT_ID(
                        le32_to_cpu(cmd_trb->generic.field[3])));
        if (!cmd->completion)
            xhci_handle_cmd_stop_ep(xhci, slot_id, cmd_trb,
                                    cmd_comp_code);
        break;
    case TRB_SET_DEQ:
        WARN_ON(slot_id != TRB_TO_SLOT_ID(
                        le32_to_cpu(cmd_trb->generic.field[3])));
        xhci_handle_cmd_set_deq(xhci, slot_id, cmd_trb, cmd_comp_code);
        break;
    case TRB_CMD_NOOP:
        /* Is this an aborted command turned to NO-OP? */
        if (cmd->status == COMP_COMMAND_RING_STOPPED)
            cmd_comp_code = COMP_COMMAND_RING_STOPPED;
        break;
    case TRB_RESET_EP:
        WARN_ON(slot_id != TRB_TO_SLOT_ID(
                        le32_to_cpu(cmd_trb->generic.field[3])));
        xhci_handle_cmd_reset_ep(xhci, slot_id, cmd_trb, cmd_comp_code);
        break;
    case TRB_RESET_DEV:
        /* SLOT_ID field in reset device cmd completion event TRB is 0.
         * Use the SLOT_ID from the command TRB instead (xhci 4.6.11)
         */
        slot_id = TRB_TO_SLOT_ID(
                        le32_to_cpu(cmd_trb->generic.field[3]));
        xhci_handle_cmd_reset_dev(xhci, slot_id);
        break;
    case TRB_NEC_GET_FW:
        xhci_handle_cmd_nec_get_fw(xhci, event);
        break;
    default:
        /* Skip over unknown commands on the event ring */
        xhci_info(xhci, "INFO unknown command type %d\n", cmd_type);
        break;
    }

    /* restart timer if this wasn't the last command */
    if (!list_is_singular(&xhci->cmd_list)) {
        xhci->current_cmd = list_first_entry(&cmd->cmd_list,
                                             struct xhci_command, cmd_list);
        xhci_mod_cmd_timer(xhci);
    } else if (xhci->current_cmd == cmd) {
        xhci->current_cmd = NULL;
    }

    inc_deq(xhci, xhci->cmd_ring);
}
static void handle_vendor_event(struct xhci_hcd *xhci,
                                union xhci_trb *event, u32 trb_type)
{
    xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type);
    if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST))
        handle_cmd_completion(xhci, &event->event_cmd);
}

static void handle_device_notification(struct xhci_hcd *xhci,
                                       union xhci_trb *event)
{
    u32 slot_id;
    struct usb_device *udev;

    slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->generic.field[3]));
    if (!xhci->devs[slot_id]) {
        xhci_warn(xhci, "Device Notification event for unused slot %u\n",
                  slot_id);
        return;
    }

    xhci_dbg(xhci, "Device Wake Notification event for slot ID %u\n",
             slot_id);
    udev = xhci->devs[slot_id]->udev;
    if (udev && udev->parent)
        usb_wakeup_notification(udev->parent, udev->portnum);
}
/*
 * Quirk handler for errata seen on Cavium ThunderX2 processor XHCI
 * host controller.
 * As per ThunderX2 errata-129, a USB 2 device may come up as USB 1
 * if a connection to a USB 1 device is followed by another connection
 * to a USB 2 device; resolve this by resetting the XHCI PHY.
 */
static void xhci_cavium_reset_phy_quirk(struct xhci_hcd *xhci)
{
    struct usb_hcd *hcd = xhci_to_hcd(xhci);
    u32 pll_lock_check;
    u32 retry_count = 4;

    do {
        /* Assert PHY reset */
        writel(0x6F, hcd->regs + 0x1048);

        /* De-assert the PHY reset */
        writel(0x7F, hcd->regs + 0x1048);

        pll_lock_check = readl(hcd->regs + 0x1070);
    } while (!(pll_lock_check & 0x1) && --retry_count);
}
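
/*
 * [Editor's sketch, not part of xhci-ring.c] The quirk above bounces a PHY
 * reset bit through a vendor register and then polls a PLL-lock bit with a
 * bounded retry count. Below is that bounded-poll pattern in isolation;
 * toy_readl() and the self-setting register are invented stand-ins for the
 * real MMIO reads at hcd->regs + 0x1070.
 */
#include <stdio.h>

static unsigned int toy_pll_reg;        /* pretend PLL status register */

static unsigned int toy_readl(void)
{
    unsigned int val = toy_pll_reg;

    toy_pll_reg = 0x1;  /* simulate the PLL locking after one poll */
    return val;
}

/* Poll a lock bit at most 'retries' times, as the Cavium quirk does. */
static int toy_wait_pll_lock(unsigned int retries)
{
    unsigned int pll_lock_check;

    do {
        pll_lock_check = toy_readl();
        if (pll_lock_check & 0x1)
            return 0;   /* locked */
    } while (--retries);

    return -1;          /* gave up, PHY never reported lock */
}

int main(void)
{
    printf("pll lock: %d\n", toy_wait_pll_lock(4));
    return 0;
}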
static void handle_port_status(struct xhci_hcd *xhci, union xhci_trb *event)
{
    if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS)
        xhci_warn(xhci,
                  "WARN: xHC returned failed port status event\n");

    port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
    max_ports = HCS_MAX_PORTS(xhci->hcs_params1);

    if ((port_id <= 0) || (port_id > max_ports)) {
        xhci_warn(xhci, "Port change event with invalid port ID %d\n",
                  port_id);
        return;
    }

    port = &xhci->hw_ports[port_id - 1];
    if (!port || !port->rhub || port->hcd_portnum == DUPLICATE_ENTRY) {
        xhci_warn(xhci, "Port change event, no port for port ID %u\n",
                  port_id);
        goto cleanup;
    }

    /* We might get interrupts after shared_hcd is removed */
    if (port->rhub == &xhci->usb3_rhub && xhci->shared_hcd == NULL) {
        xhci_dbg(xhci, "ignore port event for removed USB3 hcd\n");
        goto cleanup;
    }

    hcd = port->rhub->hcd;
    bus_state = &port->rhub->bus_state;
    hcd_portnum = port->hcd_portnum;
    portsc = readl(port->addr);

    xhci_dbg(xhci, "Port change event, %d-%d, id %d, portsc: 0x%x\n",
             hcd->self.busnum, hcd_portnum + 1, port_id, portsc);

    if (hcd->state == HC_STATE_SUSPENDED) {
        xhci_dbg(xhci, "resume root hub\n");
        usb_hcd_resume_root_hub(hcd);
    }

    if (hcd->speed >= HCD_USB3 &&
        (portsc & PORT_PLS_MASK) == XDEV_INACTIVE) {
        if (port->slot_id && xhci->devs[port->slot_id])
            xhci->devs[port->slot_id]->flags |= VDEV_PORT_ERROR;
    }

    if ((portsc & PORT_PLC) && (portsc & PORT_PLS_MASK) == XDEV_RESUME) {
        xhci_dbg(xhci, "port resume event for port %d\n", port_id);

        cmd_reg = readl(&xhci->op_regs->command);
        if (!(cmd_reg & CMD_RUN)) {
            xhci_warn(xhci, "xHC is not running.\n");
            goto cleanup;
        }

        if (DEV_SUPERSPEED_ANY(portsc)) {
            xhci_dbg(xhci, "remote wake SS port %d\n", port_id);

            bus_state->port_remote_wakeup |= 1 << hcd_portnum;
            xhci_test_and_clear_bit(xhci, port, PORT_PLC);
            usb_hcd_start_port_resume(&hcd->self, hcd_portnum);
            xhci_set_link_state(xhci, port, XDEV_U0);

        } else if (!test_bit(hcd_portnum, &bus_state->resuming_ports)) {
            xhci_dbg(xhci, "resume HS port %d\n", port_id);
            port->resume_timestamp = jiffies +
                msecs_to_jiffies(USB_RESUME_TIMEOUT);
            set_bit(hcd_portnum, &bus_state->resuming_ports);
            /*
             * Do the rest in GetPortStatus after resume time delay.
             * Avoid polling roothub status before that so that a
             * usb device auto-resume latency around ~40ms.
             */
            set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
            mod_timer(&hcd->rh_timer,
                      port->resume_timestamp);
            usb_hcd_start_port_resume(&hcd->self, hcd_portnum);
        }
    }

    if ((portsc & PORT_PLC) && (portsc & PORT_PLS_MASK) == XDEV_U0) {
        xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
        complete(&port->u3exit_done);
        /*
         * ... U3Exit state after a host-initiated resume. If it's a device
         * initiated remote wake, don't pass up the link state change ...
         */
        if (port->slot_id && xhci->devs[port->slot_id])
            xhci_ring_device(xhci, port->slot_id);
        if (bus_state->port_remote_wakeup & (1 << hcd_portnum)) {
            xhci_test_and_clear_bit(xhci, port, PORT_PLC);
            usb_wakeup_notification(hcd->self.root_hub,
                                    hcd_portnum + 1);
        }
    }

    /*
     * Check to see if xhci-hub.c is waiting on RExit to U0 transition (or
     * ...
     */
    if (hcd->speed < HCD_USB3 && port->rexit_active) {
        complete(&port->rexit_done);
        port->rexit_active = false;
        return;
    }

cleanup:
    if (hcd->speed < HCD_USB3) {
        xhci_test_and_clear_bit(xhci, port, PORT_PLC);
        if ((xhci->quirks & XHCI_RESET_PLL_ON_DISCONNECT) &&
            (portsc & PORT_CSC))
            xhci_cavium_reset_phy_quirk(xhci);
    }

    /*
     * xHCI port-status-change events occur when the "or" of all the
     * status-change bits in the portsc register changes from 0 to 1.
     */
    xhci_dbg(xhci, "%s: starting usb%d port polling.\n",
             __func__, hcd->self.busnum);
    set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
    spin_unlock(&xhci->lock);
    /* Pass this up to the core */
    usb_hcd_poll_rh_status(hcd);
    spin_lock(&xhci->lock);
}
struct xhci_segment *trb_in_td(struct xhci_hcd *xhci, struct xhci_td *td, dma_addr_t suspect_dma,
                               bool debug)
{
    start_dma = xhci_trb_virt_to_dma(td->start_seg, td->start_trb);
    cur_seg = td->start_seg;

    do {

        end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
                                           &cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
        /* If the end TRB isn't in this segment, this is set to 0 */
        end_trb_dma = xhci_trb_virt_to_dma(cur_seg, td->end_trb);

        if (debug)
            xhci_warn(xhci,
                      "Looking for event-dma %016llx trb-start %016llx trb-end %016llx seg-start %016llx seg-end %016llx\n",
                      (unsigned long long)suspect_dma,
                      (unsigned long long)start_dma,
                      (unsigned long long)end_trb_dma,
                      (unsigned long long)cur_seg->dma,
                      (unsigned long long)end_seg_dma);

            /* Case for one segment with a TD wrapped around to the top */
            if ((suspect_dma >= start_dma &&
                 suspect_dma <= end_seg_dma) ||
                (suspect_dma >= cur_seg->dma &&
                 suspect_dma <= end_trb_dma))
                return cur_seg;

        cur_seg = cur_seg->next;
        start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
    } while (cur_seg != td->start_seg);

    return NULL;
}
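
/*
 * [Editor's sketch, not part of xhci-ring.c] trb_in_td() above decides
 * whether a completion event's DMA address falls within a TD by comparing it
 * against per-segment [first TRB, last TRB] DMA ranges. The helper below
 * shows that range test for the simple case where the TD's start and end
 * TRBs live in one segment; the 16-byte TRB size matches the xHCI spec, and
 * all names are invented.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_TRB_SIZE 16     /* one TRB is 16 bytes */

/* Is 'suspect' one of the TRBs in [td_start, td_end] (inclusive)? */
static bool toy_dma_in_td(uint64_t td_start, uint64_t td_end, uint64_t suspect)
{
    return suspect >= td_start && suspect <= td_end;
}

int main(void)
{
    uint64_t seg   = 0x1000;                    /* segment base DMA */
    uint64_t start = seg + 2 * TOY_TRB_SIZE;    /* TD's first TRB */
    uint64_t end   = seg + 5 * TOY_TRB_SIZE;    /* TD's last TRB */

    printf("%d %d\n",
           (int)toy_dma_in_td(start, end, seg + 3 * TOY_TRB_SIZE),   /* 1 */
           (int)toy_dma_in_td(start, end, seg + 9 * TOY_TRB_SIZE));  /* 0 */
    return 0;
}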
static void xhci_clear_hub_tt_buffer(struct xhci_hcd *xhci, struct xhci_td *td,
                                     struct xhci_virt_ep *ep)
{
    /*
     * As part of low/full-speed endpoint-halt processing
     * we must clear the TT buffer (11.17.5).
     */
    if (td->urb->dev->tt && !usb_pipeint(td->urb->pipe) &&
        (td->urb->dev->tt->hub != xhci_to_hcd(xhci)->self.root_hub) &&
        !(ep->ep_state & EP_CLEARING_TT)) {
        ep->ep_state |= EP_CLEARING_TT;
        td->urb->ep->hcpriv = td->urb->dev;
        if (usb_hub_clear_tt_buffer(td->urb))
            ep->ep_state &= ~EP_CLEARING_TT;
    }
}

/*
 * Check if xhci internal endpoint state has gone to a "halt" state due to an
 * ...
 */

int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
{
    if (trb_comp_code >= 224 && trb_comp_code <= 255) {
        /* Vendor defined "informational" completion code,
         * treat as not-an-error.
         */
        xhci_dbg(xhci, "Vendor defined info completion code %u\n",
                 trb_comp_code);
        xhci_dbg(xhci, "Treating code as success.\n");
        return 1;
    }
    return 0;
}
static void finish_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
                      struct xhci_ring *ep_ring, struct xhci_td *td,
                      u32 trb_comp_code)
{
    struct xhci_ep_ctx *ep_ctx;

    ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep->ep_index);

    switch (trb_comp_code) {
    case COMP_USB_TRANSACTION_ERROR:
    case COMP_BABBLE_DETECTED_ERROR:
    case COMP_SPLIT_TRANSACTION_ERROR:
        if (GET_EP_CTX_STATE(ep_ctx) != EP_STATE_HALTED) {
            if ((ep->ep_state & EP_HALTED) &&
                !list_empty(&td->cancelled_td_list)) {
                xhci_dbg(xhci, "Already resolving halted ep for 0x%llx\n",
                         (unsigned long long)xhci_trb_virt_to_dma(
                                 td->start_seg, td->start_trb));
                return;
            }
            break;
        }

        xhci_clear_hub_tt_buffer(xhci, td, ep);
        xhci_handle_halted_endpoint(xhci, ep, td, EP_HARD_RESET);
        return;
    case COMP_STALL_ERROR:
        /*
         * xhci internal endpoint state will go to a "halt" state for
         * any stall, including default control pipe protocol stall.
         */
        if (ep->ep_index != 0)
            xhci_clear_hub_tt_buffer(xhci, td, ep);

        xhci_handle_halted_endpoint(xhci, ep, td, EP_HARD_RESET);
        return;
    default:
        break;
    }

    xhci_dequeue_td(xhci, td, ep_ring, td->status);
}

/* sum trb lengths from the first trb up to stop_trb, _excluding_ stop_trb */
static u32 sum_trb_lengths(struct xhci_td *td, union xhci_trb *stop_trb)
{
    u32 sum;
    union xhci_trb *trb = td->start_trb;
    struct xhci_segment *seg = td->start_seg;

    for (sum = 0; trb != stop_trb; next_trb(&seg, &trb)) {
        if (!trb_is_noop(trb) && !trb_is_link(trb))
            sum += TRB_LEN(le32_to_cpu(trb->generic.field[2]));
    }
    return sum;
}
/* Process ctrl tds, update urb status and actual_length. */
static void process_ctrl_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
                            struct xhci_ring *ep_ring, struct xhci_td *td,
                            union xhci_trb *ep_trb, struct xhci_transfer_event *event)
{
    trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(ep_trb->generic.field[3]));
    ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep->ep_index);
    trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
    requested = td->urb->transfer_buffer_length;
    remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));

    switch (trb_comp_code) {
    case COMP_SUCCESS:
        if (trb_type != TRB_STATUS) {
            xhci_warn(xhci, "WARN: Success on ctrl %s TRB without IOC set?\n",
                      (trb_type == TRB_DATA) ? "data" : "setup");
            td->status = -ESHUTDOWN;
            break;
        }
        td->status = 0;
        break;
    case COMP_SHORT_PACKET:
        td->status = 0;
        break;
    case COMP_STOPPED_SHORT_PACKET:
        if (trb_type == TRB_DATA || trb_type == TRB_NORMAL)
            td->urb->actual_length = remaining;
        else
            xhci_warn(xhci, "WARN: Stopped Short Packet on ctrl setup or status TRB\n");
        goto finish_td;
    case COMP_STOPPED:
        switch (trb_type) {
        case TRB_SETUP:
            td->urb->actual_length = 0;
            goto finish_td;
        case TRB_DATA:
        case TRB_NORMAL:
            td->urb->actual_length = requested - remaining;
            goto finish_td;
        case TRB_STATUS:
            td->urb->actual_length = requested;
            goto finish_td;
        default:
            xhci_warn(xhci, "WARN: unexpected TRB Type %d\n",
                      trb_type);
            goto finish_td;
        }
    default:

        xhci_dbg(xhci, "TRB error %u, halted endpoint index = %u\n",
                 trb_comp_code, ep->ep_index);
        fallthrough;
    case COMP_STALL_ERROR:
        /* Did we transfer part of the data (middle) phase? */
        if (trb_type == TRB_DATA || trb_type == TRB_NORMAL)
            td->urb->actual_length = requested - remaining;
        else if (!td->urb_length_set)
            td->urb->actual_length = 0;
        goto finish_td;
    }

    /*
     * if on data stage then update the actual_length of the URB and flag it
     * as set, so it won't be overwritten in the event for the last TRB.
     */
    if (trb_type == TRB_DATA ||
        trb_type == TRB_NORMAL) {
        td->urb_length_set = true;
        td->urb->actual_length = requested - remaining;
        xhci_dbg(xhci, "Waiting for status stage event\n");
        return;
    }

    /* at status stage */
    if (!td->urb_length_set)
        td->urb->actual_length = requested;

finish_td:
    finish_td(xhci, ep, ep_ring, td, trb_comp_code);
}
static void process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
                            struct xhci_ring *ep_ring, struct xhci_td *td,
                            union xhci_trb *ep_trb, struct xhci_transfer_event *event)
{
    trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
    urb_priv = td->urb->hcpriv;
    idx = urb_priv->num_tds_done;
    frame = &td->urb->iso_frame_desc[idx];
    requested = frame->length;
    remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
    ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));
    short_framestatus = td->urb->transfer_flags & URB_SHORT_NOT_OK ?
                        -EREMOTEIO : 0;

    switch (trb_comp_code) {
    case COMP_SUCCESS:
        /* Don't overwrite status if TD had an error, see xHCI 4.9.1 */
        if (td->error_mid_td)
            break;
        if (remaining) {
            frame->status = short_framestatus;
            break;
        }
        frame->status = 0;
        break;
    case COMP_SHORT_PACKET:
        frame->status = short_framestatus;
        break;
    case COMP_BANDWIDTH_OVERRUN_ERROR:
        frame->status = -ECOMM;
        break;
    case COMP_BABBLE_DETECTED_ERROR:
        frame->status = -EOVERFLOW;
        if (ep_trb != td->end_trb)
            td->error_mid_td = true;
        break;
    case COMP_INCOMPATIBLE_DEVICE_ERROR:
    case COMP_STALL_ERROR:
        frame->status = -EPROTO;
        break;
    case COMP_USB_TRANSACTION_ERROR:
        frame->status = -EPROTO;

        if (ep_trb != td->end_trb)
            td->error_mid_td = true;
        break;
    case COMP_STOPPED_SHORT_PACKET:
        frame->status = short_framestatus;
        break;
    default:
        frame->status = -1;
        break;
    }

    if (td->urb_length_set)
        goto finish_td;

    if (sum_trbs_for_length)
        frame->actual_length = sum_trb_lengths(td, ep_trb) +
                               ep_trb_len - remaining;
    else
        frame->actual_length = requested;

    td->urb->actual_length += frame->actual_length;

    /* Don't give back TD yet if we encountered an error mid TD */
    if (td->error_mid_td && ep_trb != td->end_trb) {
        xhci_dbg(xhci, "Error mid isoc TD, wait for final completion event\n");
        td->urb_length_set = true;
        return;
    }
finish_td:
    finish_td(xhci, ep, ep_ring, td, trb_comp_code);
}
static void skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
			 struct xhci_virt_ep *ep, int status)
{
	/* ... */
	urb_priv = td->urb->hcpriv;
	idx = urb_priv->num_tds_done;
	frame = &td->urb->iso_frame_desc[idx];

	/* The transfer is partly done. */
	frame->status = -EXDEV;
	/* calc actual length */
	frame->actual_length = 0;

	xhci_dequeue_td(xhci, td, ep->ring, status);
}
static void process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
				 struct xhci_ring *ep_ring, struct xhci_td *td,
				 union xhci_trb *ep_trb, struct xhci_transfer_event *event)
{
	/* ... */
	slot_ctx = xhci_get_slot_ctx(xhci, ep->vdev->out_ctx);
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
	remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
	ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));
	requested = td->urb->transfer_buffer_length;

	switch (trb_comp_code) {
	case COMP_SUCCESS:
		ep->err_count = 0;
		/* handle success with untransferred data as short packet */
		if (ep_trb != td->end_trb || remaining) {
			xhci_warn(xhci, "WARN Successful completion on short TX\n");
			xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n",
				 td->urb->ep->desc.bEndpointAddress,
				 requested, remaining);
		}
		td->status = 0;
		break;
	case COMP_SHORT_PACKET:
		/* ... */
		td->status = 0;
		break;
	case COMP_STOPPED_SHORT_PACKET:
		td->urb->actual_length = remaining;
		goto finish_td;
	case COMP_STOPPED_LENGTH_INVALID:
		/* stopped on ep trb with invalid length, exclude it */
		td->urb->actual_length = sum_trb_lengths(td, ep_trb);
		goto finish_td;
	case COMP_USB_TRANSACTION_ERROR:
		if (xhci->quirks & XHCI_NO_SOFT_RETRY ||
		    (ep->err_count++ > MAX_SOFT_RETRY) ||
		    le32_to_cpu(slot_ctx->tt_info) & TT_SLOT)
			break;

		td->status = 0;

		xhci_handle_halted_endpoint(xhci, ep, td, EP_SOFT_RESET);
		return;
	default:
		/* do nothing */
		break;
	}

	if (ep_trb == td->end_trb)
		td->urb->actual_length = requested - remaining;
	else
		td->urb->actual_length =
			sum_trb_lengths(td, ep_trb) +
			ep_trb_len - remaining;
finish_td:
	if (remaining > requested) {
		xhci_warn(xhci, "bad transfer trb length %d in event trb\n",
			  remaining);
		td->urb->actual_length = 0;
	}

	finish_td(xhci, ep, ep_ring, td, trb_comp_code);
}
/* Transfer events which don't point to a transfer TRB, see xhci 4.17.4 */
static int handle_transferless_tx_event(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
					u32 trb_comp_code)
{
	switch (trb_comp_code) {
	/* ... */
		xhci_dbg(xhci, "Stream transaction error ep %u no id\n", ep->ep_index);
		if (ep->err_count++ > MAX_SOFT_RETRY)
			xhci_handle_halted_endpoint(xhci, ep, NULL, EP_HARD_RESET);
		else
			xhci_handle_halted_endpoint(xhci, ep, NULL, EP_SOFT_RESET);
		break;
	/* ... */
	default:
		xhci_err(xhci, "Transfer event %u for unknown stream ring slot %u ep %u\n",
			 trb_comp_code, ep->vdev->slot_id, ep->ep_index);
		return -ENODEV;
	}
	return 0;
}
static int handle_tx_event(struct xhci_hcd *xhci,
			   struct xhci_interrupter *ir,
			   struct xhci_transfer_event *event)
{
	/* ... */
	int status = -EINPROGRESS;
	/* ... */

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
	ep_trb_dma = le64_to_cpu(event->buffer);

	ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
	if (!ep) {
		xhci_err(xhci, "ERROR Invalid Transfer event\n");
		goto err_out;
	}

	ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index);

	if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) {
		xhci_err(xhci,
			 "ERROR Transfer event for disabled endpoint slot %u ep %u\n",
			 slot_id, ep_index);
		goto err_out;
	}

	ep_ring = xhci_dma_to_transfer_ring(ep, ep_trb_dma);
	if (!ep_ring)
		return handle_transferless_tx_event(xhci, ep, trb_comp_code);

	/* Look for common error cases */
	switch (trb_comp_code) {
	case COMP_SUCCESS:
		if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
			trb_comp_code = COMP_SHORT_PACKET;
			xhci_dbg(xhci, "Successful completion on short TX for slot %u ep %u with last td short %d\n",
				 slot_id, ep_index, ep_ring->last_td_was_short);
		}
		break;
	case COMP_STOPPED:
		xhci_dbg(xhci, "Stopped on Transfer TRB for slot %u ep %u\n",
			 slot_id, ep_index);
		break;
	case COMP_STOPPED_LENGTH_INVALID:
		xhci_dbg(xhci,
			 "Stopped on No-op or Link TRB for slot %u ep %u\n",
			 slot_id, ep_index);
		break;
	case COMP_STOPPED_SHORT_PACKET:
		xhci_dbg(xhci,
			 "Stopped with short packet transfer detected for slot %u ep %u\n",
			 slot_id, ep_index);
		break;
	case COMP_STALL_ERROR:
		xhci_dbg(xhci, "Stalled endpoint for slot %u ep %u\n", slot_id,
			 ep_index);
		status = -EPIPE;
		break;
	case COMP_SPLIT_TRANSACTION_ERROR:
		xhci_dbg(xhci, "Split transaction error for slot %u ep %u\n",
			 slot_id, ep_index);
		status = -EPROTO;
		break;
	case COMP_USB_TRANSACTION_ERROR:
		xhci_dbg(xhci, "Transfer error for slot %u ep %u on endpoint\n",
			 slot_id, ep_index);
		status = -EPROTO;
		break;
	case COMP_BABBLE_DETECTED_ERROR:
		xhci_dbg(xhci, "Babble error for slot %u ep %u on endpoint\n",
			 slot_id, ep_index);
		status = -EOVERFLOW;
		break;
	case COMP_TRB_ERROR:
		xhci_warn(xhci,
			  "WARN: TRB error for slot %u ep %u on endpoint\n",
			  slot_id, ep_index);
		status = -EILSEQ;
		break;
	case COMP_DATA_BUFFER_ERROR:
		xhci_warn(xhci,
			  "WARN: HC couldn't access mem fast enough for slot %u ep %u\n",
			  slot_id, ep_index);
		status = -ENOSR;
		break;
	case COMP_BANDWIDTH_OVERRUN_ERROR:
		xhci_warn(xhci,
			  "WARN: bandwidth overrun event for slot %u ep %u on endpoint\n",
			  slot_id, ep_index);
		break;
	case COMP_ISOCH_BUFFER_OVERRUN:
		xhci_warn(xhci,
			  "WARN: buffer overrun event for slot %u ep %u on endpoint\n",
			  slot_id, ep_index);
		break;
	case COMP_RING_UNDERRUN:
		/*
		 * When the Isoch ring is empty, the xHC generates a Ring
		 * Overrun Event for IN and a Ring Underrun Event for OUT
		 * isoc endpoints.
		 */
		xhci_dbg(xhci, "Underrun event on slot %u ep %u\n", slot_id, ep_index);
		if (ep->skip)
			break;
		return 0;
	case COMP_RING_OVERRUN:
		xhci_dbg(xhci, "Overrun event on slot %u ep %u\n", slot_id, ep_index);
		if (ep->skip)
			break;
		return 0;
	case COMP_MISSED_SERVICE_ERROR:
		/* One or more isoc TDs may have been missed by the xHC. */
		ep->skip = true;
		xhci_dbg(xhci,
			 "Miss service interval error for slot %u ep %u, set skip flag\n",
			 slot_id, ep_index);
		return 0;
	case COMP_NO_PING_RESPONSE_ERROR:
		ep->skip = true;
		xhci_dbg(xhci,
			 "No Ping response error for slot %u ep %u, Skip one Isoc TD\n",
			 slot_id, ep_index);
		return 0;
	case COMP_INCOMPATIBLE_DEVICE_ERROR:
		xhci_warn(xhci,
			  "WARN: detect an incompatible device for slot %u ep %u\n",
			  slot_id, ep_index);
		status = -EPROTO;
		break;
	default:
		if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
			status = 0;
			break;
		}
		xhci_warn(xhci,
			  "ERROR Unknown event condition %u for slot %u ep %u , HC probably busted\n",
			  trb_comp_code, slot_id, ep_index);
		if (ep->skip)
			break;
		return 0;
	}
	/*
	 * xhci 4.10.2 states isoc endpoints should continue
	 * processing the next TD if there was an error mid TD.
	 * ...
	 * xhci 4.9.1 states that if there are errors in multi-TRB
	 * TDs ...
	 */
	td = list_first_entry_or_null(&ep_ring->td_list, struct xhci_td, td_list);

	if (td && td->error_mid_td && !trb_in_td(xhci, td, ep_trb_dma, false)) {
		xhci_dbg(xhci, "Missing TD completion event after mid TD error\n");
		xhci_dequeue_td(xhci, td, ep_ring, td->status);
	}

	if (list_empty(&ep_ring->td_list)) {
		/* ... */
		if (trb_comp_code != COMP_STOPPED &&
		    trb_comp_code != COMP_STOPPED_LENGTH_INVALID &&
		    !ep_ring->last_td_was_short) {
			xhci_warn(xhci, "Event TRB for slot %u ep %u with no TDs queued\n",
				  slot_id, ep_index);
		}

		ep->skip = false;
		goto check_endpoint_halted;
	}

	do {
		td = list_first_entry(&ep_ring->td_list, struct xhci_td,
				      td_list);

		/* Is this a TRB in the currently executing TD? */
		ep_seg = trb_in_td(xhci, td, ep_trb_dma, false);

		if (!ep_seg) {
			if (ep->skip && usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
				/* this event is unlikely to match any TD, don't skip them all */
				skip_isoc_td(xhci, td, ep, status);
				if (!list_empty(&ep_ring->td_list))
					continue;

				xhci_dbg(xhci, "All TDs skipped for slot %u ep %u. Clear skip flag.\n",
					 slot_id, ep_index);
				ep->skip = false;
				td = NULL;
				goto check_endpoint_halted;
			}

			/*
			 * Skip the Force Stopped Event. The 'ep_trb' of FSE is not in the current
			 * TD pointed to by 'ep_ring->dequeue' because the hardware dequeue
			 * pointer is still at the previous TRB of the current TD.
			 * ...
			 */
			if (trb_comp_code == COMP_STOPPED ||
			    trb_comp_code == COMP_STOPPED_LENGTH_INVALID)
				return 0;

			/*
			 * Some hosts give a spurious success event after a short
			 * transfer. Ignore it.
			 */
			if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
			    ep_ring->last_td_was_short) {
				ep_ring->last_td_was_short = false;
				return 0;
			}

			/* HC is busted, give up! */
			xhci_err(xhci,
				 "ERROR Transfer event TRB DMA ptr not part of current TD ep_index %d comp_code %u\n",
				 ep_index, trb_comp_code);
			trb_in_td(xhci, td, ep_trb_dma, true);

			return -ESHUTDOWN;
		}

		if (ep->skip) {
			xhci_dbg(xhci,
				 "Found td. Clear skip flag for slot %u ep %u.\n",
				 slot_id, ep_index);
			ep->skip = false;
		}

		/*
		 * If ep->skip is set, it means there are missed tds on the
		 * endpoint ring that need to be taken care of. Process them as
		 * short transfers until we reach the TD pointed to by the event.
		 */
	} while (ep->skip);

	if (trb_comp_code == COMP_SHORT_PACKET)
		ep_ring->last_td_was_short = true;
	else
		ep_ring->last_td_was_short = false;

	ep_trb = &ep_seg->trbs[(ep_trb_dma - ep_seg->dma) / sizeof(*ep_trb)];

	/*
	 * No-op TRB could trigger interrupts in a case where a URB was killed
	 * and a STALL_ERROR happens right after the endpoint ring stopped.
	 * Reset the halted endpoint, otherwise it stays stalled indefinitely.
	 */
	if (trb_is_noop(ep_trb))
		goto check_endpoint_halted;

	td->status = status;

	/* update the urb's actual_length and give back to the core */
	if (usb_endpoint_xfer_control(&td->urb->ep->desc))
		process_ctrl_td(xhci, ep, ep_ring, td, ep_trb, event);
	else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
		process_isoc_td(xhci, ep, ep_ring, td, ep_trb, event);
	else
		process_bulk_intr_td(xhci, ep, ep_ring, td, ep_trb, event);

	return 0;

check_endpoint_halted:
	if (xhci_halted_host_endpoint(ep_ctx, trb_comp_code))
		xhci_handle_halted_endpoint(xhci, ep, td, EP_HARD_RESET);

	return 0;

err_out:
	/* for usb debug message */
	xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
		 (unsigned long long)xhci_trb_virt_to_dma(
			 ir->event_ring->deq_seg,
			 ir->event_ring->dequeue),
		 lower_32_bits(le64_to_cpu(event->buffer)),
		 upper_32_bits(le64_to_cpu(event->buffer)),
		 le32_to_cpu(event->transfer_len),
		 le32_to_cpu(event->flags));
	return -ENODEV;
}
/*
 * This function handles one OS-owned event on the event ring. It may drop
 * xhci->lock between event processing (e.g. to pass up port status changes).
 */
static int xhci_handle_event_trb(struct xhci_hcd *xhci, struct xhci_interrupter *ir,
				 union xhci_trb *event)
{
	u32 trb_type;

	trace_xhci_handle_event(ir->event_ring, &event->generic,
				xhci_trb_virt_to_dma(ir->event_ring->deq_seg,
						     ir->event_ring->dequeue));

	/* Barrier between reading the TRB_CYCLE (valid) flag and any reads of
	 * the event's flags/data below. */
	rmb();
	trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->event_cmd.flags));

	switch (trb_type) {
	case TRB_COMPLETION:
		handle_cmd_completion(xhci, &event->event_cmd);
		break;
	case TRB_PORT_STATUS:
		handle_port_status(xhci, event);
		break;
	case TRB_TRANSFER:
		handle_tx_event(xhci, ir, &event->trans_event);
		break;
	case TRB_DEV_NOTE:
		handle_device_notification(xhci, event);
		break;
	default:
		if (trb_type >= TRB_VENDOR_DEFINED_LOW)
			handle_vendor_event(xhci, event, trb_type);
		else
			xhci_warn(xhci, "ERROR unknown event type %d\n", trb_type);
	}

	/* Any of the above functions may drop and re-acquire the lock, so check
	 * to make sure a watchdog timer didn't mark the host as non-responsive.
	 */
	if (xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_dbg(xhci, "xHCI host dying, returning from event handler.\n");
		return -ENODEV;
	}

	return 0;
}
/*
 * Update Event Ring Dequeue Pointer:
 * - When all events have finished
 * - To avoid "Event Ring Full Error" condition
 */
static void xhci_update_erst_dequeue(struct xhci_hcd *xhci,
				     struct xhci_interrupter *ir,
				     bool clear_ehb)
{
	u64 temp_64 = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
	dma_addr_t deq = xhci_trb_virt_to_dma(ir->event_ring->deq_seg,
					      ir->event_ring->dequeue);
	if (!deq)
		xhci_warn(xhci, "WARN something wrong with SW event ring dequeue ptr\n");
	/* ... */
	/* Update HC event ring dequeue pointer */
	temp_64 = ir->event_ring->deq_seg->num & ERST_DESI_MASK;
	temp_64 |= deq & ERST_PTR_MASK;
	if (clear_ehb) /* clear the RW1C Event Handler Busy flag */
		temp_64 |= ERST_EHB;
	xhci_write_64(xhci, temp_64, &ir->ir_set->erst_dequeue);
}
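/*
 * Standalone sketch (not driver code) of how the 64-bit ERDP register
 * value written above is laid out per the xHCI spec: bits 2:0 hold the
 * ERST segment index (DESI), bit 3 is the write-1-to-clear Event Handler
 * Busy flag, and bits 63:4 are the dequeue pointer itself. The mask names
 * mirror the driver's; the example values are illustrative assumptions.
 */
#include <stdio.h>
#include <stdint.h>

#define SK_ERST_DESI_MASK 0x7ULL
#define SK_ERST_EHB       (1ULL << 3)
#define SK_ERST_PTR_MASK  (~0xFULL)

int main(void)
{
	uint64_t deq = 0x12345670ULL; /* 16-byte aligned dequeue pointer */
	uint64_t seg_num = 1;         /* second ERST segment */
	uint64_t erdp = (seg_num & SK_ERST_DESI_MASK) | (deq & SK_ERST_PTR_MASK);

	erdp |= SK_ERST_EHB; /* acknowledge: clear the busy flag */
	printf("ERDP = %#llx\n", (unsigned long long)erdp);
	return 0;
}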
static void xhci_clear_interrupt_pending(struct xhci_interrupter *ir)
{
	if (!ir->ip_autoclear) {
		u32 irq_pending;

		irq_pending = readl(&ir->ir_set->irq_pending);
		irq_pending |= IMAN_IP;
		writel(irq_pending, &ir->ir_set->irq_pending);
	}
}
/*
 * Handle all OS-owned events on an interrupter event ring. It may drop
 * and reacquire xhci->lock between event processing.
 */
static int xhci_handle_events(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
{
	int event_loop = 0;
	int err;
	u64 temp;

	xhci_clear_interrupt_pending(ir);

	/* Event ring hasn't been allocated yet. */
	if (!ir->event_ring || !ir->event_ring->dequeue) {
		xhci_err(xhci, "ERROR interrupter event ring not ready\n");
		return -ENOMEM;
	}

	if (xhci->xhc_state & XHCI_STATE_DYING ||
	    xhci->xhc_state & XHCI_STATE_HALTED) {
		xhci_dbg(xhci, "xHCI dying, ignoring interrupt. Shouldn't IRQs be disabled?\n");

		/* Clear the event handler busy flag (RW1C) */
		temp = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
		xhci_write_64(xhci, temp | ERST_EHB, &ir->ir_set->erst_dequeue);
		return -ENODEV;
	}

	/* Process all OS owned event TRBs on this event ring */
	while (unhandled_event_trb(ir->event_ring)) {
		err = xhci_handle_event_trb(xhci, ir, ir->event_ring->dequeue);

		/*
		 * If half a segment of events have been handled in one go then
		 * update ERDP, and force isoc trbs to interrupt more often
		 */
		if (event_loop++ > TRBS_PER_SEGMENT / 2) {
			xhci_update_erst_dequeue(xhci, ir, false);

			if (ir->isoc_bei_interval > AVOID_BEI_INTERVAL_MIN)
				ir->isoc_bei_interval = ir->isoc_bei_interval / 2;

			event_loop = 0;
		}

		/* Update SW event ring dequeue pointer */
		inc_deq(xhci, ir->event_ring);

		if (err)
			break;
	}

	xhci_update_erst_dequeue(xhci, ir, true);

	return 0;
}
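/*
 * Minimal consumer-loop sketch (not driver code, simplified assumptions):
 * like xhci_handle_events() above, it consumes events while their cycle
 * bit matches the ring's cycle state, advances a software dequeue index
 * per event, and toggles the cycle state on wrap so stale TRBs from the
 * previous lap are never re-read.
 */
#include <stdio.h>
#include <stdint.h>

#define RING_TRBS 8

struct ev { uint32_t flags; }; /* bit 0 = cycle (valid) bit */

int main(void)
{
	struct ev ring[RING_TRBS] = { {1}, {1}, {1} }; /* 3 valid events */
	unsigned int deq = 0, cycle = 1, handled = 0;

	while ((ring[deq].flags & 1) == cycle) {
		handled++;                /* event handling would go here */
		if (++deq == RING_TRBS) { /* wrap: producer flips the bit */
			deq = 0;
			cycle ^= 1;
		}
	}
	printf("handled %u events\n", handled);
	return 0;
}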
/*
 * xHCI spec says we can get an interrupt, and if the HC has an error condition,
 * we might get bad data out of the event ring. Section 4.10.2.7 has a list of
 * indicators of an event TRB error, but we check the status *first* to be safe.
 */
irqreturn_t xhci_irq(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	/* ... */
	spin_lock(&xhci->lock);
	/* Check if the xHC generated the interrupt, or the irq is shared */
	status = readl(&xhci->op_regs->status);
	if (status == ~(u32)0) {
		xhci_hc_died(xhci);
		goto out;
	}
	/* ... */
	if (status & STS_HCE) {
		xhci_warn(xhci, "WARNING: Host Controller Error\n");
		goto out;
	}
	if (status & STS_FATAL) {
		xhci_warn(xhci, "WARNING: Host System Error\n");
		xhci_halt(xhci);
		goto out;
	}

	/*
	 * Clear the op reg interrupt status first (write 1 to clear),
	 * so we can receive interrupts from other MSI-X interrupters.
	 */
	status |= STS_EINT;
	writel(status, &xhci->op_regs->status);

	/* This is the handler of the primary interrupter */
	xhci_handle_events(xhci, xhci->interrupters[0]);
out:
	spin_unlock(&xhci->lock);
	/* ... */
}
/*
 * Generic function for queueing a TRB on a ring.
 * The caller must have checked to make sure there's room on the ring.
 */
static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
		      bool more_trbs_coming,
		      u32 field1, u32 field2, u32 field3, u32 field4)
{
	struct xhci_generic_trb *trb;

	trb = &ring->enqueue->generic;
	trb->field[0] = cpu_to_le32(field1);
	trb->field[1] = cpu_to_le32(field2);
	trb->field[2] = cpu_to_le32(field3);
	/* make sure TRB is fully written before giving it to the controller */
	wmb();
	trb->field[3] = cpu_to_le32(field4);

	trace_xhci_queue_trb(ring, trb,
			     xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue));

	inc_enq(xhci, ring, more_trbs_coming);
}
static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
			u32 ep_state, unsigned int num_trbs,
			gfp_t mem_flags)
{
	unsigned int link_trb_count = 0;
	unsigned int new_segs = 0;

	/* Make sure the endpoint has been added to xHC schedule first */
	switch (ep_state) {
	case EP_STATE_DISABLED:
		/*
		 * USB core changed config/interfaces without notifying us,
		 * or hardware is reporting the wrong state.
		 */
		xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
		return -ENOENT;
	case EP_STATE_ERROR:
		xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n");
		/* FIXME event handling code for error needs to clear it */
		/* XXX not sure if this should be -ENOENT or not */
		return -EINVAL;
	case EP_STATE_HALTED:
		xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n");
		break;
	case EP_STATE_STOPPED:
	case EP_STATE_RUNNING:
		break;
	default:
		xhci_err(xhci, "ERROR unknown endpoint state for ep\n");
		/*
		 * FIXME issue Configure Endpoint command to try to get the HC
		 * back into a known state.
		 */
		return -EINVAL;
	}

	if (ep_ring != xhci->cmd_ring) {
		new_segs = xhci_ring_expansion_needed(xhci, ep_ring, num_trbs);
	} else if (xhci_num_trbs_free(ep_ring) <= num_trbs) {
		xhci_err(xhci, "Do not support expand command ring\n");
		return -ENOMEM;
	}

	if (new_segs) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
			       "ERROR no room on ep ring, try ring expansion");
		if (xhci_ring_expansion(xhci, ep_ring, new_segs, mem_flags)) {
			xhci_err(xhci, "Ring expansion failed\n");
			return -ENOMEM;
		}
	}

	while (trb_is_link(ep_ring->enqueue)) {
		/* If we're not dealing with 0.95 hardware or isoc rings on
		 * AMD 0.96 host, clear the chain bit.
		 */
		if (!xhci_link_chain_quirk(xhci, ep_ring->type))
			ep_ring->enqueue->link.control &=
				cpu_to_le32(~TRB_CHAIN);
		else
			ep_ring->enqueue->link.control |=
				cpu_to_le32(TRB_CHAIN);

		wmb();
		ep_ring->enqueue->link.control ^= cpu_to_le32(TRB_CYCLE);

		/* Toggle the cycle bit after the last ring segment. */
		if (link_trb_toggles_cycle(ep_ring->enqueue))
			ep_ring->cycle_state ^= 1;

		ep_ring->enq_seg = ep_ring->enq_seg->next;
		ep_ring->enqueue = ep_ring->enq_seg->trbs;

		/* prevent infinite loop if all first trbs are link trbs */
		if (link_trb_count++ > ep_ring->num_segs) {
			xhci_warn(xhci, "Ring is an endless link TRB loop\n");
			return -EINVAL;
		}
	}

	if (last_trb_on_seg(ep_ring->enq_seg, ep_ring->enqueue)) {
		xhci_warn(xhci, "Missing link TRB at end of ring segment\n");
		return -EINVAL;
	}

	return 0;
}
static int prepare_transfer(struct xhci_hcd *xhci,
			    struct xhci_virt_device *xdev,
			    unsigned int ep_index,
			    unsigned int stream_id,
			    unsigned int num_trbs,
			    struct urb *urb,
			    unsigned int td_index,
			    gfp_t mem_flags)
{
	int ret;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	struct xhci_ring *ep_ring;
	struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);

	ep_ring = xhci_triad_to_transfer_ring(xhci, xdev->slot_id, ep_index,
					      stream_id);
	if (!ep_ring) {
		xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n",
			 stream_id);
		return -EINVAL;
	}

	ret = prepare_ring(xhci, ep_ring, GET_EP_CTX_STATE(ep_ctx),
			   num_trbs, mem_flags);
	if (ret)
		return ret;

	urb_priv = urb->hcpriv;
	td = &urb_priv->td[td_index];

	INIT_LIST_HEAD(&td->td_list);
	INIT_LIST_HEAD(&td->cancelled_td_list);

	if (td_index == 0) {
		ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb);
		if (unlikely(ret))
			return ret;
	}

	td->urb = urb;
	/* Add this TD to the tail of the endpoint ring's TD list */
	list_add_tail(&td->td_list, &ep_ring->td_list);
	td->start_seg = ep_ring->enq_seg;
	td->start_trb = ep_ring->enqueue;

	return 0;
}
/* In count_trbs(): how many TRBs does a buffer of the given DMA address
 * and length need, given the per-TRB buffer limit? */
	num_trbs = DIV_ROUND_UP(len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
				TRB_MAX_BUFF_SIZE);

/* In count_trbs_needed(): */
	return count_trbs(urb->transfer_dma, urb->transfer_buffer_length);
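/*
 * A minimal, standalone sketch (not driver code) of the TRB counting math
 * above, assuming the architectural 64 KiB per-TRB buffer limit. A buffer
 * that starts unaligned consumes part of its first TRB, so the offset
 * within the 64 KiB boundary is added to the length before dividing.
 */
#include <stdio.h>
#include <stdint.h>

#define SK_TRB_MAX_BUFF_SIZE (1U << 16) /* 64 KiB per normal TRB */

static unsigned int count_trbs_sketch(uint64_t addr, uint64_t len)
{
	if (len == 0)
		return 1; /* a zero-length transfer still needs one TRB */
	return (unsigned int)((len + (addr & (SK_TRB_MAX_BUFF_SIZE - 1)) +
			       SK_TRB_MAX_BUFF_SIZE - 1) / SK_TRB_MAX_BUFF_SIZE);
}

int main(void)
{
	/* 100 KiB starting 4 bytes short of a 64 KiB boundary: 3 TRBs */
	printf("%u\n", count_trbs_sketch(0xFFFC, 100 * 1024));
	return 0;
}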
/* In count_sg_trbs_needed(): sum count_trbs() over the mapped sg list. */
	full_len = urb->transfer_buffer_length;

	for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) {
		len = sg_dma_len(sg);
		num_trbs += count_trbs(sg_dma_address(sg), len);
		len = min_t(unsigned int, len, full_len);
		full_len -= len;
		if (full_len == 0)
			break;
	}
/* In count_isoc_trbs_needed(): */
	addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
	len = urb->iso_frame_desc[i].length;

	return count_trbs(addr, len);
static void check_trb_math(struct urb *urb, int running_total)
{
	if (unlikely(running_total != urb->transfer_buffer_length))
		dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
			"queued %#x (%d), asked for %#x (%d)\n",
			__func__,
			urb->ep->desc.bEndpointAddress,
			running_total, running_total,
			urb->transfer_buffer_length,
			urb->transfer_buffer_length);
}
static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
			       unsigned int ep_index, unsigned int stream_id,
			       int start_cycle, struct xhci_generic_trb *start_trb)
{
	/*
	 * Pass all the TRBs to the hardware at once and make sure this write
	 * isn't reordered.
	 */
	wmb();
	if (start_cycle)
		start_trb->field[3] |= cpu_to_le32(start_cycle);
	else
		start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
	xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
}
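/*
 * A minimal sketch (not driver code) of the hand-off ordering used by
 * giveback_first_trb() above: every TRB of the TD is written with the
 * first TRB's cycle bit still "owned by software", and only after all
 * other fields are globally visible is that one bit flipped so the
 * controller may consume the whole TD. The __sync_synchronize() below
 * is an assumed stand-in for the driver's wmb().
 */
#include <stdint.h>

struct fake_trb { uint32_t field[4]; };
#define SK_TRB_CYCLE 0x1u

static void hand_off_td(struct fake_trb *first, uint32_t start_cycle)
{
	/* ...all later TRBs and first->field[0..2] already written... */
	__sync_synchronize(); /* make prior writes visible first */
	if (start_cycle)
		first->field[3] |= SK_TRB_CYCLE;
	else
		first->field[3] &= ~SK_TRB_CYCLE;
	/* the endpoint doorbell would be rung after this point */
}

int main(void)
{
	struct fake_trb first = { { 0, 0, 0, 0 } };

	hand_off_td(&first, 1);
	return 0;
}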
static void check_interval(struct urb *urb, struct xhci_ep_ctx *ep_ctx)
{
	unsigned int xhci_interval;
	unsigned int ep_interval;

	xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
	ep_interval = urb->interval;

	/* Convert to microframes */
	if (urb->dev->speed == USB_SPEED_LOW ||
	    urb->dev->speed == USB_SPEED_FULL)
		ep_interval *= 8;

	if (xhci_interval != ep_interval) {
		dev_dbg_ratelimited(&urb->dev->dev,
			"Driver uses different interval (%d microframe%s) than xHCI (%d microframe%s)\n",
			ep_interval, ep_interval == 1 ? "" : "s",
			xhci_interval, xhci_interval == 1 ? "" : "s");
		urb->interval = xhci_interval;
		/* Convert back to frames for LS/FS devices */
		if (urb->dev->speed == USB_SPEED_LOW ||
		    urb->dev->speed == USB_SPEED_FULL)
			urb->interval /= 8;
	}
}
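/*
 * A small standalone illustration (an assumption, not driver code) of the
 * interval fix-up above: the endpoint context stores intervals in 125 us
 * microframes, while LS/FS URBs express urb->interval in 1 ms frames, so
 * the corrected value is divided back by 8 for those speeds.
 */
#include <stdio.h>

static int urb_interval_from_uframes(int xhci_uframes, int low_or_full_speed)
{
	return low_or_full_speed ? xhci_uframes / 8 : xhci_uframes;
}

int main(void)
{
	/* 32 microframes -> a full-speed URB sees an interval of 4 frames */
	printf("%d\n", urb_interval_from_uframes(32, 1));
	return 0;
}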
/*
 * xHCI uses normal TRBs for both bulk and interrupt. When the interrupt
 * endpoint is to be serviced, the xHC will consume (at most) one TD. A TD
 * (comprised of sg list entries) can take several service intervals to
 * transmit.
 */
int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		       struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ep_ctx *ep_ctx;

	ep_ctx = xhci_get_ep_ctx(xhci, xhci->devs[slot_id]->out_ctx, ep_index);
	check_interval(urb, ep_ctx);

	return xhci_queue_bulk_tx(xhci, mem_flags, urb, slot_id, ep_index);
}
/*
 * For xHCI 1.0 host controllers, TD size is the number of max packet sized
 * packets remaining in the TD (*not* including this TRB):
 *
 *	TD size = total_packet_count - packets_transferred
 *
 * For xHCI 0.96 and older, the TD size field should be the remaining bytes
 * including this TRB, right shifted by 10. In both cases the value must fit
 * in bits 21:17 (the TRB_TD_SIZE() macro clamps it to 31), and the last TRB
 * in a TD must have the TD size set to zero.
 */
static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
			     int trb_buff_len, unsigned int td_total_len,
			     struct urb *urb, bool more_trbs_coming)
{
	u32 maxp, total_packet_count;

	/* MTK xHCI 0.96 contains some features from 1.0 */
	if (xhci->hci_version < 0x100 && !(xhci->quirks & XHCI_MTK_HOST))
		return ((td_total_len - transferred) >> 10);

	/* One TRB with a zero-length data packet. */
	if (!more_trbs_coming || (transferred == 0 && trb_buff_len == 0) ||
	    trb_buff_len == td_total_len)
		return 0;

	/* for MTK xHCI 0.96, TD size include this TRB, but not in 1.x */
	if ((xhci->quirks & XHCI_MTK_HOST) && (xhci->hci_version < 0x100))
		trb_buff_len = 0;

	maxp = usb_endpoint_maxp(&urb->ep->desc);
	total_packet_count = DIV_ROUND_UP(td_total_len, maxp);

	/* Queueing functions don't count the current TRB into transferred */
	return (total_packet_count - ((transferred + trb_buff_len) / maxp));
}
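/*
 * Standalone sketch (not driver code) of the xHCI 1.0+ TD-size convention
 * handled above: the field is the number of packets still to send after
 * this TRB, clamped to the 5-bit field maximum of 31, and forced to zero
 * on the last TRB. The helper name and values are illustrative only.
 */
#include <stdio.h>

static unsigned int td_size_sketch(int transferred, int trb_len,
				   int td_total, int maxp)
{
	unsigned int total_pkts = (td_total + maxp - 1) / maxp;
	unsigned int rem;

	if (transferred + trb_len == td_total) /* last TRB: must be 0 */
		return 0;
	rem = total_pkts - (unsigned int)((transferred + trb_len) / maxp);
	return rem > 31 ? 31 : rem; /* fit bits 21:17 */
}

int main(void)
{
	/* 3072-byte TD, 512-byte packets, after one 1024-byte TRB: 4 left */
	printf("%u\n", td_size_sketch(0, 1024, 3072, 512));
	return 0;
}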
static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len,
			 u32 *trb_buff_len, struct xhci_segment *seg)
{
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
	unsigned int unalign;
	unsigned int max_pkt;
	u32 new_buff_len;
	size_t len;

	max_pkt = usb_endpoint_maxp(&urb->ep->desc);
	unalign = (enqd_len + *trb_buff_len) % max_pkt;

	/* we got lucky, last normal TRB data on segment is packet aligned */
	if (unalign == 0)
		return 0;

	xhci_dbg(xhci, "Unaligned %d bytes, buff len %d\n",
		 unalign, *trb_buff_len);

	/* is the last normal TRB alignable by splitting it */
	if (*trb_buff_len > unalign) {
		*trb_buff_len -= unalign;
		xhci_dbg(xhci, "split align, new buff len %d\n", *trb_buff_len);
		return 0;
	}

	/*
	 * We want enqd_len + trb_buff_len to sum up to a number aligned to
	 * the endpoint's wMaxPacketSize.
	 */
	new_buff_len = max_pkt - (enqd_len % max_pkt);

	if (new_buff_len > (urb->transfer_buffer_length - enqd_len))
		new_buff_len = (urb->transfer_buffer_length - enqd_len);

	/* create a max max_pkt sized bounce buffer pointed to by last trb */
	if (usb_urb_dir_out(urb)) {
		if (urb->num_sgs) {
			len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
						 seg->bounce_buf, new_buff_len, enqd_len);
			if (len != new_buff_len)
				xhci_warn(xhci, "WARN Wrong bounce buffer write length: %zu != %d\n",
					  len, new_buff_len);
		} else {
			memcpy(seg->bounce_buf, urb->transfer_buffer + enqd_len, new_buff_len);
		}

		seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
						 max_pkt, DMA_TO_DEVICE);
	} else {
		seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
						 max_pkt, DMA_FROM_DEVICE);
	}

	if (dma_mapping_error(dev, seg->bounce_dma)) {
		/* try without aligning. Some host controllers survive */
		xhci_warn(xhci, "Failed mapping bounce buffer, not aligning\n");
		return 0;
	}
	seg->bounce_len = new_buff_len;
	seg->bounce_offs = enqd_len;

	xhci_dbg(xhci, "Bounce align, new buff len %d\n", *trb_buff_len);

	return 1;
}
/* This is very similar to what ehci-q.c qtd_fill() does */
int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		       struct urb *urb, int slot_id, unsigned int ep_index)
{
	/* ... */
	ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ring)
		return -EINVAL;

	full_len = urb->transfer_buffer_length;
	/* If we have scatter/gather list, we use it. */
	if (urb->num_sgs && !(urb->transfer_flags & URB_DMA_MAP_SINGLE)) {
		num_sgs = urb->num_mapped_sgs;
		sg = urb->sg;
		addr = (u64) sg_dma_address(sg);
		block_len = sg_dma_len(sg);
		num_trbs = count_sg_trbs_needed(urb);
	} else {
		num_trbs = count_trbs_needed(urb);
		addr = (u64) urb->transfer_dma;
		block_len = full_len;
	}
	ret = prepare_transfer(xhci, xhci->devs[slot_id],
			       ep_index, urb->stream_id,
			       num_trbs, urb, 0, mem_flags);
	if (unlikely(ret < 0))
		return ret;

	urb_priv = urb->hcpriv;

	/* Deal with URB_ZERO_PACKET - need one more td/trb */
	if (urb->transfer_flags & URB_ZERO_PACKET && urb_priv->num_tds > 1)
		need_zero_pkt = true;

	td = &urb_priv->td[0];

	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
	 * until we've finished creating all the other TRBs.
	 */
	start_trb = &ring->enqueue->generic;
	start_cycle = ring->cycle_state;
	send_addr = addr;

	/* Queue the TRBs, even if they are zero-length */
	for (enqd_len = 0; first_trb || enqd_len < full_len;
			enqd_len += trb_buff_len) {
		field = TRB_TYPE(TRB_NORMAL);
		/* ... */
		if (enqd_len + trb_buff_len > full_len)
			trb_buff_len = full_len - enqd_len;

		/* Don't change the cycle bit of the first TRB until later */
		if (first_trb) {
			first_trb = false;
			if (start_cycle == 0)
				field |= TRB_CYCLE;
		} else
			field |= ring->cycle_state;

		/* Chain all the TRBs together; clear the chain bit in the last
		 * TRB to indicate it's the last TRB in the chain.
		 */
		if (enqd_len + trb_buff_len < full_len) {
			field |= TRB_CHAIN;
			if (trb_is_link(ring->enqueue + 1)) {
				if (xhci_align_td(xhci, urb, enqd_len,
						  &trb_buff_len,
						  ring->enq_seg)) {
					send_addr = ring->enq_seg->bounce_dma;
					/* assuming TD won't span 2 segs */
					td->bounce_seg = ring->enq_seg;
				}
			}
		}
		if (enqd_len + trb_buff_len >= full_len) {
			field &= ~TRB_CHAIN;
			field |= TRB_IOC;
			more_trbs_coming = false;
			td->end_trb = ring->enqueue;
			td->end_seg = ring->enq_seg;
			if (xhci_urb_suitable_for_idt(urb)) {
				memcpy(&send_addr, urb->transfer_buffer,
				       full_len);
				le64_to_cpus(&send_addr);
				field |= TRB_IDT;
			}
		}
		/* ... */
		remainder = xhci_td_remainder(xhci, enqd_len, trb_buff_len,
					      full_len, urb, more_trbs_coming);
		length_field = TRB_LEN(trb_buff_len) |
			TRB_TD_SIZE(remainder) |
			TRB_INTR_TARGET(0);

		queue_trb(xhci, ring, more_trbs_coming | need_zero_pkt,
			  lower_32_bits(send_addr),
			  upper_32_bits(send_addr),
			  length_field,
			  field);

		addr += trb_buff_len;
		sent_len = trb_buff_len;

		while (sg && sent_len >= block_len) {
			/* New sg entry */
			--num_sgs;
			sent_len -= block_len;
			sg = sg_next(sg);
			if (num_sgs != 0 && sg) {
				block_len = sg_dma_len(sg);
				addr = (u64) sg_dma_address(sg);
				addr += sent_len;
			}
		}
		block_len -= sent_len;
		send_addr = addr;
	}

	if (need_zero_pkt) {
		ret = prepare_transfer(xhci, xhci->devs[slot_id],
				       ep_index, urb->stream_id,
				       1, urb, 1, mem_flags);
		urb_priv->td[1].end_trb = ring->enqueue;
		urb_priv->td[1].end_seg = ring->enq_seg;
		field = TRB_TYPE(TRB_NORMAL) | ring->cycle_state | TRB_IOC;
		queue_trb(xhci, ring, 0, 0, 0, TRB_INTR_TARGET(0), field);
	}

	check_trb_math(urb, enqd_len);
	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
			   start_cycle, start_trb);
	return 0;
}
/* Caller must have locked xhci->lock */
int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		       struct urb *urb, int slot_id, unsigned int ep_index)
{
	/* ... */
	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep_ring)
		return -EINVAL;

	/*
	 * Need to copy setup packet into setup TRB, so we can't use the setup
	 * DMA address.
	 */
	if (!urb->setup_packet)
		return -EINVAL;

	if ((xhci->quirks & XHCI_ETRON_HOST) &&
	    urb->dev->speed >= USB_SPEED_SUPER) {
		/*
		 * If the next available TRB is the Link TRB in the ring segment,
		 * enqueue a No Op TRB so the Setup and Data Stage TRBs are not
		 * broken up by the Link TRB.
		 */
		if (trb_is_link(ep_ring->enqueue + 1)) {
			field = TRB_TYPE(TRB_TR_NOOP) | ep_ring->cycle_state;
			queue_trb(xhci, ep_ring, false, 0, 0,
				  TRB_INTR_TARGET(0), field);
		}
	}

	/* 1 TRB for setup, 1 for status */
	num_trbs = 2;
	/* ... */
	if (urb->transfer_buffer_length > 0)
		num_trbs++;
	ret = prepare_transfer(xhci, xhci->devs[slot_id],
			       ep_index, urb->stream_id,
			       num_trbs, urb, 0, mem_flags);
	if (ret < 0)
		return ret;

	urb_priv = urb->hcpriv;
	td = &urb_priv->td[0];

	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
	 * until we've finished creating all the other TRBs.
	 */
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	/* Queue setup TRB - see section 6.4.1.2.1 */
	setup = (struct usb_ctrlrequest *) urb->setup_packet;
	field = 0;
	field |= TRB_IDT | TRB_TYPE(TRB_SETUP);
	if (start_cycle == 0)
		field |= 0x1;

	/* xHCI 1.0/1.1 6.4.1.2.1: Transfer Type field */
	if ((xhci->hci_version >= 0x100) || (xhci->quirks & XHCI_MTK_HOST)) {
		if (urb->transfer_buffer_length > 0) {
			if (setup->bRequestType & USB_DIR_IN)
				field |= TRB_TX_TYPE(TRB_DATA_IN);
			else
				field |= TRB_TX_TYPE(TRB_DATA_OUT);
		}
	}

	queue_trb(xhci, ep_ring, true,
		  setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16,
		  le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16,
		  TRB_LEN(8) | TRB_INTR_TARGET(0),
		  /* Immediate data in pointer */
		  field);

	/* If there's data, queue data TRBs */
	/* ... */
	if (urb->transfer_buffer_length > 0) {
		if (xhci_urb_suitable_for_idt(urb)) {
			memcpy(&addr, urb->transfer_buffer,
			       urb->transfer_buffer_length);
			le64_to_cpus(&addr);
			field |= TRB_IDT;
		} else {
			addr = (u64) urb->transfer_dma;
		}

		remainder = xhci_td_remainder(xhci, 0,
					      urb->transfer_buffer_length,
					      urb->transfer_buffer_length,
					      urb, 1);
		length_field = TRB_LEN(urb->transfer_buffer_length) |
			TRB_TD_SIZE(remainder) |
			TRB_INTR_TARGET(0);
		if (setup->bRequestType & USB_DIR_IN)
			field |= TRB_DIR_IN;
		queue_trb(xhci, ep_ring, true,
			  lower_32_bits(addr),
			  upper_32_bits(addr),
			  length_field,
			  field | ep_ring->cycle_state);
	}

	/* Save the DMA address of the last TRB in the TD */
	td->end_trb = ep_ring->enqueue;
	td->end_seg = ep_ring->enq_seg;

	/* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
	/* If the device sent data, the status stage is an OUT transfer */
	if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN)
		field = 0;
	else
		field = TRB_DIR_IN;
	queue_trb(xhci, ep_ring, false,
		  0,
		  0,
		  TRB_INTR_TARGET(0),
		  /* Event on completion */
		  field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);

	giveback_first_trb(xhci, slot_id, ep_index, 0,
			   start_cycle, start_trb);
	return 0;
}
/*
 * ... the transfer burst count (TBC) field is zero based, and for everything
 * but SuperSpeed devices it is zero. Only xHCI 1.0 host controllers support
 * this field.
 */
static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci,
					 struct urb *urb,
					 unsigned int total_packet_count)
{
	unsigned int max_burst;

	if (xhci->hci_version < 0x100 || urb->dev->speed < USB_SPEED_SUPER)
		return 0;

	max_burst = urb->ep->ss_ep_comp.bMaxBurst;
	return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1;
}
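/*
 * Worked example (assumed values, not driver code) of the burst-count
 * formula above: DIV_ROUND_UP(total_packets, max_burst + 1) - 1, i.e. the
 * number of bursts needed for the TD, expressed zero-based as the TBC
 * field requires.
 */
#include <stdio.h>

int main(void)
{
	unsigned int total_packets = 10, max_burst = 3; /* bursts of 4 */
	unsigned int tbc = (total_packets + max_burst) / (max_burst + 1) - 1;

	printf("TBC = %u\n", tbc); /* 3 bursts needed -> field value 2 */
	return 0;
}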
/*
 * Returns the number of packets in the last "burst" of packets. All but the
 * last burst must contain (bMaxBurst + 1) packets; the last burst can contain
 * 1 to (bMaxBurst + 1) packets.
 */
static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci,
						     struct urb *urb,
						     unsigned int total_packet_count)
{
	unsigned int max_burst;
	unsigned int residue;

	if (xhci->hci_version < 0x100)
		return 0;

	if (urb->dev->speed >= USB_SPEED_SUPER) {
		/* bMaxBurst is zero based: 0 means 1 packet per burst */
		max_burst = urb->ep->ss_ep_comp.bMaxBurst;
		residue = total_packet_count % (max_burst + 1);
		/* A zero residue means the last burst holds the full
		 * number of packets, but the TLBPC field is zero-based.
		 */
		if (residue == 0)
			return max_burst;
		return residue - 1;
	}
	if (total_packet_count == 0)
		return 0;
	return total_packet_count - 1;
}
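/*
 * Companion example (assumed values, not driver code) for the last-burst
 * packet count above: TLBPC is the zero-based size of the final, possibly
 * short, burst; a zero residue means the last burst is full.
 */
#include <stdio.h>

int main(void)
{
	unsigned int total_packets = 10, max_burst = 3;
	unsigned int residue = total_packets % (max_burst + 1);
	unsigned int tlbpc = residue ? residue - 1 : max_burst;

	printf("TLBPC = %u\n", tlbpc); /* last burst has 2 packets -> 1 */
	return 0;
}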
static int xhci_get_isoc_frame_id(struct xhci_hcd *xhci,
				  struct urb *urb, int index)
{
	int start_frame, ist, ret = 0;
	int start_frame_id, end_frame_id, current_frame_id;

	if (urb->dev->speed == USB_SPEED_LOW ||
	    urb->dev->speed == USB_SPEED_FULL)
		start_frame = urb->start_frame + index * urb->interval;
	else
		start_frame = (urb->start_frame + index * urb->interval) >> 3;

	/* Isochronous Scheduling Threshold (IST, bits 0~3 in HCSPARAMS2):
	 * if bit [3] of IST is set, IST[2:0] is expressed in Frames rather
	 * than Microframes.
	 */
	ist = HCS_IST(xhci->hcs_params2) & 0x7;
	if (HCS_IST(xhci->hcs_params2) & (1 << 3))
		ist <<= 3;

	/*
	 * The Frame ID must lie in the window
	 *   [Current MFINDEX + IST + 1, Current MFINDEX + 895 ms] MOD 2048.
	 */
	current_frame_id = readl(&xhci->run_regs->microframe_index);
	/* ... */
	if (start_frame_id < end_frame_id) {
		if (start_frame > end_frame_id ||
		    start_frame < start_frame_id)
			ret = -EINVAL;
	} else if (start_frame_id > end_frame_id) {
		if (start_frame > end_frame_id &&
		    start_frame < start_frame_id)
			ret = -EINVAL;
	} else {
		ret = -EINVAL;
	}

	if (index == 0) {
		if (ret == -EINVAL || start_frame == start_frame_id) {
			start_frame = start_frame_id + 1;
			if (urb->dev->speed == USB_SPEED_LOW ||
			    urb->dev->speed == USB_SPEED_FULL)
				urb->start_frame = start_frame;
			else
				urb->start_frame = start_frame << 3;
			ret = 0;
		}
	}

	if (ret) {
		xhci_warn(xhci, "Frame ID %d (reg %d, index %d) beyond range (%d, %d)\n",
			  start_frame, current_frame_id, index,
			  start_frame_id, end_frame_id);
		xhci_warn(xhci, "Ignore frame ID field, use SIA bit instead\n");
		ret = 0;
	}

	return ret;
}
static bool trb_block_event_intr(struct xhci_hcd *xhci, int num_tds, int i,
				 struct xhci_interrupter *ir)
{
	if (xhci->hci_version < 0x100)
		return false;

	/* always generate an event interrupt for the last TD */
	if (i == num_tds - 1)
		return false;

	/*
	 * If AVOID_BEI is set the host handles full event rings poorly,
	 * generate an event at least every 8th TD to clear the event ring
	 */
	if (i && ir->isoc_bei_interval && xhci->quirks & XHCI_AVOID_BEI)
		return !!(i % ir->isoc_bei_interval);

	return true;
}
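/*
 * Standalone sketch (not driver code) of the BEI throttling policy above,
 * with the quirk check dropped for brevity: only every Nth isoc TD (and
 * always the last one) raises an interrupt; the rest set the Block Event
 * Interrupt flag so the event ring is not flooded.
 */
#include <stdio.h>
#include <stdbool.h>

static bool block_event_intr(int i, int num_tds, int bei_interval)
{
	if (i == num_tds - 1)
		return false;            /* final TD always interrupts */
	if (i && bei_interval)
		return i % bei_interval; /* every Nth TD interrupts */
	return true;                     /* otherwise block the event */
}

int main(void)
{
	for (int i = 0; i < 8; i++)
		printf("TD %d: BEI=%d\n", i, block_event_intr(i, 8, 4));
	return 0;
}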
/* This is for isoc transfer */
static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
			      struct urb *urb, int slot_id, unsigned int ep_index)
{
	/* ... */
	xep = &xhci->devs[slot_id]->eps[ep_index];
	ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
	ir = xhci->interrupters[0];

	num_tds = urb->number_of_packets;
	if (num_tds < 1) {
		xhci_dbg(xhci, "Isoc URB with zero packets?\n");
		return -EINVAL;
	}
	start_addr = (u64) urb->transfer_dma;
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	urb_priv = urb->hcpriv;
	/* Queue the TRBs for each TD, even if they are zero-length */
	for (i = 0; i < num_tds; i++) {
		/* ... */
		addr = start_addr + urb->iso_frame_desc[i].offset;
		td_len = urb->iso_frame_desc[i].length;
		td_remain_len = td_len;
		max_pkt = usb_endpoint_maxp(&urb->ep->desc);
		total_pkt_count = DIV_ROUND_UP(td_len, max_pkt);

		/* A zero-length transfer still involves at least one packet. */
		if (total_pkt_count == 0)
			total_pkt_count++;
		burst_count = xhci_get_burst_count(xhci, urb, total_pkt_count);
		last_burst_pkt_count = xhci_get_last_burst_packet_count(xhci,
				urb, total_pkt_count);

		trbs_per_td = count_isoc_trbs_needed(urb, i);

		ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
				urb->stream_id, trbs_per_td, urb, i, mem_flags);
		if (ret < 0) {
			if (i == 0)
				return ret;
			goto cleanup;
		}
		td = &urb_priv->td[i];

		/* use SIA as default, if frame id is used overwrite it */
		sia_frame_id = TRB_SIA;
		if (!(urb->transfer_flags & URB_ISO_ASAP) &&
		    HCC_CFC(xhci->hcc_params)) {
			frame_id = xhci_get_isoc_frame_id(xhci, urb, i);
			if (frame_id >= 0)
				sia_frame_id = TRB_FRAME_ID(frame_id);
		}
		/*
		 * Set isoc specific data for the first TRB in a TD.
		 * Prevent HW from getting the TRBs by keeping the cycle state
		 * inverted in the first TD's isoc TRB.
		 */
		field = TRB_TYPE(TRB_ISOC) |
			TRB_TLBPC(last_burst_pkt_count) |
			sia_frame_id |
			(i ? ep_ring->cycle_state : !start_cycle);

		/* xhci 1.1 with ETE uses TD_Size field for TBC, old is Rsvdz */
		if (!xep->use_extended_tbc)
			field |= TRB_TBC(burst_count);

		/* Fill the rest of the TRB fields, and remaining normal TRBs */
		for (j = 0; j < trbs_per_td; j++) {
			/* only first TRB is isoc, overwrite otherwise */
			if (!first_trb)
				field = TRB_TYPE(TRB_NORMAL) |
					ep_ring->cycle_state;
			/* ... */

			/* Set the chain bit for all except the last TRB */
			if (j < trbs_per_td - 1) {
				more_trbs_coming = true;
				field |= TRB_CHAIN;
			} else {
				more_trbs_coming = false;
				td->end_trb = ep_ring->enqueue;
				td->end_seg = ep_ring->enq_seg;
				field |= TRB_IOC;
				if (trb_block_event_intr(xhci, num_tds, i, ir))
					field |= TRB_BEI;
			}
			/* Calculate TRB length */
			trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
			if (trb_buff_len > td_remain_len)
				trb_buff_len = td_remain_len;

			/* Set the TRB length, TD size, & interrupter fields. */
			remainder = xhci_td_remainder(xhci, running_total,
						      trb_buff_len, td_len,
						      urb, more_trbs_coming);

			length_field = TRB_LEN(trb_buff_len) |
				TRB_INTR_TARGET(0);

			/* xhci 1.1 with ETE uses TD Size field for TBC */
			if (first_trb && xep->use_extended_tbc)
				length_field |= TRB_TD_SIZE_TBC(burst_count);
			else
				length_field |= TRB_TD_SIZE(remainder);
			first_trb = false;

			queue_trb(xhci, ep_ring, more_trbs_coming,
				  lower_32_bits(addr),
				  upper_32_bits(addr),
				  length_field,
				  field);
			running_total += trb_buff_len;

			addr += trb_buff_len;
			td_remain_len -= trb_buff_len;
		}

		/* Check TD length */
		if (running_total != td_len) {
			xhci_err(xhci, "ISOC TD length unmatch\n");
			ret = -EINVAL;
			goto cleanup;
		}
	}

	/* store the next frame id */
	if (HCC_CFC(xhci->hcc_params))
		xep->next_frame_id = urb->start_frame + num_tds * urb->interval;

	if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
		if (xhci->quirks & XHCI_AMD_PLL_FIX)
			usb_amd_quirk_pll_disable();
	}
	xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++;

	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
			   start_cycle, start_trb);
	return 0;
cleanup:
	/* Clean up a partially enqueued isoc transfer. */
	for (i--; i >= 0; i--)
		list_del_init(&urb_priv->td[i].td_list);

	/*
	 * Use the first TD as a temporary variable to turn the TDs we've queued
	 * into No-ops with a software-owned cycle bit. That way the hardware
	 * won't accidentally start executing bogus TDs when we partially
	 * overwrite them. td->start_trb and td->start_seg are already set.
	 */
	urb_priv->td[0].end_trb = ep_ring->enqueue;
	/* Every TRB except the first & last will have its cycle bit flipped. */
	td_to_noop(&urb_priv->td[0], true);

	/* Reset the ring enqueue back to the first TRB and its cycle bit. */
	ep_ring->enqueue = urb_priv->td[0].start_trb;
	ep_ring->enq_seg = urb_priv->td[0].start_seg;
	ep_ring->cycle_state = start_cycle;
	usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
	return ret;
}
/*
 * Update interval as xhci_queue_intr_tx does. Use xhci frame_index to
 * update urb->start_frame if URB_ISO_ASAP is set in transfer_flags or
 * Contiguous Frame ID is not supported by HC.
 */
int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
			       struct urb *urb, int slot_id, unsigned int ep_index)
{
	/* ... */
	xdev = xhci->devs[slot_id];
	xep = &xhci->devs[slot_id]->eps[ep_index];
	ep_ring = xdev->eps[ep_index].ring;
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);

	num_trbs = 0;
	num_tds = urb->number_of_packets;
	for (i = 0; i < num_tds; i++)
		num_trbs += count_isoc_trbs_needed(urb, i);

	/* Check the ring to guarantee there is enough room for the whole urb.
	 * Do not insert any td of the urb to the ring if the check failed.
	 */
	ret = prepare_ring(xhci, ep_ring, GET_EP_CTX_STATE(ep_ctx),
			   num_trbs, mem_flags);
	if (ret)
		return ret;

	/* Check the interval before calculating the start frame. */
	check_interval(urb, ep_ctx);

	/* Calculate the start frame and put it in urb->start_frame. */
	if (HCC_CFC(xhci->hcc_params) && !list_empty(&ep_ring->td_list)) {
		if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_RUNNING) {
			urb->start_frame = xep->next_frame_id;
			goto skip_start_over;
		}
	}

	start_frame = readl(&xhci->run_regs->microframe_index);
	start_frame &= 0x3fff;
	/*
	 * Round up to the next frame and consider the time before the trb
	 * really gets scheduled by hardware.
	 */
	ist = HCS_IST(xhci->hcs_params2) & 0x7;
	if (HCS_IST(xhci->hcs_params2) & (1 << 3))
		ist <<= 3;
	start_frame += ist + XHCI_CFC_DELAY;
	start_frame = roundup(start_frame, 8);

	/*
	 * Round up to the next ESIT (Endpoint Service Interval Time) if ESIT
	 * is greater than 8 microframes.
	 */
	if (urb->dev->speed == USB_SPEED_LOW ||
	    urb->dev->speed == USB_SPEED_FULL) {
		start_frame = roundup(start_frame, urb->interval << 3);
		urb->start_frame = start_frame >> 3;
	} else {
		start_frame = roundup(start_frame, urb->interval);
		urb->start_frame = start_frame;
	}

skip_start_over:
	return xhci_queue_isoc_tx(xhci, mem_flags, urb, slot_id, ep_index);
}
/*
 * Generic function for queueing a command TRB on the command ring.
 * Check to make sure there's room on the command ring for one command TRB.
 * ...
 * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB
 * because the command event handler may want to resubmit a failed command.
 */
static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
			 u32 field1, u32 field2,
			 u32 field3, u32 field4, bool command_must_succeed)
{
	int reserved_trbs = xhci->cmd_ring_reserved_trbs;
	int ret;

	if ((xhci->xhc_state & XHCI_STATE_DYING) ||
	    (xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_dbg(xhci, "xHCI dying or halted, can't queue_command\n");
		return -ESHUTDOWN;
	}

	if (!command_must_succeed)
		reserved_trbs++;

	ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
			   reserved_trbs, GFP_ATOMIC);
	if (ret < 0) {
		xhci_err(xhci, "ERR: No room for command on command ring\n");
		if (command_must_succeed)
			xhci_err(xhci, "ERR: Reserved TRB counting for unfailable commands failed.\n");
		return ret;
	}

	cmd->command_trb = xhci->cmd_ring->enqueue;

	/* if there are no other commands queued we start the timeout timer */
	if (list_empty(&xhci->cmd_list)) {
		xhci->current_cmd = cmd;
		xhci_mod_cmd_timer(xhci);
	}

	list_add_tail(&cmd->cmd_list, &xhci->cmd_list);

	queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
		  field4 | xhci->cmd_ring->cycle_state);
	return 0;
}
/* Queue a slot enable or disable request on the command ring */
int xhci_queue_slot_control(struct xhci_hcd *xhci, struct xhci_command *cmd,
			    u32 trb_type, u32 slot_id)
{
	return queue_command(xhci, cmd, 0, 0, 0,
			TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false);
}

/* Queue an address device command TRB */
int xhci_queue_address_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
			      dma_addr_t in_ctx_ptr, u32 slot_id, enum xhci_setup_dev setup)
{
	return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id)
			| (setup == SETUP_CONTEXT_ONLY ? TRB_BSR : 0), false);
}

int xhci_queue_vendor_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
			      u32 field1, u32 field2, u32 field3, u32 field4)
{
	return queue_command(xhci, cmd, field1, field2, field3, field4, false);
}

/* Queue a reset device command TRB */
int xhci_queue_reset_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
			    u32 slot_id)
{
	return queue_command(xhci, cmd, 0, 0, 0,
			TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
			false);
}

/* Queue a configure endpoint command TRB */
int xhci_queue_configure_endpoint(struct xhci_hcd *xhci,
				  struct xhci_command *cmd, dma_addr_t in_ctx_ptr,
				  u32 slot_id, bool command_must_succeed)
{
	return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id),
			command_must_succeed);
}

/* Queue an evaluate context command TRB */
int xhci_queue_evaluate_context(struct xhci_hcd *xhci, struct xhci_command *cmd,
				dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed)
{
	return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id),
			command_must_succeed);
}

/*
 * Suspend is set to indicate "Stop Endpoint Command" is being issued to stop
 * activity on an endpoint that is about to be suspended.
 */
int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd,
			     int slot_id, unsigned int ep_index, int suspend)
{
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_INDEX_FOR_TRB(ep_index);
	u32 type = TRB_TYPE(TRB_STOP_RING);
	u32 trb_suspend = SUSPEND_PORT_FOR_TRB(suspend);

	return queue_command(xhci, cmd, 0, 0, 0,
			trb_slot_id | trb_ep_index | type | trb_suspend, false);
}

int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd,
			int slot_id, unsigned int ep_index,
			enum xhci_ep_reset_type reset_type)
{
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_INDEX_FOR_TRB(ep_index);
	u32 type = TRB_TYPE(TRB_RESET_EP);

	if (reset_type == EP_SOFT_RESET)
		type |= TRB_TSP;

	return queue_command(xhci, cmd, 0, 0, 0,
			trb_slot_id | trb_ep_index | type, false);
}
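/*
 * Illustrative composition (not driver code) of the field4 control word
 * used by the command wrappers above: per the xHCI command TRB layout, the
 * TRB type lives in bits 15:10, the endpoint ID in bits 20:16, and the
 * slot ID in bits 31:24. The macro values mirror the driver's; the slot
 * and endpoint numbers are assumed for the example.
 */
#include <stdio.h>
#include <stdint.h>

#define SK_TRB_TYPE(t)        ((uint32_t)(t) << 10)
#define SK_SLOT_ID_FOR_TRB(s) (((uint32_t)(s) & 0xff) << 24)
#define SK_EP_ID_FOR_TRB(ep)  ((((uint32_t)(ep) + 1) & 0x1f) << 16)
#define SK_TRB_STOP_RING      15 /* Stop Endpoint command type */

int main(void)
{
	uint32_t field4 = SK_TRB_TYPE(SK_TRB_STOP_RING) |
			  SK_SLOT_ID_FOR_TRB(3) | SK_EP_ID_FOR_TRB(2);

	printf("field4 = %#x\n", field4);
	return 0;
}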