xref: /aosp_15_r20/external/coreboot/payloads/libpayload/drivers/usb/xhci_events.c (revision b9411a12aaaa7e1e6a6fb7c5e057f44ee179a49c)
1 /*
2  *
3  * Copyright (C) 2013 secunet Security Networks AG
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. The name of the author may not be used to endorse or promote products
14  *    derived from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 //#define XHCI_SPEW_DEBUG
30 
31 #include <inttypes.h>
32 #include <arch/virtual.h>
33 #include "xhci_private.h"
34 
35 void
xhci_reset_event_ring(event_ring_t * const er)36 xhci_reset_event_ring(event_ring_t *const er)
37 {
38 	int i;
39 	for (i = 0; i < EVENT_RING_SIZE; ++i)
40 		er->ring[i].control &= ~TRB_CYCLE;
41 	er->cur		= er->ring;
42 	er->last	= er->ring + EVENT_RING_SIZE;
43 	er->ccs		= 1;
44 	er->adv		= 1;
45 }
46 
47 static inline int
xhci_event_ready(const event_ring_t * const er)48 xhci_event_ready(const event_ring_t *const er)
49 {
50 	return (er->cur->control & TRB_CYCLE) == er->ccs;
51 }
52 
/*
 * Write the software dequeue pointer back to the xHC's Event Ring
 * Dequeue Pointer register of interrupter 0, but only if it actually
 * advanced since the last write-back (tracked by the er.adv flag).
 */
void
xhci_update_event_dq(xhci_t *const xhci)
{
	if (xhci->er.adv) {
		xhci_spew("Updating dq ptr: @%p(0x%08"PRIx32") -> %p\n",
			  phys_to_virt(xhci->hcrreg->intrrs[0].erdp_lo),
			  xhci->hcrreg->intrrs[0].erdp_lo, xhci->er.cur);
		/* NOTE(review): high dword is cleared unconditionally —
		   assumes the event ring lies below 4GiB. */
		xhci->hcrreg->intrrs[0].erdp_lo = virt_to_phys(xhci->er.cur);
		xhci->hcrreg->intrrs[0].erdp_hi = 0;
		xhci->er.adv = 0;	/* hardware dq pointer is in sync again */
	}
}
65 
66 void
xhci_advance_event_ring(xhci_t * const xhci)67 xhci_advance_event_ring(xhci_t *const xhci)
68 {
69 	xhci->er.cur++;
70 	xhci->er.adv = 1;
71 	if (xhci->er.cur == xhci->er.last) {
72 		xhci_spew("Roll over in event ring\n");
73 		xhci->er.cur = xhci->er.ring;
74 		xhci->er.ccs ^= 1;
75 		xhci_update_event_dq(xhci);
76 	}
77 }
78 
/*
 * Consume a Transfer Event TRB from the event ring.
 *
 * If the event belongs to an enabled slot with a running interrupt
 * queue on the reported endpoint, the completed transfer TRB is handed
 * to that queue: its TL field is rewritten to the number of bytes
 * actually transferred (queued size minus residual), or 0 on error.
 * Forced stop events are dropped silently; anything else is logged as
 * spurious. The event ring is advanced in every case.
 */
static void
xhci_handle_transfer_event(xhci_t *const xhci)
{
	const trb_t *const ev = xhci->er.cur;

	const int cc = TRB_GET(CC, ev);	/* completion code */
	const int id = TRB_GET(ID, ev);	/* slot id */
	const int ep = TRB_GET(EP, ev);	/* endpoint id */

	intrq_t *intrq;

	if (id && id <= xhci->max_slots_en &&
			(intrq = xhci->dev[id].interrupt_queues[ep])) {
		/* It's a running interrupt endpoint */
		/* NOTE(review): only the low 32 bits of the TRB pointer are
		   used — assumes transfer rings lie below 4GiB. */
		intrq->ready = phys_to_virt(ev->ptr_low);
		if (cc == CC_SUCCESS || cc == CC_SHORT_PACKET) {
			/* Actual length = queued length minus the residual
			   the controller reports in EVTL. */
			TRB_SET(TL, intrq->ready,
				intrq->size - TRB_GET(EVTL, ev));
		} else {
			xhci_debug("Interrupt Transfer failed: %d\n",
				   cc);
			TRB_SET(TL, intrq->ready, 0);
		}
	} else if (cc == CC_STOPPED || cc == CC_STOPPED_LENGTH_INVALID) {
		/* Ignore 'Forced Stop Events' */
	} else {
		xhci_debug("Warning: "
			   "Spurious transfer event for ID %d, EP %d:\n"
			   "  Pointer: 0x%08x%08x\n"
			   "       TL: 0x%06x\n"
			   "       CC: %d\n",
			   id, ep,
			   ev->ptr_high, ev->ptr_low,
			   TRB_GET(EVTL, ev), cc);
	}
	xhci_advance_event_ring(xhci);
}
116 
117 static void
xhci_handle_command_completion_event(xhci_t * const xhci)118 xhci_handle_command_completion_event(xhci_t *const xhci)
119 {
120 	const trb_t *const ev = xhci->er.cur;
121 
122 	xhci_debug("Warning: Spurious command completion event:\n"
123 		   "  Pointer: 0x%08x%08x\n"
124 		   "       CC: %d\n"
125 		   "  Slot ID: %d\n"
126 		   "    Cycle: %d\n",
127 		   ev->ptr_high, ev->ptr_low,
128 		   TRB_GET(CC, ev), TRB_GET(ID, ev), ev->control & TRB_CYCLE);
129 	xhci_advance_event_ring(xhci);
130 }
131 
132 static void
xhci_handle_host_controller_event(xhci_t * const xhci)133 xhci_handle_host_controller_event(xhci_t *const xhci)
134 {
135 	const trb_t *const ev = xhci->er.cur;
136 
137 	const int cc = TRB_GET(CC, ev);
138 	switch (cc) {
139 	case CC_EVENT_RING_FULL_ERROR:
140 		xhci_debug("Event ring full! (@%p)\n", xhci->er.cur);
141 		/*
142 		 * If we get here, we have processed the whole queue:
143 		 * xHC pushes this event, when it sees the ring full,
144 		 * full of other events.
145 		 * IMO it's save and necessary to update the dequeue
146 		 * pointer here.
147 		 */
148 		xhci_advance_event_ring(xhci);
149 		xhci_update_event_dq(xhci);
150 		break;
151 	default:
152 		xhci_debug("Warning: Spurious host controller event: %d\n", cc);
153 		xhci_advance_event_ring(xhci);
154 		break;
155 	}
156 }
157 
158 /* handle standard types:
159  * - command completion event
160  * - port status change event
161  * - transfer event
162  * - host controller event
163  */
164 static void
xhci_handle_event(xhci_t * const xhci)165 xhci_handle_event(xhci_t *const xhci)
166 {
167 	const trb_t *const ev = xhci->er.cur;
168 
169 	const int trb_type = TRB_GET(TT, ev);
170 	switch (trb_type) {
171 		/* Either pass along the event or advance event ring */
172 	case TRB_EV_TRANSFER:
173 		xhci_handle_transfer_event(xhci);
174 		break;
175 	case TRB_EV_CMD_CMPL:
176 		xhci_handle_command_completion_event(xhci);
177 		break;
178 	case TRB_EV_PORTSC:
179 		xhci_debug("Port Status Change Event for %d: %d\n",
180 			   TRB_GET(PORT, ev), TRB_GET(CC, ev));
181 		/* We ignore the event as we look for the PORTSC
182 		   registers instead, at a time when it suits _us_. */
183 		xhci_advance_event_ring(xhci);
184 		break;
185 	case TRB_EV_HOST:
186 		xhci_handle_host_controller_event(xhci);
187 		break;
188 	default:
189 		xhci_debug("Warning: Spurious event: %d, Completion Code: %d\n",
190 			   trb_type, TRB_GET(CC, ev));
191 		xhci_advance_event_ring(xhci);
192 		break;
193 	}
194 }
195 
196 void
xhci_handle_events(xhci_t * const xhci)197 xhci_handle_events(xhci_t *const xhci)
198 {
199 	while (xhci_event_ready(&xhci->er))
200 		xhci_handle_event(xhci);
201 	xhci_update_event_dq(xhci);
202 }
203 
204 static unsigned long
xhci_wait_for_event(const event_ring_t * const er,unsigned long * const timeout_us)205 xhci_wait_for_event(const event_ring_t *const er,
206 		    unsigned long *const timeout_us)
207 {
208 	while (!xhci_event_ready(er) && *timeout_us) {
209 		--*timeout_us;
210 		udelay(1);
211 	}
212 	return *timeout_us;
213 }
214 
215 static unsigned long
xhci_wait_for_event_type(xhci_t * const xhci,const int trb_type,unsigned long * const timeout_us)216 xhci_wait_for_event_type(xhci_t *const xhci,
217 		    const int trb_type,
218 		    unsigned long *const timeout_us)
219 {
220 	while (xhci_wait_for_event(&xhci->er, timeout_us)) {
221 		if (TRB_GET(TT, xhci->er.cur) == trb_type)
222 			break;
223 
224 		xhci_handle_event(xhci);
225 	}
226 	return *timeout_us;
227 }
228 
/*
 * Ref. xHCI Specification Revision 1.2, May 2019.
 * Section 4.6.1.2.
 *
 * Process events from xHCI Abort command.
 *
 * Returns CC_COMMAND_RING_STOPPED on success and TIMEOUT on failure.
 */

int
xhci_wait_for_command_aborted(xhci_t *const xhci, const trb_t *const address)
{
	/*
	 * Specification says that something might be seriously wrong, if
	 * we don't get a response after 5s. Still, let the caller decide,
	 * what to do then.
	 */
	unsigned long timeout_us = USB_MAX_PROCESSING_TIME_US; /* 5s */
	int cc = TIMEOUT;
	/*
	 * Expects two command completion events:
	 * The first with CC == COMMAND_ABORTED should point to address
	 * (not present if command was not running),
	 * the second with CC == COMMAND_RING_STOPPED should point to new dq.
	 */
	while (xhci_wait_for_event_type(xhci, TRB_EV_CMD_CMPL, &timeout_us)) {
		/* Match on the physical address of the aborted command TRB.
		   NOTE(review): a zero high dword is required — assumes the
		   command ring lies below 4GiB. */
		if ((xhci->er.cur->ptr_low == virt_to_phys(address)) &&
		    (xhci->er.cur->ptr_high == 0)) {
			cc = TRB_GET(CC, xhci->er.cur);
			xhci_advance_event_ring(xhci);
			break;
		}

		/* Not our command: log it as spurious and keep scanning. */
		xhci_handle_command_completion_event(xhci);
	}
	if (timeout_us == 0) {
		xhci_debug("Warning: Timed out waiting for "
			   "COMMAND_ABORTED or COMMAND_RING_STOPPED.\n");
		goto update_and_return;
	}
	if (cc == CC_COMMAND_RING_STOPPED) {
		/* There may not have been a command to abort. */
		goto update_and_return;
	}

	/* First event was for the aborted command itself; now wait for the
	   ring-stopped event with a fresh timeout budget. */
	timeout_us = USB_MAX_PROCESSING_TIME_US; /* 5s */
	while (xhci_wait_for_event_type(xhci, TRB_EV_CMD_CMPL, &timeout_us)) {
		if (TRB_GET(CC, xhci->er.cur) == CC_COMMAND_RING_STOPPED) {
			cc = CC_COMMAND_RING_STOPPED;
			xhci_advance_event_ring(xhci);
			break;
		}

		xhci_handle_command_completion_event(xhci);
	}
	if (timeout_us == 0)
		xhci_debug("Warning: Timed out "
			   "waiting for COMMAND_RING_STOPPED.\n");

update_and_return:
	/* Always sync the hardware dequeue pointer before returning. */
	xhci_update_event_dq(xhci);
	return cc;
}
292 
293 /*
294  * returns cc of command in question (pointed to by `address`)
295  * caller should abort command if cc is TIMEOUT
296  */
297 int
xhci_wait_for_command_done(xhci_t * const xhci,const trb_t * const address,const int clear_event)298 xhci_wait_for_command_done(xhci_t *const xhci,
299 			   const trb_t *const address,
300 			   const int clear_event)
301 {
302 	unsigned long timeout_us = USB_MAX_PROCESSING_TIME_US; /* 5s */
303 	int cc = TIMEOUT;
304 	while (xhci_wait_for_event_type(xhci, TRB_EV_CMD_CMPL, &timeout_us)) {
305 		if ((xhci->er.cur->ptr_low == virt_to_phys(address)) &&
306 				(xhci->er.cur->ptr_high == 0)) {
307 			cc = TRB_GET(CC, xhci->er.cur);
308 			break;
309 		}
310 
311 		xhci_handle_command_completion_event(xhci);
312 	}
313 	if (!timeout_us) {
314 		xhci_debug("Warning: Timed out waiting for TRB_EV_CMD_CMPL.\n");
315 	} else if (clear_event) {
316 		xhci_advance_event_ring(xhci);
317 	}
318 	xhci_update_event_dq(xhci);
319 	return cc;
320 }
321 
322 /* returns amount of bytes transferred on success, negative CC on error */
323 int
xhci_wait_for_transfer(xhci_t * const xhci,const int slot_id,const int ep_id)324 xhci_wait_for_transfer(xhci_t *const xhci, const int slot_id, const int ep_id)
325 {
326 	xhci_spew("Waiting for transfer on ID %d EP %d\n", slot_id, ep_id);
327 	/* 5s for all types of transfers */
328 	unsigned long timeout_us = USB_MAX_PROCESSING_TIME_US;
329 	int ret = TIMEOUT;
330 	while (xhci_wait_for_event_type(xhci, TRB_EV_TRANSFER, &timeout_us)) {
331 		if (TRB_GET(ID, xhci->er.cur) == slot_id &&
332 				TRB_GET(EP, xhci->er.cur) == ep_id) {
333 			ret = -TRB_GET(CC, xhci->er.cur);
334 			if (ret == -CC_SUCCESS || ret == -CC_SHORT_PACKET)
335 				ret = TRB_GET(EVTL, xhci->er.cur);
336 			xhci_advance_event_ring(xhci);
337 			break;
338 		}
339 
340 		xhci_handle_transfer_event(xhci);
341 	}
342 	if (!timeout_us)
343 		xhci_debug("Warning: Timed out waiting for TRB_EV_TRANSFER.\n");
344 	xhci_update_event_dq(xhci);
345 	return ret;
346 }
347