// SPDX-License-Identifier: GPL-2.0
/*
 * Cadence USBHS-DEV Driver - gadget side.
 *
 * Copyright (C) 2023 Cadence Design Systems.
 *
 * Authors: Pawel Laszczak <[email protected]>
 */

/*
 * Workaround 1:
 * In some situations the controller may get a stale data address in a TRB
 * due to the following sequence of events:
 * 1. Controller reads a TRB that includes a data address.
 * 2. Software updates TRBs, including data addresses and Cycle bits.
 * 3. Controller reads the TRB that includes the Cycle bit.
 * 4. DMA runs with the stale data address.
 *
 * To fix this problem, the driver must make the first TRB in a TD invalid.
 * After preparing all TRBs the driver must check the position of the DMA and,
 * if the DMA points to the first just-added TRB and the doorbell is set,
 * then the driver must defer making this TRB valid. This TRB will be made
 * valid while adding the next TRB, but only if the DMA is stopped or at the
 * TRBERR interrupt.
 */

#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/property.h>
#include <linux/string_choices.h>
#include <linux/dmapool.h>
#include <linux/iopoll.h>

#include "cdns2-gadget.h"
#include "cdns2-trace.h"

/**
 * set_reg_bit_32 - set bits in a given 32-bit register.
 * @ptr: register address.
 * @mask: bits to set.
 */
static void set_reg_bit_32(void __iomem *ptr, u32 mask)
{
	mask = readl(ptr) | mask;
	writel(mask, ptr);
}

/*
 * clear_reg_bit_32 - clear bits in a given 32-bit register.
 * @ptr: register address.
 * @mask: bits to clear.
 */
static void clear_reg_bit_32(void __iomem *ptr, u32 mask)
{
	mask = readl(ptr) & ~mask;
	writel(mask, ptr);
}

/* Clear bits in a given 8-bit register. */
static void clear_reg_bit_8(void __iomem *ptr, u8 mask)
{
	mask = readb(ptr) & ~mask;
	writeb(mask, ptr);
}

/* Set bits in a given 8-bit register. */
void set_reg_bit_8(void __iomem *ptr, u8 mask)
{
	mask = readb(ptr) | mask;
	writeb(mask, ptr);
}

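/* Returns the index of the TRB the DMA currently points to (ep_traddr). */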
static int cdns2_get_dma_pos(struct cdns2_device *pdev,
			     struct cdns2_endpoint *pep)
{
	int dma_index;

	dma_index = readl(&pdev->adma_regs->ep_traddr) - pep->ring.dma;

	return dma_index / TRB_SIZE;
}

/* Get next private request from list. */
struct cdns2_request *cdns2_next_preq(struct list_head *list)
{
	return list_first_entry_or_null(list, struct cdns2_request, list);
}

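/* Selects the endpoint context addressed by the per-endpoint ADMA registers. */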
void cdns2_select_ep(struct cdns2_device *pdev, u32 ep)
{
	if (pdev->selected_ep == ep)
		return;

	pdev->selected_ep = ep;
	writel(ep, &pdev->adma_regs->ep_sel);
}

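/* Converts a TRB virtual address to its DMA address within the ring. */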
dma_addr_t cdns2_trb_virt_to_dma(struct cdns2_endpoint *pep,
				 struct cdns2_trb *trb)
{
	u32 offset = (char *)trb - (char *)pep->ring.trbs;

	return pep->ring.dma + offset;
}

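/* Frees the Transfer Ring segment allocated for the endpoint. */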
static void cdns2_free_tr_segment(struct cdns2_endpoint *pep)
{
	struct cdns2_device *pdev = pep->pdev;
	struct cdns2_ring *ring = &pep->ring;

	if (pep->ring.trbs) {
		dma_pool_free(pdev->eps_dma_pool, ring->trbs, ring->dma);
		memset(ring, 0, sizeof(*ring));
	}
}

/* Allocates Transfer Ring segment. */
static int cdns2_alloc_tr_segment(struct cdns2_endpoint *pep)
{
	struct cdns2_device *pdev = pep->pdev;
	struct cdns2_trb *link_trb;
	struct cdns2_ring *ring;

	ring = &pep->ring;

	if (!ring->trbs) {
		ring->trbs = dma_pool_alloc(pdev->eps_dma_pool,
					    GFP_DMA32 | GFP_ATOMIC,
					    &ring->dma);
		if (!ring->trbs)
			return -ENOMEM;
	}

	memset(ring->trbs, 0, TR_SEG_SIZE);

	if (!pep->num)
		return 0;

	/* Initialize the last TRB as Link TRB. */
	link_trb = (ring->trbs + (TRBS_PER_SEGMENT - 1));
	link_trb->buffer = cpu_to_le32(TRB_BUFFER(ring->dma));
	link_trb->control = cpu_to_le32(TRB_CYCLE | TRB_TYPE(TRB_LINK) |
					TRB_TOGGLE);

	return 0;
}

/*
 * Stalls and flushes selected endpoint.
 * Endpoint must be selected before invoking this function.
 */
static void cdns2_ep_stall_flush(struct cdns2_endpoint *pep)
{
	struct cdns2_device *pdev = pep->pdev;
	int val;

	trace_cdns2_ep_halt(pep, 1, 1);

	writel(DMA_EP_CMD_DFLUSH, &pdev->adma_regs->ep_cmd);

	/* Wait for DFLUSH cleared. */
	readl_poll_timeout_atomic(&pdev->adma_regs->ep_cmd, val,
				  !(val & DMA_EP_CMD_DFLUSH), 1, 1000);
	pep->ep_state |= EP_STALLED;
	pep->ep_state &= ~EP_STALL_PENDING;
}

/*
 * Increment a TRB index.
 *
 * The index should never point to the last link TRB in the TR. After
 * incrementing, if it points to the link TRB, wrap around to the beginning
 * and toggle the cycle state bit. The link TRB is always the last TRB entry.
 */
static void cdns2_ep_inc_trb(int *index, u8 *cs, int trb_in_seg)
{
	(*index)++;
	if (*index == (trb_in_seg - 1)) {
		*index = 0;
		*cs ^= 1;
	}
}

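/* Advances the enqueue index, consuming one free TRB. */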
static void cdns2_ep_inc_enq(struct cdns2_ring *ring)
{
	ring->free_trbs--;
	cdns2_ep_inc_trb(&ring->enqueue, &ring->pcs, TRBS_PER_SEGMENT);
}

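/* Advances the dequeue index after a TRB has been completed, freeing it. */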
static void cdns2_ep_inc_deq(struct cdns2_ring *ring)
{
	ring->free_trbs++;
	cdns2_ep_inc_trb(&ring->dequeue, &ring->ccs, TRBS_PER_SEGMENT);
}

/*
 * Enable/disable LPM.
 *
 * If the USBCS_LPMNYET bit is not set and the device receives an Extended
 * Token packet, then the controller answers with an ACK handshake.
 * If the USBCS_LPMNYET bit is set and the device receives an Extended
 * Token packet, then the controller answers with a NYET handshake.
 */
static void cdns2_enable_l1(struct cdns2_device *pdev, int enable)
{
	if (enable) {
		clear_reg_bit_8(&pdev->usb_regs->usbcs, USBCS_LPMNYET);
		writeb(LPMCLOCK_SLEEP_ENTRY, &pdev->usb_regs->lpmclock);
	} else {
		set_reg_bit_8(&pdev->usb_regs->usbcs, USBCS_LPMNYET);
	}
}

static enum usb_device_speed cdns2_get_speed(struct cdns2_device *pdev)
{
	u8 speed = readb(&pdev->usb_regs->speedctrl);

	if (speed & SPEEDCTRL_HS)
		return USB_SPEED_HIGH;
	else if (speed & SPEEDCTRL_FS)
		return USB_SPEED_FULL;

	return USB_SPEED_UNKNOWN;
}

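/* Returns the TRB following @trb, wrapping over the link TRB at ring end. */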
static struct cdns2_trb *cdns2_next_trb(struct cdns2_endpoint *pep,
					struct cdns2_trb *trb)
{
	if (trb == (pep->ring.trbs + (TRBS_PER_SEGMENT - 1)))
		return pep->ring.trbs;
	else
		return ++trb;
}

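/*
 * Completes the request: removes it from the endpoint lists, unmaps it
 * for DMA and returns it to the gadget driver.
 */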
void cdns2_gadget_giveback(struct cdns2_endpoint *pep,
			   struct cdns2_request *preq,
			   int status)
{
	struct usb_request *request = &preq->request;
	struct cdns2_device *pdev = pep->pdev;

	list_del_init(&preq->list);

	if (request->status == -EINPROGRESS)
		request->status = status;

	usb_gadget_unmap_request_by_dev(pdev->dev, request, pep->dir);

	/* All TRBs have finished, clear the counter. */
	preq->finished_trb = 0;

	trace_cdns2_request_giveback(preq);

	if (request->complete) {
		spin_unlock(&pdev->lock);
		usb_gadget_giveback_request(&pep->endpoint, request);
		spin_lock(&pdev->lock);
	}

	if (request->buf == pdev->zlp_buf)
		cdns2_gadget_ep_free_request(&pep->endpoint, request);
}

static void cdns2_wa1_restore_cycle_bit(struct cdns2_endpoint *pep)
{
	/* Workaround for stale data address in TRB. */
	if (pep->wa1_set) {
		trace_cdns2_wa1(pep, "restore cycle bit");

		pep->wa1_set = 0;
		pep->wa1_trb_index = 0xFFFF;
		if (pep->wa1_cycle_bit)
			pep->wa1_trb->control |= cpu_to_le32(0x1);
		else
			pep->wa1_trb->control &= cpu_to_le32(~0x1);
	}
}

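/*
 * Arms Workaround 1: if DMA is already running (doorbell set), remember
 * the TRB so that its cycle bit can be restored later. Returns 1 if the
 * cycle bit can be toggled immediately, 0 if the guard was set.
 */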
static int cdns2_wa1_update_guard(struct cdns2_endpoint *pep,
				  struct cdns2_trb *trb)
{
	struct cdns2_device *pdev = pep->pdev;

	if (!pep->wa1_set) {
		u32 doorbell;

		doorbell = !!(readl(&pdev->adma_regs->ep_cmd) & DMA_EP_CMD_DRDY);

		if (doorbell) {
			pep->wa1_cycle_bit = pep->ring.pcs ? TRB_CYCLE : 0;
			pep->wa1_set = 1;
			pep->wa1_trb = trb;
			pep->wa1_trb_index = pep->ring.enqueue;
			trace_cdns2_wa1(pep, "set guard");
			return 0;
		}
	}
	return 1;
}

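/*
 * Tries to restore the guarded cycle bit; this is allowed only when DMA
 * is stopped or has moved past the guarded TRB.
 */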
static void cdns2_wa1_tray_restore_cycle_bit(struct cdns2_device *pdev,
					     struct cdns2_endpoint *pep)
{
	int dma_index;
	u32 doorbell;

	doorbell = !!(readl(&pdev->adma_regs->ep_cmd) & DMA_EP_CMD_DRDY);
	dma_index = cdns2_get_dma_pos(pdev, pep);

	if (!doorbell || dma_index != pep->wa1_trb_index)
		cdns2_wa1_restore_cycle_bit(pep);
}

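/*
 * Verifies that @num_trbs TRBs fit on the ring and refreshes the link
 * TRB when the new TD wraps over the end of the ring.
 */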
static int cdns2_prepare_ring(struct cdns2_device *pdev,
			      struct cdns2_endpoint *pep,
			      int num_trbs)
{
	struct cdns2_trb *link_trb = NULL;
	int doorbell, dma_index;
	struct cdns2_ring *ring;
	u32 ch_bit = 0;

	ring = &pep->ring;

	if (num_trbs > ring->free_trbs) {
		pep->ep_state |= EP_RING_FULL;
		trace_cdns2_no_room_on_ring("Ring full\n");
		return -ENOBUFS;
	}

	if ((ring->enqueue + num_trbs) >= (TRBS_PER_SEGMENT - 1)) {
		doorbell = !!(readl(&pdev->adma_regs->ep_cmd) & DMA_EP_CMD_DRDY);
		dma_index = cdns2_get_dma_pos(pdev, pep);

		/* Driver can't update the LINK TRB while it is being processed. */
		if (doorbell && dma_index == TRBS_PER_SEGMENT - 1) {
			pep->ep_state |= EP_DEFERRED_DRDY;
			return -ENOBUFS;
		}

		/* Update the Cycle bit in the Link TRB before starting DMA. */
		link_trb = ring->trbs + (TRBS_PER_SEGMENT - 1);

		/*
		 * For a TR of size 2, enabling TRB_CHAIN for epXin causes
		 * the DMA to get stuck at the LINK TRB.
		 * On the other hand, removing TRB_CHAIN for longer TRs for
		 * epXout causes the DMA to get stuck after handling the LINK
		 * TRB. To eliminate this strange behavior, the driver sets
		 * the TRB_CHAIN bit only for TR size > 2.
		 */
		if (pep->type == USB_ENDPOINT_XFER_ISOC || TRBS_PER_SEGMENT > 2)
			ch_bit = TRB_CHAIN;

		link_trb->control = cpu_to_le32(((ring->pcs) ? TRB_CYCLE : 0) |
				    TRB_TYPE(TRB_LINK) | TRB_TOGGLE | ch_bit);
	}

	return 0;
}

static void cdns2_dbg_request_trbs(struct cdns2_endpoint *pep,
				   struct cdns2_request *preq)
{
	struct cdns2_trb *link_trb = pep->ring.trbs + (TRBS_PER_SEGMENT - 1);
	struct cdns2_trb *trb = preq->trb;
	int num_trbs = preq->num_of_trb;
	int i = 0;

	while (i < num_trbs) {
		trace_cdns2_queue_trb(pep, trb + i);
		if (trb + i == link_trb) {
			trb = pep->ring.trbs;
			num_trbs = num_trbs - i;
			i = 0;
		} else {
			i++;
		}
	}
}

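/* Returns the number of TRBs needed to transfer @len bytes at @addr. */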
static unsigned int cdns2_count_trbs(struct cdns2_endpoint *pep,
				     u64 addr, u64 len)
{
	unsigned int num_trbs = 1;

	if (pep->type == USB_ENDPOINT_XFER_ISOC) {
		/*
		 * To speed up DMA performance, the buffer in a TRB should
		 * not cross a 4KB boundary for high-bandwidth transfers, so
		 * the driver splits such a buffer into two TRBs.
		 */
		num_trbs = DIV_ROUND_UP(len +
					(addr & (TRB_MAX_ISO_BUFF_SIZE - 1)),
					TRB_MAX_ISO_BUFF_SIZE);

		if (pep->interval > 1)
			num_trbs = pep->dir ? num_trbs * pep->interval : 1;
	} else if (pep->dir) {
		/*
		 * One extra link TRB for the IN direction.
		 * Sometimes the DMA doesn't advance to the next TD and the
		 * transfer hangs. This extra Link TRB forces the DMA to
		 * advance to the next TD.
		 */
		num_trbs++;
	}

	return num_trbs;
}

static unsigned int cdns2_count_sg_trbs(struct cdns2_endpoint *pep,
					struct usb_request *req)
{
	unsigned int i, len, full_len, num_trbs = 0;
	struct scatterlist *sg;
	int trb_len = 0;

	full_len = req->length;

	for_each_sg(req->sg, sg, req->num_sgs, i) {
		len = sg_dma_len(sg);
		num_trbs += cdns2_count_trbs(pep, sg_dma_address(sg), len);
		len = min(len, full_len);

		/*
		 * For HS ISO transfers a TRB should not exceed the max packet
		 * size. If, while the DMA is working, the data exceeds the
		 * max packet size, then some data will be read in single mode
		 * instead of burst mode. This behavior drastically reduces
		 * the copying speed. To avoid it, one or two extra TRBs are
		 * needed. This issue occurs for the UVC class with
		 * sg_supported = 1 because buffer addresses are not aligned
		 * to 1024.
		 */
		if (pep->type == USB_ENDPOINT_XFER_ISOC) {
			u8 temp;

			trb_len += len;
			temp = trb_len >> 10;

			if (temp) {
				if (trb_len % 1024)
					num_trbs = num_trbs + temp;
				else
					num_trbs = num_trbs + temp - 1;

				trb_len = trb_len - (temp << 10);
			}
		}

		full_len -= len;
		if (full_len == 0)
			break;
	}

	return num_trbs;
}

/*
 * Function prepares the array with optimized AXI burst values for different
 * transfer lengths. The controller handles the final data which are less
 * than the AXI burst size as single byte transactions.
 * e.g.:
 * Let's assume that the driver prepares a TRB with trb->length 700 and the
 * burst size will be set to 128. In this case the controller will handle the
 * first 512 bytes as a single AXI transaction but the next 188 bytes will be
 * handled as 47 separate AXI transactions.
 * The better solution is to use a burst size of 16; then we will have only
 * 25 AXI transactions (10 * 64 + 15 * 4).
 */
static void cdsn2_isoc_burst_opt(struct cdns2_device *pdev)
{
	int axi_burst_option[] = {1, 2, 4, 8, 16, 32, 64, 128};
	int best_burst;
	int array_size;
	int opt_burst;
	int trb_size;
	int i, j;

	array_size = ARRAY_SIZE(axi_burst_option);

	for (i = 0; i <= MAX_ISO_SIZE; i++) {
		trb_size = i / 4;
		best_burst = trb_size ? trb_size : 1;

		for (j = 0; j < array_size; j++) {
			opt_burst = trb_size / axi_burst_option[j];
			opt_burst += trb_size % axi_burst_option[j];

			if (opt_burst < best_burst) {
				best_burst = opt_burst;
				pdev->burst_opt[i] = axi_burst_option[j];
			}
		}
	}
}

static void cdns2_ep_tx_isoc(struct cdns2_endpoint *pep,
			     struct cdns2_request *preq,
			     int num_trbs)
{
	struct scatterlist *sg = NULL;
	u32 remaining_packet_size = 0;
	struct cdns2_trb *trb;
	bool first_trb = true;
	dma_addr_t trb_dma;
	u32 trb_buff_len;
	u32 block_length;
	int td_idx = 0;
	int split_size;
	u32 full_len;
	int enqd_len;
	int sent_len;
	int sg_iter;
	u32 control;
	int num_tds;
	u32 length;

	/*
	 * For the OUT direction 1 TD per interval is enough
	 * because TRBs are not dumped by the controller.
	 */
	num_tds = pep->dir ? pep->interval : 1;
	split_size = preq->request.num_sgs ? 1024 : 3072;

	for (td_idx = 0; td_idx < num_tds; td_idx++) {
		if (preq->request.num_sgs) {
			sg = preq->request.sg;
			trb_dma = sg_dma_address(sg);
			block_length = sg_dma_len(sg);
		} else {
			trb_dma = preq->request.dma;
			block_length = preq->request.length;
		}

		full_len = preq->request.length;
		sg_iter = preq->request.num_sgs ? preq->request.num_sgs : 1;
		remaining_packet_size = split_size;

		for (enqd_len = 0; enqd_len < full_len;
		     enqd_len += trb_buff_len) {
			if (remaining_packet_size == 0)
				remaining_packet_size = split_size;

			/*
			 * Calculate TRB length - the buffer can't cross a 4KB
			 * boundary or exceed the max packet size.
			 */
			trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(trb_dma);
			trb_buff_len = min(trb_buff_len, remaining_packet_size);
			trb_buff_len = min(trb_buff_len, block_length);

			if (trb_buff_len > full_len - enqd_len)
				trb_buff_len = full_len - enqd_len;

			control = TRB_TYPE(TRB_NORMAL);

			/*
			 * For the IN direction the driver has to set IOC only
			 * for the last TRB in the last TD.
			 * For the OUT direction the driver must set IOC and
			 * ISP for the last TRB in each TD.
			 */
			if (enqd_len + trb_buff_len >= full_len || !pep->dir)
				control |= TRB_IOC | TRB_ISP;

			/*
			 * Don't give the first TRB to the hardware (by toggling
			 * the cycle bit) until we've finished creating all the
			 * other TRBs.
			 */
			if (first_trb) {
				first_trb = false;
				if (pep->ring.pcs == 0)
					control |= TRB_CYCLE;
			} else {
				control |= pep->ring.pcs;
			}

			if (enqd_len + trb_buff_len < full_len)
				control |= TRB_CHAIN;

			length = TRB_LEN(trb_buff_len) |
				 TRB_BURST(pep->pdev->burst_opt[trb_buff_len]);

			trb = pep->ring.trbs + pep->ring.enqueue;
			trb->buffer = cpu_to_le32(TRB_BUFFER(trb_dma));
			trb->length = cpu_to_le32(length);
			trb->control = cpu_to_le32(control);

			trb_dma += trb_buff_len;
			sent_len = trb_buff_len;

			if (sg && sent_len >= block_length) {
				/* New sg entry */
				--sg_iter;
				sent_len -= block_length;
				if (sg_iter != 0) {
					sg = sg_next(sg);
					trb_dma = sg_dma_address(sg);
					block_length = sg_dma_len(sg);
				}
			}

			remaining_packet_size -= trb_buff_len;
			block_length -= sent_len;
			preq->end_trb = pep->ring.enqueue;

			cdns2_ep_inc_enq(&pep->ring);
		}
	}
}

static void cdns2_ep_tx_bulk(struct cdns2_endpoint *pep,
			     struct cdns2_request *preq,
			     int trbs_per_td)
{
	struct scatterlist *sg = NULL;
	struct cdns2_ring *ring;
	struct cdns2_trb *trb;
	dma_addr_t trb_dma;
	int sg_iter = 0;
	u32 control;
	u32 length;

	if (preq->request.num_sgs) {
		sg = preq->request.sg;
		trb_dma = sg_dma_address(sg);
		length = sg_dma_len(sg);
	} else {
		trb_dma = preq->request.dma;
		length = preq->request.length;
	}

	ring = &pep->ring;

	for (sg_iter = 0; sg_iter < trbs_per_td; sg_iter++) {
		control = TRB_TYPE(TRB_NORMAL) | ring->pcs | TRB_ISP;
		trb = pep->ring.trbs + ring->enqueue;

		if (pep->dir && sg_iter == trbs_per_td - 1) {
			preq->end_trb = ring->enqueue;
			control = ring->pcs | TRB_TYPE(TRB_LINK) | TRB_CHAIN
				  | TRB_IOC;
			cdns2_ep_inc_enq(&pep->ring);

			if (ring->enqueue == 0)
				control |= TRB_TOGGLE;

			/* Point to next bad TRB. */
			trb->buffer = cpu_to_le32(pep->ring.dma +
						  (ring->enqueue * TRB_SIZE));
			trb->length = 0;
			trb->control = cpu_to_le32(control);
			break;
		}

		/*
		 * Don't give the first TRB to the hardware (by toggling
		 * the cycle bit) until we've finished creating all the
		 * other TRBs.
		 */
		if (sg_iter == 0)
			control = control ^ TRB_CYCLE;

		/* For last TRB in TD. */
		if (sg_iter == (trbs_per_td - (pep->dir ? 2 : 1)))
			control |= TRB_IOC;
		else
			control |= TRB_CHAIN;

		trb->buffer = cpu_to_le32(trb_dma);
		trb->length = cpu_to_le32(TRB_BURST(pep->trb_burst_size) |
					  TRB_LEN(length));
		trb->control = cpu_to_le32(control);

		if (sg && sg_iter < (trbs_per_td - 1)) {
			sg = sg_next(sg);
			trb_dma = sg_dma_address(sg);
			length = sg_dma_len(sg);
		}

		preq->end_trb = ring->enqueue;
		cdns2_ep_inc_enq(&pep->ring);
	}
}

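/*
 * Rings the doorbell: clears stale TRBERR/DESCMIS status and sets DRDY
 * to start (or restart) DMA for the selected endpoint.
 */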
static void cdns2_set_drdy(struct cdns2_device *pdev,
			   struct cdns2_endpoint *pep)
{
	trace_cdns2_ring(pep);

	/*
	 * Memory barrier - Cycle Bit must be set before doorbell.
	 */
	dma_wmb();

	/* Clearing TRBERR and DESCMIS before setting DRDY. */
	writel(DMA_EP_STS_TRBERR | DMA_EP_STS_DESCMIS,
	       &pdev->adma_regs->ep_sts);
	writel(DMA_EP_CMD_DRDY, &pdev->adma_regs->ep_cmd);

	if (readl(&pdev->adma_regs->ep_sts) & DMA_EP_STS_TRBERR) {
		writel(DMA_EP_STS_TRBERR, &pdev->adma_regs->ep_sts);
		writel(DMA_EP_CMD_DRDY, &pdev->adma_regs->ep_cmd);
	}

	trace_cdns2_doorbell_epx(pep, readl(&pdev->adma_regs->ep_traddr));
}

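/*
 * Starts the first isochronous transfer after the doorbell. For ISOC IN
 * a zero-length packet TRB plus a link TRB (held in the two entries
 * outside the standard ring) are prepared first; for ISOC OUT the DMA is
 * simply enabled.
 */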
static int cdns2_prepare_first_isoc_transfer(struct cdns2_device *pdev,
					     struct cdns2_endpoint *pep)
{
	struct cdns2_trb *trb;
	u32 buffer;
	u8 hw_ccs;

	if ((readl(&pdev->adma_regs->ep_cmd) & DMA_EP_CMD_DRDY))
		return -EBUSY;

	if (!pep->dir) {
		set_reg_bit_32(&pdev->adma_regs->ep_cfg, DMA_EP_CFG_ENABLE);
		writel(pep->ring.dma + pep->ring.dequeue,
		       &pdev->adma_regs->ep_traddr);
		return 0;
	}

	/*
	 * The first packet after the doorbell can be corrupted, so
	 * the driver prepares a zero-length packet as the first packet.
	 */
	buffer = pep->ring.dma + pep->ring.dequeue * TRB_SIZE;
	hw_ccs = !!DMA_EP_STS_CCS(readl(&pdev->adma_regs->ep_sts));

	trb = &pep->ring.trbs[TRBS_PER_SEGMENT];
	trb->length = 0;
	trb->buffer = cpu_to_le32(TRB_BUFFER(buffer));
	trb->control = cpu_to_le32((hw_ccs ? TRB_CYCLE : 0) | TRB_TYPE(TRB_NORMAL));

	/*
	 * A LINK TRB is used to force updating the cycle bit in the controller
	 * and to move to the correct place in the transfer ring.
	 */
	trb++;
	trb->length = 0;
	trb->buffer = cpu_to_le32(TRB_BUFFER(buffer));
	trb->control = cpu_to_le32((hw_ccs ? TRB_CYCLE : 0) |
				   TRB_TYPE(TRB_LINK) | TRB_CHAIN);

	if (hw_ccs != pep->ring.ccs)
		trb->control |= cpu_to_le32(TRB_TOGGLE);

	set_reg_bit_32(&pdev->adma_regs->ep_cfg, DMA_EP_CFG_ENABLE);
	writel(pep->ring.dma + (TRBS_PER_SEGMENT * TRB_SIZE),
	       &pdev->adma_regs->ep_traddr);

	return 0;
}

/* Prepare and start transfer on non-default endpoint. */
static int cdns2_ep_run_transfer(struct cdns2_endpoint *pep,
				 struct cdns2_request *preq)
{
	struct cdns2_device *pdev = pep->pdev;
	struct cdns2_ring *ring;
	u32 togle_pcs = 1;
	int num_trbs;
	int ret;

	cdns2_select_ep(pdev, pep->endpoint.address);

	if (preq->request.sg)
		num_trbs = cdns2_count_sg_trbs(pep, &preq->request);
	else
		num_trbs = cdns2_count_trbs(pep, preq->request.dma,
					    preq->request.length);

	ret = cdns2_prepare_ring(pdev, pep, num_trbs);
	if (ret)
		return ret;

	ring = &pep->ring;
	preq->start_trb = ring->enqueue;
	preq->trb = ring->trbs + ring->enqueue;

	if (usb_endpoint_xfer_isoc(pep->endpoint.desc)) {
		cdns2_ep_tx_isoc(pep, preq, num_trbs);
	} else {
		togle_pcs = cdns2_wa1_update_guard(pep, ring->trbs + ring->enqueue);
		cdns2_ep_tx_bulk(pep, preq, num_trbs);
	}

	preq->num_of_trb = num_trbs;

	/*
	 * Memory barrier - cycle bit must be set as the last operation.
	 */
	dma_wmb();

	/* Give the TD to the consumer. */
	if (togle_pcs)
		preq->trb->control = preq->trb->control ^ cpu_to_le32(1);

	cdns2_wa1_tray_restore_cycle_bit(pdev, pep);
	cdns2_dbg_request_trbs(pep, preq);

	if (!pep->wa1_set && !(pep->ep_state & EP_STALLED) && !pep->skip) {
		if (pep->type == USB_ENDPOINT_XFER_ISOC) {
			ret = cdns2_prepare_first_isoc_transfer(pdev, pep);
			if (ret)
				return 0;
		}

		cdns2_set_drdy(pdev, pep);
	}

	return 0;
}

/* Prepare and start transfer for all requests not started yet. */
static int cdns2_start_all_request(struct cdns2_device *pdev,
				   struct cdns2_endpoint *pep)
{
	struct cdns2_request *preq;
	int ret;

	while (!list_empty(&pep->deferred_list)) {
		preq = cdns2_next_preq(&pep->deferred_list);

		ret = cdns2_ep_run_transfer(pep, preq);
		if (ret)
			return ret;

		list_move_tail(&preq->list, &pep->pending_list);
	}

	pep->ep_state &= ~EP_RING_FULL;

	return 0;
}

/*
 * Check whether the TRB has been handled by DMA.
 *
 * Endpoint must be selected before invoking this function.
 *
 * Returns false if the request has not been handled by DMA, else returns true.
 *
 * SR - start ring
 * ER - end ring
 * DQ = ring->dequeue - dequeue position
 * EQ = ring->enqueue - enqueue position
 * ST = preq->start_trb - index of first TRB in transfer ring
 * ET = preq->end_trb - index of last TRB in transfer ring
 * CI = current_index - index of TRB processed by DMA.
 *
 * As a first step, we check whether the TRB is between ST and ET.
 * Then, we check whether the cycle bit for index pep->dequeue
 * is correct.
 *
 * Some rules:
 * 1. ring->dequeue never equals current_index.
 * 2. ring->enqueue never exceeds ring->dequeue.
 * 3. Exception: ring->enqueue == ring->dequeue
 *    and ring->free_trbs is zero.
 *    This case indicates that the TR is full.
 *
 * In the two cases below, the request has been handled.
 * Case 1 - ring->dequeue < current_index
 *	SR ... EQ ... DQ ... CI ... ER
 *	SR ... DQ ... CI ... EQ ... ER
 *
 * Case 2 - ring->dequeue > current_index
 * This situation takes place when CI goes through the LINK TRB at the end of
 * the transfer ring.
 *	SR ... CI ... EQ ... DQ ... ER
 */
static bool cdns2_trb_handled(struct cdns2_endpoint *pep,
			      struct cdns2_request *preq)
{
	struct cdns2_device *pdev = pep->pdev;
	struct cdns2_ring *ring;
	struct cdns2_trb *trb;
	int current_index = 0;
	int handled = 0;
	int doorbell;

	ring = &pep->ring;
	current_index = cdns2_get_dma_pos(pdev, pep);
	doorbell = !!(readl(&pdev->adma_regs->ep_cmd) & DMA_EP_CMD_DRDY);

	/*
	 * Only ISO transfers can use 2 entries outside the standard
	 * Transfer Ring. The first of them is used as a zero-length packet
	 * and the second as a LINK TRB.
	 */
	if (current_index >= TRBS_PER_SEGMENT)
		goto finish;

	/* Current TRB doesn't belong to this request. */
	if (preq->start_trb < preq->end_trb) {
		if (ring->dequeue > preq->end_trb)
			goto finish;

		if (ring->dequeue < preq->start_trb)
			goto finish;
	}

	if (preq->start_trb > preq->end_trb && ring->dequeue > preq->end_trb &&
	    ring->dequeue < preq->start_trb)
		goto finish;

	if (preq->start_trb == preq->end_trb && ring->dequeue != preq->end_trb)
		goto finish;

	trb = &ring->trbs[ring->dequeue];

	if ((le32_to_cpu(trb->control) & TRB_CYCLE) != ring->ccs)
		goto finish;

	if (doorbell == 1 && current_index == ring->dequeue)
		goto finish;

	/* The corner case for TRBS_PER_SEGMENT equal to 2. */
	if (TRBS_PER_SEGMENT == 2 && pep->type != USB_ENDPOINT_XFER_ISOC) {
		handled = 1;
		goto finish;
	}

	if (ring->enqueue == ring->dequeue &&
	    ring->free_trbs == 0) {
		handled = 1;
	} else if (ring->dequeue < current_index) {
		if ((current_index == (TRBS_PER_SEGMENT - 1)) &&
		    !ring->dequeue)
			goto finish;

		handled = 1;
	} else if (ring->dequeue > current_index) {
		handled = 1;
	}

finish:
	trace_cdns2_request_handled(preq, current_index, handled);

	return handled;
}

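/*
 * Skips the remaining TRBs of an errored isochronous TD, gives the
 * request back and re-arms the endpoint.
 */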
static void cdns2_skip_isoc_td(struct cdns2_device *pdev,
			       struct cdns2_endpoint *pep,
			       struct cdns2_request *preq)
{
	struct cdns2_trb *trb;
	int i;

	trb = pep->ring.trbs + pep->ring.dequeue;

	for (i = preq->finished_trb; i < preq->num_of_trb; i++) {
		preq->finished_trb++;
		trace_cdns2_complete_trb(pep, trb);
		cdns2_ep_inc_deq(&pep->ring);
		trb = cdns2_next_trb(pep, trb);
	}

	cdns2_gadget_giveback(pep, preq, 0);
	cdns2_prepare_first_isoc_transfer(pdev, pep);
	pep->skip = false;
	cdns2_set_drdy(pdev, pep);
}

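/*
 * Walks the pending list and gives back every request whose TRBs have
 * already been processed by DMA.
 */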
static void cdns2_transfer_completed(struct cdns2_device *pdev,
				     struct cdns2_endpoint *pep)
{
	struct cdns2_request *preq = NULL;
	bool request_handled = false;
	struct cdns2_trb *trb;

	while (!list_empty(&pep->pending_list)) {
		preq = cdns2_next_preq(&pep->pending_list);
		trb = pep->ring.trbs + pep->ring.dequeue;

		/*
		 * The TRB was changed to a link TRB, and the request
		 * was handled at ep_dequeue.
		 */
		while (TRB_FIELD_TO_TYPE(le32_to_cpu(trb->control)) == TRB_LINK &&
		       le32_to_cpu(trb->length)) {
			trace_cdns2_complete_trb(pep, trb);
			cdns2_ep_inc_deq(&pep->ring);
			trb = pep->ring.trbs + pep->ring.dequeue;
		}

		/*
		 * Re-select endpoint. It could have been changed by another
		 * CPU while handling usb_gadget_giveback_request.
		 */
		cdns2_select_ep(pdev, pep->endpoint.address);

		while (cdns2_trb_handled(pep, preq)) {
			preq->finished_trb++;

			if (preq->finished_trb >= preq->num_of_trb)
				request_handled = true;

			trb = pep->ring.trbs + pep->ring.dequeue;
			trace_cdns2_complete_trb(pep, trb);

			if (pep->dir && pep->type == USB_ENDPOINT_XFER_ISOC)
				/*
				 * For ISOC IN the controller doesn't update
				 * trb->length.
				 */
				preq->request.actual = preq->request.length;
			else
				preq->request.actual +=
					TRB_LEN(le32_to_cpu(trb->length));

			cdns2_ep_inc_deq(&pep->ring);
		}

		if (request_handled) {
			cdns2_gadget_giveback(pep, preq, 0);
			request_handled = false;
		} else {
			goto prepare_next_td;
		}

		if (pep->type != USB_ENDPOINT_XFER_ISOC &&
		    TRBS_PER_SEGMENT == 2)
			break;
	}

prepare_next_td:
	if (pep->skip && preq)
		cdns2_skip_isoc_td(pdev, pep, preq);

	if (!(pep->ep_state & EP_STALLED) &&
	    !(pep->ep_state & EP_STALL_PENDING))
		cdns2_start_all_request(pdev, pep);
}

static void cdns2_wakeup(struct cdns2_device *pdev)
{
	if (!pdev->may_wakeup)
		return;

	/* Start driving resume signaling to indicate remote wakeup. */
	set_reg_bit_8(&pdev->usb_regs->usbcs, USBCS_SIGRSUME);
}

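/*
 * Restores the Workaround 1 cycle bit and, when @rearm is set, rings
 * the doorbell again to restart DMA.
 */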
static void cdns2_rearm_transfer(struct cdns2_endpoint *pep, u8 rearm)
{
	struct cdns2_device *pdev = pep->pdev;

	cdns2_wa1_restore_cycle_bit(pep);

	if (rearm) {
		trace_cdns2_ring(pep);

		/* Cycle Bit must be updated before arming DMA. */
		dma_wmb();

		writel(DMA_EP_CMD_DRDY, &pdev->adma_regs->ep_cmd);

		cdns2_wakeup(pdev);

		trace_cdns2_doorbell_epx(pep,
					 readl(&pdev->adma_regs->ep_traddr));
	}
}

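/* Handles DMA events (ISOERR, TRBERR, IOC/ISP) for a non-default endpoint. */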
static void cdns2_handle_epx_interrupt(struct cdns2_endpoint *pep)
{
	struct cdns2_device *pdev = pep->pdev;
	u8 isoerror = 0;
	u32 ep_sts_reg;
	u32 val;

	cdns2_select_ep(pdev, pep->endpoint.address);

	trace_cdns2_epx_irq(pdev, pep);

	ep_sts_reg = readl(&pdev->adma_regs->ep_sts);
	writel(ep_sts_reg, &pdev->adma_regs->ep_sts);

	if (pep->type == USB_ENDPOINT_XFER_ISOC) {
		u8 mult;
		u8 cs;

		mult = USB_EP_MAXP_MULT(pep->endpoint.desc->wMaxPacketSize);
		cs = pep->dir ? readb(&pdev->epx_regs->ep[pep->num - 1].txcs) :
				readb(&pdev->epx_regs->ep[pep->num - 1].rxcs);
		if (mult > 0)
			isoerror = EPX_CS_ERR(cs);
	}

	/*
	 * Sometimes the ISO Error for mult=1 or mult=2 is not propagated
	 * on time from the USB module to the DMA module. To protect against
	 * this, the driver also checks the txcs/rxcs registers.
	 */
	if ((ep_sts_reg & DMA_EP_STS_ISOERR) || isoerror) {
		clear_reg_bit_32(&pdev->adma_regs->ep_cfg, DMA_EP_CFG_ENABLE);

		/* Wait for DBUSY cleared. */
		readl_poll_timeout_atomic(&pdev->adma_regs->ep_sts, val,
					  !(val & DMA_EP_STS_DBUSY), 1, 125);

		writel(DMA_EP_CMD_DFLUSH, &pep->pdev->adma_regs->ep_cmd);

		/* Wait for DFLUSH cleared. */
		readl_poll_timeout_atomic(&pep->pdev->adma_regs->ep_cmd, val,
					  !(val & DMA_EP_CMD_DFLUSH), 1, 10);

		pep->skip = true;
	}

	if (ep_sts_reg & DMA_EP_STS_TRBERR || pep->skip) {
		if (pep->ep_state & EP_STALL_PENDING &&
		    !(ep_sts_reg & DMA_EP_STS_DESCMIS))
			cdns2_ep_stall_flush(pep);

		/*
		 * For isochronous transfers the driver completes the request
		 * on IOC or on TRBERR. IOC appears only when the device
		 * receives an OUT data packet. If the host disables the
		 * stream or loses some packets, then the only way to finish
		 * all queued transfers is to do it on the TRBERR event.
		 */
		if (pep->type == USB_ENDPOINT_XFER_ISOC && !pep->wa1_set) {
			if (!pep->dir)
				clear_reg_bit_32(&pdev->adma_regs->ep_cfg,
						 DMA_EP_CFG_ENABLE);

			cdns2_transfer_completed(pdev, pep);
			if (pep->ep_state & EP_DEFERRED_DRDY) {
				pep->ep_state &= ~EP_DEFERRED_DRDY;
				cdns2_set_drdy(pdev, pep);
			}

			return;
		}

		cdns2_transfer_completed(pdev, pep);

		if (!(pep->ep_state & EP_STALLED) &&
		    !(pep->ep_state & EP_STALL_PENDING)) {
			if (pep->ep_state & EP_DEFERRED_DRDY) {
				pep->ep_state &= ~EP_DEFERRED_DRDY;
				cdns2_start_all_request(pdev, pep);
			} else {
				cdns2_rearm_transfer(pep, pep->wa1_set);
			}
		}

		return;
	}

	if ((ep_sts_reg & DMA_EP_STS_IOC) || (ep_sts_reg & DMA_EP_STS_ISP))
		cdns2_transfer_completed(pdev, pep);
}

static void cdns2_disconnect_gadget(struct cdns2_device *pdev)
{
	if (pdev->gadget_driver && pdev->gadget_driver->disconnect)
		pdev->gadget_driver->disconnect(&pdev->gadget);
}

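/*
 * Top half of the IRQ handler: masks all interrupt sources and defers
 * the real work to the IRQ thread when any source is pending.
 */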
static irqreturn_t cdns2_usb_irq_handler(int irq, void *data)
{
	struct cdns2_device *pdev = data;
	unsigned long reg_ep_ists;
	u8 reg_usb_irq_m;
	u8 reg_ext_irq_m;
	u8 reg_usb_irq;
	u8 reg_ext_irq;

	if (pdev->in_lpm)
		return IRQ_NONE;

	reg_usb_irq_m = readb(&pdev->interrupt_regs->usbien);
	reg_ext_irq_m = readb(&pdev->interrupt_regs->extien);

	/* Mask all sources of interrupt. */
	writeb(0, &pdev->interrupt_regs->usbien);
	writeb(0, &pdev->interrupt_regs->extien);
	writel(0, &pdev->adma_regs->ep_ien);

	/* Clear interrupt sources. */
	writel(0, &pdev->adma_regs->ep_sts);
	writeb(0, &pdev->interrupt_regs->usbirq);
	writeb(0, &pdev->interrupt_regs->extirq);

	reg_ep_ists = readl(&pdev->adma_regs->ep_ists);
	reg_usb_irq = readb(&pdev->interrupt_regs->usbirq);
	reg_ext_irq = readb(&pdev->interrupt_regs->extirq);

	if (reg_ep_ists || (reg_usb_irq & reg_usb_irq_m) ||
	    (reg_ext_irq & reg_ext_irq_m))
		return IRQ_WAKE_THREAD;

	writeb(USB_IEN_INIT, &pdev->interrupt_regs->usbien);
	writeb(EXTIRQ_WAKEUP, &pdev->interrupt_regs->extien);
	writel(~0, &pdev->adma_regs->ep_ien);

	return IRQ_NONE;
}

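/* Handles device-level events: wakeup, LPM, suspend, reset and setup. */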
static irqreturn_t cdns2_thread_usb_irq_handler(struct cdns2_device *pdev)
{
	u8 usb_irq, ext_irq;
	int speed;
	int i;

	ext_irq = readb(&pdev->interrupt_regs->extirq) & EXTIRQ_WAKEUP;
	writeb(ext_irq, &pdev->interrupt_regs->extirq);

	usb_irq = readb(&pdev->interrupt_regs->usbirq) & USB_IEN_INIT;
	writeb(usb_irq, &pdev->interrupt_regs->usbirq);

	if (!ext_irq && !usb_irq)
		return IRQ_NONE;

	trace_cdns2_usb_irq(usb_irq, ext_irq);

	if (ext_irq & EXTIRQ_WAKEUP) {
		if (pdev->gadget_driver && pdev->gadget_driver->resume) {
			spin_unlock(&pdev->lock);
			pdev->gadget_driver->resume(&pdev->gadget);
			spin_lock(&pdev->lock);
		}
	}

	if (usb_irq & USBIRQ_LPM) {
		u8 reg = readb(&pdev->usb_regs->lpmctrl);

		/* LPM1 enter */
		if (!(reg & LPMCTRLLH_LPMNYET))
			writeb(0, &pdev->usb_regs->sleep_clkgate);
	}

	if (usb_irq & USBIRQ_SUSPEND) {
		if (pdev->gadget_driver && pdev->gadget_driver->suspend) {
			spin_unlock(&pdev->lock);
			pdev->gadget_driver->suspend(&pdev->gadget);
			spin_lock(&pdev->lock);
		}
	}

	if (usb_irq & USBIRQ_URESET) {
		if (pdev->gadget_driver) {
			pdev->dev_address = 0;

			spin_unlock(&pdev->lock);
			usb_gadget_udc_reset(&pdev->gadget,
					     pdev->gadget_driver);
			spin_lock(&pdev->lock);

			/*
			 * USBIRQ_URESET is reported at the beginning of the
			 * reset signal. 100ms is enough time to finish the
			 * reset process. For high speed the reset procedure
			 * is completed when the controller detects HS mode.
			 */
			for (i = 0; i < 100; i++) {
				mdelay(1);
				speed = cdns2_get_speed(pdev);
				if (speed == USB_SPEED_HIGH)
					break;
			}

			pdev->gadget.speed = speed;
			cdns2_enable_l1(pdev, 0);
			cdns2_ep0_config(pdev);
			pdev->may_wakeup = 0;
		}
	}

	if (usb_irq & USBIRQ_SUDAV) {
		pdev->ep0_stage = CDNS2_SETUP_STAGE;
		cdns2_handle_setup_packet(pdev);
	}

	return IRQ_HANDLED;
}

/* Deferred USB interrupt handler. */
static irqreturn_t cdns2_thread_irq_handler(int irq, void *data)
{
	struct cdns2_device *pdev = data;
	unsigned long dma_ep_ists;
	unsigned long flags;
	unsigned int bit;

	local_bh_disable();
	spin_lock_irqsave(&pdev->lock, flags);

	cdns2_thread_usb_irq_handler(pdev);

	dma_ep_ists = readl(&pdev->adma_regs->ep_ists);
	if (!dma_ep_ists)
		goto unlock;

	trace_cdns2_dma_ep_ists(dma_ep_ists);

	/* Handle default endpoint OUT. */
	if (dma_ep_ists & DMA_EP_ISTS_EP_OUT0)
		cdns2_handle_ep0_interrupt(pdev, USB_DIR_OUT);

	/* Handle default endpoint IN. */
	if (dma_ep_ists & DMA_EP_ISTS_EP_IN0)
		cdns2_handle_ep0_interrupt(pdev, USB_DIR_IN);

	dma_ep_ists &= ~(DMA_EP_ISTS_EP_OUT0 | DMA_EP_ISTS_EP_IN0);

	for_each_set_bit(bit, &dma_ep_ists, sizeof(u32) * BITS_PER_BYTE) {
		u8 ep_idx = bit > 16 ? (bit - 16) * 2 : (bit * 2) - 1;

		/*
		 * Endpoints in pdev->eps[] are held in the order:
		 * ep0, ep1out, ep1in, ep2out, ep2in ... ep15out, ep15in,
		 * but in dma_ep_ists in the order:
		 * ep0 ep1out ep2out ... ep15out ep0in ep1in ... ep15in.
		 */
		cdns2_handle_epx_interrupt(&pdev->eps[ep_idx]);
	}

unlock:
	writel(~0, &pdev->adma_regs->ep_ien);
	writeb(USB_IEN_INIT, &pdev->interrupt_regs->usbien);
	writeb(EXTIRQ_WAKEUP, &pdev->interrupt_regs->extien);

	spin_unlock_irqrestore(&pdev->lock, flags);
	local_bh_enable();

	return IRQ_HANDLED;
}

/* Calculates and assigns onchip memory for endpoints. */
static void cdns2_eps_onchip_buffer_init(struct cdns2_device *pdev)
{
	struct cdns2_endpoint *pep;
	int min_buf_tx = 0;
	int min_buf_rx = 0;
	u16 tx_offset = 0;
	u16 rx_offset = 0;
	int free;
	int i;

	for (i = 0; i < CDNS2_ENDPOINTS_NUM; i++) {
		pep = &pdev->eps[i];

		if (!(pep->ep_state & EP_CLAIMED))
			continue;

		if (pep->dir)
			min_buf_tx += pep->buffering;
		else
			min_buf_rx += pep->buffering;
	}

	for (i = 0; i < CDNS2_ENDPOINTS_NUM; i++) {
		pep = &pdev->eps[i];

		if (!(pep->ep_state & EP_CLAIMED))
			continue;

		if (pep->dir) {
			free = pdev->onchip_tx_buf - min_buf_tx;

			if (free + pep->buffering >= 4)
				free = 4;
			else
				free = free + pep->buffering;

			min_buf_tx = min_buf_tx - pep->buffering + free;

			pep->buffering = free;

			writel(tx_offset,
			       &pdev->epx_regs->txstaddr[pep->num - 1]);

			dev_dbg(pdev->dev, "%s onchip address %04x, buffering: %d\n",
				pep->name, tx_offset, pep->buffering);

			tx_offset += pep->buffering * 1024;
		} else {
			free = pdev->onchip_rx_buf - min_buf_rx;

			if (free + pep->buffering >= 4)
				free = 4;
			else
				free = free + pep->buffering;

			min_buf_rx = min_buf_rx - pep->buffering + free;

			pep->buffering = free;
			writel(rx_offset,
			       &pdev->epx_regs->rxstaddr[pep->num - 1]);

			dev_dbg(pdev->dev, "%s onchip address %04x, buffering: %d\n",
				pep->name, rx_offset, pep->buffering);

			rx_offset += pep->buffering * 1024;
		}
	}
}

/* Configure hardware endpoint. */
static int cdns2_ep_config(struct cdns2_endpoint *pep, bool enable)
{
	bool is_iso_ep = (pep->type == USB_ENDPOINT_XFER_ISOC);
	struct cdns2_device *pdev = pep->pdev;
	u32 max_packet_size;
	u8 dir = 0;
	u8 ep_cfg;
	u8 mult;
	u32 val;
	int ret;

	switch (pep->type) {
	case USB_ENDPOINT_XFER_INT:
		ep_cfg = EPX_CON_TYPE_INT;
		break;
	case USB_ENDPOINT_XFER_BULK:
		ep_cfg = EPX_CON_TYPE_BULK;
		break;
	default:
		mult = USB_EP_MAXP_MULT(pep->endpoint.desc->wMaxPacketSize);
		ep_cfg = mult << EPX_CON_ISOD_SHIFT;
		ep_cfg |= EPX_CON_TYPE_ISOC;

		if (pep->dir) {
			set_reg_bit_8(&pdev->epx_regs->isoautoarm, BIT(pep->num));
			set_reg_bit_8(&pdev->epx_regs->isoautodump, BIT(pep->num));
			set_reg_bit_8(&pdev->epx_regs->isodctrl, BIT(pep->num));
		}
	}

	switch (pdev->gadget.speed) {
	case USB_SPEED_FULL:
		max_packet_size = is_iso_ep ? 1023 : 64;
		break;
	case USB_SPEED_HIGH:
		max_packet_size = is_iso_ep ? 1024 : 512;
		break;
	default:
		/* All other speeds are not supported. */
		return -EINVAL;
	}

	ep_cfg |= (EPX_CON_VAL | (pep->buffering - 1));

	if (pep->dir) {
		dir = FIFOCTRL_IO_TX;
		writew(max_packet_size, &pdev->epx_regs->txmaxpack[pep->num - 1]);
		writeb(ep_cfg, &pdev->epx_regs->ep[pep->num - 1].txcon);
	} else {
		writew(max_packet_size, &pdev->epx_regs->rxmaxpack[pep->num - 1]);
		writeb(ep_cfg, &pdev->epx_regs->ep[pep->num - 1].rxcon);
	}

	writeb(pep->num | dir | FIFOCTRL_FIFOAUTO,
	       &pdev->usb_regs->fifoctrl);
	writeb(pep->num | dir, &pdev->epx_regs->endprst);
	writeb(pep->num | ENDPRST_FIFORST | ENDPRST_TOGRST | dir,
	       &pdev->epx_regs->endprst);

	if (max_packet_size == 1024)
		pep->trb_burst_size = 128;
	else if (max_packet_size >= 512)
		pep->trb_burst_size = 64;
	else
		pep->trb_burst_size = 16;

	cdns2_select_ep(pdev, pep->num | pep->dir);
	writel(DMA_EP_CMD_EPRST | DMA_EP_CMD_DFLUSH, &pdev->adma_regs->ep_cmd);

	ret = readl_poll_timeout_atomic(&pdev->adma_regs->ep_cmd, val,
					!(val & (DMA_EP_CMD_DFLUSH |
						 DMA_EP_CMD_EPRST)),
					1, 1000);

	if (ret)
		return ret;

	writel(DMA_EP_STS_TRBERR | DMA_EP_STS_ISOERR, &pdev->adma_regs->ep_sts_en);

	if (enable)
		writel(DMA_EP_CFG_ENABLE, &pdev->adma_regs->ep_cfg);

	trace_cdns2_epx_hw_cfg(pdev, pep);

	dev_dbg(pdev->dev, "Configure %s: with MPS: %08x, ep con: %02x\n",
		pep->name, max_packet_size, ep_cfg);

	return 0;
}

struct usb_request *cdns2_gadget_ep_alloc_request(struct usb_ep *ep,
						  gfp_t gfp_flags)
{
	struct cdns2_endpoint *pep = ep_to_cdns2_ep(ep);
	struct cdns2_request *preq;

	preq = kzalloc(sizeof(*preq), gfp_flags);
	if (!preq)
		return NULL;

	preq->pep = pep;

	trace_cdns2_alloc_request(preq);

	return &preq->request;
}

void cdns2_gadget_ep_free_request(struct usb_ep *ep,
				  struct usb_request *request)
{
	struct cdns2_request *preq = to_cdns2_request(request);

	trace_cdns2_free_request(preq);
	kfree(preq);
}

static int cdns2_gadget_ep_enable(struct usb_ep *ep,
				  const struct usb_endpoint_descriptor *desc)
{
	u32 reg = DMA_EP_STS_EN_TRBERREN;
	struct cdns2_endpoint *pep;
	struct cdns2_device *pdev;
	unsigned long flags;
	int enable = 1;
	int ret = 0;

	if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT ||
	    !desc->wMaxPacketSize) {
		return -EINVAL;
	}

	pep = ep_to_cdns2_ep(ep);
	pdev = pep->pdev;

	if (dev_WARN_ONCE(pdev->dev, pep->ep_state & EP_ENABLED,
			  "%s is already enabled\n", pep->name))
		return 0;

	spin_lock_irqsave(&pdev->lock, flags);

	pep->type = usb_endpoint_type(desc);
	pep->interval = desc->bInterval ? BIT(desc->bInterval - 1) : 0;

	if (pdev->gadget.speed == USB_SPEED_FULL)
		if (pep->type == USB_ENDPOINT_XFER_INT)
			pep->interval = desc->bInterval;

	if (pep->interval > ISO_MAX_INTERVAL &&
	    pep->type == USB_ENDPOINT_XFER_ISOC) {
		dev_err(pdev->dev, "ISO period is limited to %d (current: %d)\n",
			ISO_MAX_INTERVAL, pep->interval);

		ret = -EINVAL;
		goto exit;
	}

	/*
	 * During ISO OUT traffic the DMA reads the Transfer Ring for an EP
	 * which has never got the doorbell.
	 * This issue was detected only in simulation, but the driver adds
	 * protection against it anyway: it enables the ISO OUT endpoint
	 * before setting DRBL. This special treatment of ISO OUT endpoints
	 * is recommended by the controller specification.
	 */
	if (pep->type == USB_ENDPOINT_XFER_ISOC && !pep->dir)
		enable = 0;

	ret = cdns2_alloc_tr_segment(pep);
	if (ret)
		goto exit;

	ret = cdns2_ep_config(pep, enable);
	if (ret) {
		cdns2_free_tr_segment(pep);
		ret = -EINVAL;
		goto exit;
	}

	trace_cdns2_gadget_ep_enable(pep);

	pep->ep_state &= ~(EP_STALLED | EP_STALL_PENDING);
	pep->ep_state |= EP_ENABLED;
	pep->wa1_set = 0;
	pep->ring.enqueue = 0;
	pep->ring.dequeue = 0;
	reg = readl(&pdev->adma_regs->ep_sts);
	pep->ring.pcs = !!DMA_EP_STS_CCS(reg);
	pep->ring.ccs = !!DMA_EP_STS_CCS(reg);

	writel(pep->ring.dma, &pdev->adma_regs->ep_traddr);

	/* One TRB is reserved for the link TRB used in DMULT mode. */
	pep->ring.free_trbs = TRBS_PER_SEGMENT - 1;

exit:
	spin_unlock_irqrestore(&pdev->lock, flags);

	return ret;
}

static int cdns2_gadget_ep_disable(struct usb_ep *ep)
{
	struct cdns2_endpoint *pep;
	struct cdns2_request *preq;
	struct cdns2_device *pdev;
	unsigned long flags;
	int val;

	if (!ep)
		return -EINVAL;

	pep = ep_to_cdns2_ep(ep);
	pdev = pep->pdev;

	if (dev_WARN_ONCE(pdev->dev, !(pep->ep_state & EP_ENABLED),
			  "%s is already disabled\n", pep->name))
		return 0;

	spin_lock_irqsave(&pdev->lock, flags);

	trace_cdns2_gadget_ep_disable(pep);

	cdns2_select_ep(pdev, ep->desc->bEndpointAddress);

	clear_reg_bit_32(&pdev->adma_regs->ep_cfg, DMA_EP_CFG_ENABLE);

	/*
	 * The driver needs some time before resetting the endpoint.
	 * It needs to wait for the DBUSY bit to clear or for the timeout to
	 * expire. 10us is enough time for the controller to stop the
	 * transfer.
	 */
	readl_poll_timeout_atomic(&pdev->adma_regs->ep_sts, val,
				  !(val & DMA_EP_STS_DBUSY), 1, 10);
	writel(DMA_EP_CMD_EPRST, &pdev->adma_regs->ep_cmd);

	readl_poll_timeout_atomic(&pdev->adma_regs->ep_cmd, val,
				  !(val & (DMA_EP_CMD_DFLUSH | DMA_EP_CMD_EPRST)),
				  1, 1000);

	while (!list_empty(&pep->pending_list)) {
		preq = cdns2_next_preq(&pep->pending_list);
		cdns2_gadget_giveback(pep, preq, -ESHUTDOWN);
	}

	while (!list_empty(&pep->deferred_list)) {
		preq = cdns2_next_preq(&pep->deferred_list);
		cdns2_gadget_giveback(pep, preq, -ESHUTDOWN);
	}

	ep->desc = NULL;
	pep->ep_state &= ~EP_ENABLED;

	spin_unlock_irqrestore(&pdev->lock, flags);

	return 0;
}

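/*
 * Maps the request for DMA, adds it to the deferred list and starts it
 * immediately unless the endpoint is stalled or a stall is pending.
 */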
static int cdns2_ep_enqueue(struct cdns2_endpoint *pep,
			    struct cdns2_request *preq,
			    gfp_t gfp_flags)
{
	struct cdns2_device *pdev = pep->pdev;
	struct usb_request *request;
	int ret;

	request = &preq->request;
	request->actual = 0;
	request->status = -EINPROGRESS;

	ret = usb_gadget_map_request_by_dev(pdev->dev, request, pep->dir);
	if (ret) {
		trace_cdns2_request_enqueue_error(preq);
		return ret;
	}

	list_add_tail(&preq->list, &pep->deferred_list);
	trace_cdns2_request_enqueue(preq);

	if (!(pep->ep_state & EP_STALLED) && !(pep->ep_state & EP_STALL_PENDING))
		cdns2_start_all_request(pdev, pep);

	return 0;
}

static int cdns2_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
				 gfp_t gfp_flags)
{
	struct usb_request *zlp_request;
	struct cdns2_request *preq;
	struct cdns2_endpoint *pep;
	struct cdns2_device *pdev;
	unsigned long flags;
	int ret;

	if (!request || !ep)
		return -EINVAL;

	pep = ep_to_cdns2_ep(ep);
	pdev = pep->pdev;

	if (!(pep->ep_state & EP_ENABLED)) {
		dev_err(pdev->dev, "%s: can't queue to disabled endpoint\n",
			pep->name);
		return -EINVAL;
	}

	spin_lock_irqsave(&pdev->lock, flags);

	preq = to_cdns2_request(request);
	ret = cdns2_ep_enqueue(pep, preq, gfp_flags);

	if (ret == 0 && request->zero && request->length &&
	    (request->length % ep->maxpacket == 0)) {
		struct cdns2_request *preq;

		zlp_request = cdns2_gadget_ep_alloc_request(ep, GFP_ATOMIC);
		zlp_request->buf = pdev->zlp_buf;
		zlp_request->length = 0;

		preq = to_cdns2_request(zlp_request);
		ret = cdns2_ep_enqueue(pep, preq, gfp_flags);
	}

	spin_unlock_irqrestore(&pdev->lock, flags);
	return ret;
}

int cdns2_gadget_ep_dequeue(struct usb_ep *ep,
			    struct usb_request *request)
{
	struct cdns2_request *preq, *preq_temp, *cur_preq;
	struct cdns2_endpoint *pep;
	struct cdns2_trb *link_trb;
	u8 req_on_hw_ring = 0;
	unsigned long flags;
	u32 buffer;
	int val, i;

	if (!ep || !request || !ep->desc)
		return -EINVAL;

	pep = ep_to_cdns2_ep(ep);
	if (!pep->endpoint.desc) {
		dev_err(pep->pdev->dev, "%s: can't dequeue to disabled endpoint\n",
			pep->name);
		return -ESHUTDOWN;
	}

	/* Requests have been dequeued while disabling the endpoint. */
	if (!(pep->ep_state & EP_ENABLED))
		return 0;

	spin_lock_irqsave(&pep->pdev->lock, flags);

	cur_preq = to_cdns2_request(request);
	trace_cdns2_request_dequeue(cur_preq);

	list_for_each_entry_safe(preq, preq_temp, &pep->pending_list, list) {
		if (cur_preq == preq) {
			req_on_hw_ring = 1;
			goto found;
		}
	}

	list_for_each_entry_safe(preq, preq_temp, &pep->deferred_list, list) {
		if (cur_preq == preq)
			goto found;
	}

	goto not_found;

found:
	link_trb = preq->trb;

	/* Update ring only if the removed request is on the pending list. */
	if (req_on_hw_ring && link_trb) {
		/* Stop DMA */
		writel(DMA_EP_CMD_DFLUSH, &pep->pdev->adma_regs->ep_cmd);

		/* Wait for DFLUSH cleared. */
		readl_poll_timeout_atomic(&pep->pdev->adma_regs->ep_cmd, val,
					  !(val & DMA_EP_CMD_DFLUSH), 1, 1000);

		buffer = cpu_to_le32(TRB_BUFFER(pep->ring.dma +
				     ((preq->end_trb + 1) * TRB_SIZE)));

		for (i = 0; i < preq->num_of_trb; i++) {
			link_trb->buffer = buffer;
			link_trb->control = cpu_to_le32((le32_to_cpu(link_trb->control)
					    & TRB_CYCLE) | TRB_CHAIN |
					    TRB_TYPE(TRB_LINK));

			trace_cdns2_queue_trb(pep, link_trb);
			link_trb = cdns2_next_trb(pep, link_trb);
		}

		if (pep->wa1_trb == preq->trb)
			cdns2_wa1_restore_cycle_bit(pep);
	}

	cdns2_gadget_giveback(pep, cur_preq, -ECONNRESET);

	preq = cdns2_next_preq(&pep->pending_list);
	if (preq)
		cdns2_rearm_transfer(pep, 1);

not_found:
	spin_unlock_irqrestore(&pep->pdev->lock, flags);
	return 0;
}

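/*
 * Sets (@value != 0) or clears the endpoint stall. On clearing, the data
 * toggle is also reset and queued transfers are restarted.
 */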
int cdns2_halt_endpoint(struct cdns2_device *pdev,
			struct cdns2_endpoint *pep,
			int value)
{
	u8 __iomem *conf;
	int dir = 0;

	if (!(pep->ep_state & EP_ENABLED))
		return -EPERM;

	if (pep->dir) {
		dir = ENDPRST_IO_TX;
		conf = &pdev->epx_regs->ep[pep->num - 1].txcon;
	} else {
		conf = &pdev->epx_regs->ep[pep->num - 1].rxcon;
	}

	if (!value) {
		struct cdns2_trb *trb = NULL;
		struct cdns2_request *preq;
		struct cdns2_trb trb_tmp;

		preq = cdns2_next_preq(&pep->pending_list);
		if (preq) {
			trb = preq->trb;
			if (trb) {
				trb_tmp = *trb;
				trb->control = trb->control ^ cpu_to_le32(TRB_CYCLE);
			}
		}

		trace_cdns2_ep_halt(pep, 0, 0);

		/* Resets Sequence Number */
		writeb(dir | pep->num, &pdev->epx_regs->endprst);
		writeb(dir | ENDPRST_TOGRST | pep->num,
		       &pdev->epx_regs->endprst);

		clear_reg_bit_8(conf, EPX_CON_STALL);

		pep->ep_state &= ~(EP_STALLED | EP_STALL_PENDING);

		if (preq) {
			if (trb)
				*trb = trb_tmp;

			cdns2_rearm_transfer(pep, 1);
		}

		cdns2_start_all_request(pdev, pep);
	} else {
		trace_cdns2_ep_halt(pep, 1, 0);
		set_reg_bit_8(conf, EPX_CON_STALL);
		writeb(dir | pep->num, &pdev->epx_regs->endprst);
		writeb(dir | ENDPRST_FIFORST | pep->num,
		       &pdev->epx_regs->endprst);
		pep->ep_state |= EP_STALLED;
	}

	return 0;
}

/* Sets/clears stall on selected endpoint. */
static int cdns2_gadget_ep_set_halt(struct usb_ep *ep, int value)
{
	struct cdns2_endpoint *pep = ep_to_cdns2_ep(ep);
	struct cdns2_device *pdev = pep->pdev;
	struct cdns2_request *preq;
	unsigned long flags = 0;
	int ret;

	spin_lock_irqsave(&pdev->lock, flags);

	preq = cdns2_next_preq(&pep->pending_list);
	if (value && preq) {
		trace_cdns2_ep_busy_try_halt_again(pep);
		ret = -EAGAIN;
		goto done;
	}

	if (!value)
		pep->ep_state &= ~EP_WEDGE;

	ret = cdns2_halt_endpoint(pdev, pep, value);

done:
	spin_unlock_irqrestore(&pdev->lock, flags);
	return ret;
}

static int cdns2_gadget_ep_set_wedge(struct usb_ep *ep)
{
	struct cdns2_endpoint *pep = ep_to_cdns2_ep(ep);

	cdns2_gadget_ep_set_halt(ep, 1);
	pep->ep_state |= EP_WEDGE;

	return 0;
}

1923 static struct
cdns2_find_available_ep(struct cdns2_device * pdev,struct usb_endpoint_descriptor * desc)1924 cdns2_endpoint *cdns2_find_available_ep(struct cdns2_device *pdev,
1925 struct usb_endpoint_descriptor *desc)
1926 {
1927 struct cdns2_endpoint *pep;
1928 struct usb_ep *ep;
1929 int ep_correct;
1930
1931 list_for_each_entry(ep, &pdev->gadget.ep_list, ep_list) {
1932 unsigned long num;
1933 int ret;
1934 /* ep name pattern likes epXin or epXout. */
1935 char c[2] = {ep->name[2], '\0'};
1936
1937 ret = kstrtoul(c, 10, &num);
1938 if (ret)
1939 return ERR_PTR(ret);
1940 pep = ep_to_cdns2_ep(ep);
1941
1942 if (pep->num != num)
1943 continue;
1944
1945 ep_correct = (pep->endpoint.caps.dir_in &&
1946 usb_endpoint_dir_in(desc)) ||
1947 (pep->endpoint.caps.dir_out &&
1948 usb_endpoint_dir_out(desc));
1949
1950 if (ep_correct && !(pep->ep_state & EP_CLAIMED))
1951 return pep;
1952 }
1953
1954 return ERR_PTR(-ENOENT);
1955 }

/*
 * Matches an endpoint descriptor to a hardware endpoint; the choice
 * also determines how on-chip memory is shared between endpoints.
 */
static struct
usb_ep *cdns2_gadget_match_ep(struct usb_gadget *gadget,
			      struct usb_endpoint_descriptor *desc,
			      struct usb_ss_ep_comp_descriptor *comp_desc)
{
	struct cdns2_device *pdev = gadget_to_cdns2_device(gadget);
	struct cdns2_endpoint *pep;
	unsigned long flags;

	pep = cdns2_find_available_ep(pdev, desc);
	if (IS_ERR(pep)) {
		dev_err(pdev->dev, "no available ep\n");
		return NULL;
	}

	spin_lock_irqsave(&pdev->lock, flags);

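	/*
	 * Give isochronous endpoints deeper on-chip buffering; other
	 * transfer types get a single buffer.
	 */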
	if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_ISOC)
		pep->buffering = 4;
	else
		pep->buffering = 1;

	pep->ep_state |= EP_CLAIMED;
	spin_unlock_irqrestore(&pdev->lock, flags);

	return &pep->endpoint;
}

static const struct usb_ep_ops cdns2_gadget_ep_ops = {
	.enable = cdns2_gadget_ep_enable,
	.disable = cdns2_gadget_ep_disable,
	.alloc_request = cdns2_gadget_ep_alloc_request,
	.free_request = cdns2_gadget_ep_free_request,
	.queue = cdns2_gadget_ep_queue,
	.dequeue = cdns2_gadget_ep_dequeue,
	.set_halt = cdns2_gadget_ep_set_halt,
	.set_wedge = cdns2_gadget_ep_set_wedge,
};

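/* Returns the current USB frame number from the FRMNR register. */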
static int cdns2_gadget_get_frame(struct usb_gadget *gadget)
{
	struct cdns2_device *pdev = gadget_to_cdns2_device(gadget);

	return readw(&pdev->usb_regs->frmnr);
}

static int cdns2_gadget_wakeup(struct usb_gadget *gadget)
{
	struct cdns2_device *pdev = gadget_to_cdns2_device(gadget);
	unsigned long flags;

	spin_lock_irqsave(&pdev->lock, flags);
	cdns2_wakeup(pdev);
	spin_unlock_irqrestore(&pdev->lock, flags);

	return 0;
}

static int cdns2_gadget_set_selfpowered(struct usb_gadget *gadget,
					int is_selfpowered)
{
	struct cdns2_device *pdev = gadget_to_cdns2_device(gadget);
	unsigned long flags;

	spin_lock_irqsave(&pdev->lock, flags);
	pdev->is_selfpowered = !!is_selfpowered;
	spin_unlock_irqrestore(&pdev->lock, flags);
	return 0;
}

/* Disable interrupts and begin the controller halting process. */
static void cdns2_quiesce(struct cdns2_device *pdev)
{
	set_reg_bit_8(&pdev->usb_regs->usbcs, USBCS_DISCON);

	/* Disable interrupts. */
	writeb(0, &pdev->interrupt_regs->extien);
	writeb(0, &pdev->interrupt_regs->usbien);
	writew(0, &pdev->adma_regs->ep_ien);

	/* Clear the interrupt line. */
	writeb(0x0, &pdev->interrupt_regs->usbirq);
}

static void cdns2_gadget_config(struct cdns2_device *pdev)
{
	cdns2_ep0_config(pdev);

	/* Enable DMA interrupts for all endpoints. */
	writel(~0x0, &pdev->adma_regs->ep_ien);
	cdns2_enable_l1(pdev, 0);
	writeb(USB_IEN_INIT, &pdev->interrupt_regs->usbien);
	writeb(EXTIRQ_WAKEUP, &pdev->interrupt_regs->extien);
	writel(DMA_CONF_DMULT, &pdev->adma_regs->conf);
}

static int cdns2_gadget_pullup(struct usb_gadget *gadget, int is_on)
{
	struct cdns2_device *pdev = gadget_to_cdns2_device(gadget);
	unsigned long flags;

	trace_cdns2_pullup(is_on);

	/*
	 * Disable event handling while the controller is being
	 * enabled/disabled.
	 */
	disable_irq(pdev->irq);
	spin_lock_irqsave(&pdev->lock, flags);

	if (is_on) {
		cdns2_gadget_config(pdev);
		clear_reg_bit_8(&pdev->usb_regs->usbcs, USBCS_DISCON);
	} else {
		cdns2_quiesce(pdev);
	}

	spin_unlock_irqrestore(&pdev->lock, flags);
	enable_irq(pdev->irq);

	return 0;
}

static int cdns2_gadget_udc_start(struct usb_gadget *gadget,
				  struct usb_gadget_driver *driver)
{
	struct cdns2_device *pdev = gadget_to_cdns2_device(gadget);
	enum usb_device_speed max_speed = driver->max_speed;
	unsigned long flags;

	spin_lock_irqsave(&pdev->lock, flags);
	pdev->gadget_driver = driver;

	/* Limit speed if necessary. */
	max_speed = min(driver->max_speed, gadget->max_speed);

	switch (max_speed) {
	case USB_SPEED_FULL:
		writeb(SPEEDCTRL_HSDISABLE, &pdev->usb_regs->speedctrl);
		break;
	case USB_SPEED_HIGH:
		writeb(0, &pdev->usb_regs->speedctrl);
		break;
	default:
		dev_err(pdev->dev, "invalid maximum_speed parameter %d\n",
			max_speed);
		fallthrough;
	case USB_SPEED_UNKNOWN:
		/* Default to high-speed. */
		max_speed = USB_SPEED_HIGH;
		break;
	}

	/*
	 * Reset all USB endpoints: select the TX (IN) direction, reset
	 * FIFOs and data toggles for IN endpoints, then do the same
	 * for the OUT direction.
	 */
	writeb(ENDPRST_IO_TX, &pdev->usb_regs->endprst);
	writeb(ENDPRST_FIFORST | ENDPRST_TOGRST | ENDPRST_IO_TX,
	       &pdev->usb_regs->endprst);
	writeb(ENDPRST_FIFORST | ENDPRST_TOGRST, &pdev->usb_regs->endprst);

	cdns2_eps_onchip_buffer_init(pdev);

	cdns2_gadget_config(pdev);
	spin_unlock_irqrestore(&pdev->lock, flags);

	return 0;
}

static int cdns2_gadget_udc_stop(struct usb_gadget *gadget)
{
	struct cdns2_device *pdev = gadget_to_cdns2_device(gadget);
	struct cdns2_endpoint *pep;
	u32 bEndpointAddress;
	struct usb_ep *ep;
	int val;

	pdev->gadget_driver = NULL;
	pdev->gadget.speed = USB_SPEED_UNKNOWN;

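	/*
	 * Issue a DMA reset for every endpoint and wait (up to 100 us
	 * each) for the reset to complete.
	 */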
	list_for_each_entry(ep, &pdev->gadget.ep_list, ep_list) {
		pep = ep_to_cdns2_ep(ep);
		bEndpointAddress = pep->num | pep->dir;
		cdns2_select_ep(pdev, bEndpointAddress);
		writel(DMA_EP_CMD_EPRST, &pdev->adma_regs->ep_cmd);
		readl_poll_timeout_atomic(&pdev->adma_regs->ep_cmd, val,
					  !(val & DMA_EP_CMD_EPRST), 1, 100);
	}

	cdns2_quiesce(pdev);

	writeb(ENDPRST_IO_TX, &pdev->usb_regs->endprst);
	writeb(ENDPRST_FIFORST | ENDPRST_TOGRST | ENDPRST_IO_TX,
	       &pdev->epx_regs->endprst);
	writeb(ENDPRST_FIFORST | ENDPRST_TOGRST, &pdev->epx_regs->endprst);

	return 0;
}

static const struct usb_gadget_ops cdns2_gadget_ops = {
	.get_frame = cdns2_gadget_get_frame,
	.wakeup = cdns2_gadget_wakeup,
	.set_selfpowered = cdns2_gadget_set_selfpowered,
	.pullup = cdns2_gadget_pullup,
	.udc_start = cdns2_gadget_udc_start,
	.udc_stop = cdns2_gadget_udc_stop,
	.match_ep = cdns2_gadget_match_ep,
};

static void cdns2_free_all_eps(struct cdns2_device *pdev)
{
	int i;

	for (i = 0; i < CDNS2_ENDPOINTS_NUM; i++)
		cdns2_free_tr_segment(&pdev->eps[i]);
}

/* Initializes software endpoints of gadget. */
static int cdns2_init_eps(struct cdns2_device *pdev)
{
	struct cdns2_endpoint *pep;
	int i;

	for (i = 0; i < CDNS2_ENDPOINTS_NUM; i++) {
		bool direction = !(i & 1); /* Start from OUT endpoint. */
		u8 epnum = ((i + 1) >> 1);

		/*
		 * Endpoints are held in pdev->eps[] in the order:
		 * ep0, ep1out, ep1in ... ep15out, ep15in.
		 */
		if (!CDNS2_IF_EP_EXIST(pdev, epnum, direction))
			continue;

		pep = &pdev->eps[i];
		pep->pdev = pdev;
		pep->num = epnum;
		/* 0 for OUT, 1 for IN. */
		pep->dir = direction ? USB_DIR_IN : USB_DIR_OUT;
		pep->idx = i;

		/* Ep0in and ep0out are represented by pdev->eps[0]. */
		if (!epnum) {
			int ret;

			snprintf(pep->name, sizeof(pep->name), "ep%d%s",
				 epnum, "BiDir");

			cdns2_init_ep0(pdev, pep);

			ret = cdns2_alloc_tr_segment(pep);
			if (ret) {
				dev_err(pdev->dev, "Failed to init ep0\n");
				return ret;
			}
		} else {
			snprintf(pep->name, sizeof(pep->name), "ep%d%s",
				 epnum, direction ? "in" : "out");
			pep->endpoint.name = pep->name;

			usb_ep_set_maxpacket_limit(&pep->endpoint, 1024);
			pep->endpoint.ops = &cdns2_gadget_ep_ops;
			list_add_tail(&pep->endpoint.ep_list, &pdev->gadget.ep_list);

			pep->endpoint.caps.dir_in = direction;
			pep->endpoint.caps.dir_out = !direction;

			pep->endpoint.caps.type_iso = 1;
			pep->endpoint.caps.type_bulk = 1;
			pep->endpoint.caps.type_int = 1;
		}

		pep->endpoint.name = pep->name;
		pep->ep_state = 0;

		dev_dbg(pdev->dev, "Init %s, SupType: CTRL: %s, INT: %s, "
			"BULK: %s, ISOC %s, SupDir IN: %s, OUT: %s\n",
			pep->name,
			str_yes_no(pep->endpoint.caps.type_control),
			str_yes_no(pep->endpoint.caps.type_int),
			str_yes_no(pep->endpoint.caps.type_bulk),
			str_yes_no(pep->endpoint.caps.type_iso),
			str_yes_no(pep->endpoint.caps.dir_in),
			str_yes_no(pep->endpoint.caps.dir_out));

		INIT_LIST_HEAD(&pep->pending_list);
		INIT_LIST_HEAD(&pep->deferred_list);
	}

	return 0;
}

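/*
 * Resets the controller, reads the hardware configuration properties
 * and registers the UDC with the gadget core.
 */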
static int cdns2_gadget_start(struct cdns2_device *pdev)
{
	u32 max_speed;
	void *buf;
	int ret;

	pdev->usb_regs = pdev->regs;
	pdev->ep0_regs = pdev->regs;
	pdev->epx_regs = pdev->regs;
	pdev->interrupt_regs = pdev->regs;
	pdev->adma_regs = pdev->regs + CDNS2_ADMA_REGS_OFFSET;

	/* Reset controller. */
	writeb(CPUCTRL_SW_RST | CPUCTRL_UPCLK | CPUCTRL_WUEN,
	       &pdev->usb_regs->cpuctrl);
	usleep_range(5, 10);

	usb_initialize_gadget(pdev->dev, &pdev->gadget, NULL);

	device_property_read_u16(pdev->dev, "cdns,on-chip-tx-buff-size",
				 &pdev->onchip_tx_buf);
	device_property_read_u16(pdev->dev, "cdns,on-chip-rx-buff-size",
				 &pdev->onchip_rx_buf);
	device_property_read_u32(pdev->dev, "cdns,avail-endpoints",
				 &pdev->eps_supported);

	/*
	 * The driver assumes that each USBHS controller has at least
	 * one IN and one OUT non-control endpoint.
	 */
	if (!pdev->onchip_tx_buf && !pdev->onchip_rx_buf) {
		ret = -EINVAL;
		dev_err(pdev->dev, "Invalid on-chip memory configuration\n");
		goto put_gadget;
	}

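	/*
	 * In the "cdns,avail-endpoints" bitmap, bits 0 and 16 (the
	 * 0x00010001 mask) cover the two directions of ep0, so at
	 * least one other bit must be set for a usable non-control
	 * endpoint.
	 */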
	if (!(pdev->eps_supported & ~0x00010001)) {
		ret = -EINVAL;
		dev_err(pdev->dev, "No hardware endpoints available\n");
		goto put_gadget;
	}

	max_speed = usb_get_maximum_speed(pdev->dev);

	switch (max_speed) {
	case USB_SPEED_FULL:
	case USB_SPEED_HIGH:
		break;
	default:
		dev_err(pdev->dev, "invalid maximum_speed parameter %d\n",
			max_speed);
		fallthrough;
	case USB_SPEED_UNKNOWN:
		max_speed = USB_SPEED_HIGH;
		break;
	}

	pdev->gadget.max_speed = max_speed;
	pdev->gadget.speed = USB_SPEED_UNKNOWN;
	pdev->gadget.ops = &cdns2_gadget_ops;
	pdev->gadget.name = "usbhs-gadget";
	pdev->gadget.quirk_avoids_skb_reserve = 1;
	pdev->gadget.irq = pdev->irq;

	spin_lock_init(&pdev->lock);
	INIT_WORK(&pdev->pending_status_wq, cdns2_pending_setup_status_handler);

	/* Initialize endpoint container. */
	INIT_LIST_HEAD(&pdev->gadget.ep_list);
	pdev->eps_dma_pool = dma_pool_create("cdns2_eps_dma_pool", pdev->dev,
					     TR_SEG_SIZE, 8, 0);
	if (!pdev->eps_dma_pool) {
		dev_err(pdev->dev, "Failed to create TRB dma pool\n");
		ret = -ENOMEM;
		goto put_gadget;
	}

	ret = cdns2_init_eps(pdev);
	if (ret) {
		dev_err(pdev->dev, "Failed to create endpoints\n");
		goto destroy_dma_pool;
	}

	pdev->gadget.sg_supported = 1;

	pdev->zlp_buf = kzalloc(CDNS2_EP_ZLP_BUF_SIZE, GFP_KERNEL);
	if (!pdev->zlp_buf) {
		ret = -ENOMEM;
		goto destroy_dma_pool;
	}

	/* Allocate memory for the setup packet buffer (a SETUP packet is 8 bytes). */
	buf = dma_alloc_coherent(pdev->dev, 8, &pdev->ep0_preq.request.dma,
				 GFP_DMA);
	pdev->ep0_preq.request.buf = buf;

	if (!pdev->ep0_preq.request.buf) {
		ret = -ENOMEM;
		goto free_zlp_buf;
	}

	/* Add USB gadget device. */
	ret = usb_add_gadget(&pdev->gadget);
	if (ret < 0) {
		dev_err(pdev->dev, "Failed to add gadget\n");
		goto free_ep0_buf;
	}

	return 0;

free_ep0_buf:
	dma_free_coherent(pdev->dev, 8, pdev->ep0_preq.request.buf,
			  pdev->ep0_preq.request.dma);
free_zlp_buf:
	kfree(pdev->zlp_buf);
destroy_dma_pool:
	dma_pool_destroy(pdev->eps_dma_pool);
put_gadget:
	usb_put_gadget(&pdev->gadget);

	return ret;
}

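/*
 * Detaches the gadget from the bus and masks device interrupts while
 * the controller is being suspended.
 */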
int cdns2_gadget_suspend(struct cdns2_device *pdev)
{
	unsigned long flags;

	cdns2_disconnect_gadget(pdev);

	spin_lock_irqsave(&pdev->lock, flags);
	pdev->gadget.speed = USB_SPEED_UNKNOWN;

	trace_cdns2_device_state("notattached");
	usb_gadget_set_state(&pdev->gadget, USB_STATE_NOTATTACHED);
	cdns2_enable_l1(pdev, 0);

	/* Disable interrupts for the device. */
	writeb(0, &pdev->interrupt_regs->usbien);
	writel(0, &pdev->adma_regs->ep_ien);
	spin_unlock_irqrestore(&pdev->lock, flags);

	return 0;
}

int cdns2_gadget_resume(struct cdns2_device *pdev, bool hibernated)
{
	unsigned long flags;

	spin_lock_irqsave(&pdev->lock, flags);

	if (!pdev->gadget_driver) {
		spin_unlock_irqrestore(&pdev->lock, flags);
		return 0;
	}

	cdns2_gadget_config(pdev);

	if (hibernated)
		clear_reg_bit_8(&pdev->usb_regs->usbcs, USBCS_DISCON);

	spin_unlock_irqrestore(&pdev->lock, flags);

	return 0;
}

void cdns2_gadget_remove(struct cdns2_device *pdev)
{
	pm_runtime_mark_last_busy(pdev->dev);
	pm_runtime_put_autosuspend(pdev->dev);

	usb_del_gadget(&pdev->gadget);
	cdns2_free_all_eps(pdev);

	dma_pool_destroy(pdev->eps_dma_pool);
	kfree(pdev->zlp_buf);
	usb_put_gadget(&pdev->gadget);
}

int cdns2_gadget_init(struct cdns2_device *pdev)
{
	int ret;

	/* Ensure a 32-bit DMA mask. */
	ret = dma_set_mask_and_coherent(pdev->dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(pdev->dev, "Failed to set dma mask: %d\n", ret);
		return ret;
	}

	pm_runtime_get_sync(pdev->dev);

	cdsn2_isoc_burst_opt(pdev);

	ret = cdns2_gadget_start(pdev);
	if (ret) {
		pm_runtime_put_sync(pdev->dev);
		return ret;
	}

	/*
	 * Because the interrupt line can be shared with other
	 * components in the driver, the IRQF_ONESHOT flag can't be
	 * used here.
	 */
	ret = devm_request_threaded_irq(pdev->dev, pdev->irq,
					cdns2_usb_irq_handler,
					cdns2_thread_irq_handler,
					IRQF_SHARED,
					dev_name(pdev->dev),
					pdev);
	if (ret)
		goto err0;

	return 0;

err0:
	cdns2_gadget_remove(pdev);

	return ret;
}