// SPDX-License-Identifier: GPL-2.0
/* Target based USB-Gadget
 *
 * UAS protocol handling, target callbacks, configfs handling,
 * and BBB (USB Mass Storage Class Bulk-Only) transport protocol handling.
 *
 * Author: Sebastian Andrzej Siewior <bigeasy at linutronix dot de>
 */
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/types.h>
12 #include <linux/string.h>
13 #include <linux/configfs.h>
14 #include <linux/ctype.h>
15 #include <linux/delay.h>
16 #include <linux/usb/ch9.h>
17 #include <linux/usb/composite.h>
18 #include <linux/usb/gadget.h>
19 #include <linux/usb/storage.h>
20 #include <scsi/scsi_tcq.h>
21 #include <target/target_core_base.h>
22 #include <target/target_core_fabric.h>
23 #include <linux/unaligned.h>
24
25 #include "tcm.h"
26 #include "u_tcm.h"
27 #include "configfs.h"
28
29 #define TPG_INSTANCES 1
30
31 struct tpg_instance {
32 struct usb_function_instance *func_inst;
33 struct usbg_tpg *tpg;
34 };
35
36 static struct tpg_instance tpg_instances[TPG_INSTANCES];
37
38 static DEFINE_MUTEX(tpg_instances_lock);
39
static inline struct f_uas *to_f_uas(struct usb_function *f)
41 {
42 return container_of(f, struct f_uas, function);
43 }
44
45 /* Start bot.c code */
46
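/*
 * Queue the command buffer on the bulk-out endpoint to receive the next CBW,
 * unless a command request is already pending.
 */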
static int bot_enqueue_cmd_cbw(struct f_uas *fu)
48 {
49 int ret;
50
51 if (fu->flags & USBG_BOT_CMD_PEND)
52 return 0;
53
54 ret = usb_ep_queue(fu->ep_out, fu->cmd[0].req, GFP_ATOMIC);
55 if (!ret)
56 fu->flags |= USBG_BOT_CMD_PEND;
57 return ret;
58 }
59
static void bot_status_complete(struct usb_ep *ep, struct usb_request *req)
61 {
62 struct usbg_cmd *cmd = req->context;
63 struct f_uas *fu = cmd->fu;
64
65 transport_generic_free_cmd(&cmd->se_cmd, 0);
66 if (req->status == -ESHUTDOWN)
67 return;
68
69 if (req->status < 0)
70 pr_err("ERR %s(%d)\n", __func__, __LINE__);
71
72 /* CSW completed, wait for next CBW */
73 bot_enqueue_cmd_cbw(fu);
74 }
75
static void bot_enqueue_sense_code(struct f_uas *fu, struct usbg_cmd *cmd)
77 {
78 struct bulk_cs_wrap *csw = &fu->bot_status.csw;
79 int ret;
80 unsigned int csw_stat;
81
82 csw_stat = cmd->csw_code;
83 csw->Tag = cmd->bot_tag;
84 csw->Status = csw_stat;
85 fu->bot_status.req->context = cmd;
86 ret = usb_ep_queue(fu->ep_in, fu->bot_status.req, GFP_ATOMIC);
87 if (ret)
88 pr_err("%s(%d) ERR: %d\n", __func__, __LINE__, ret);
89 }
90
static void bot_err_compl(struct usb_ep *ep, struct usb_request *req)
92 {
93 struct usbg_cmd *cmd = req->context;
94 struct f_uas *fu = cmd->fu;
95
96 if (req->status < 0)
97 pr_err("ERR %s(%d)\n", __func__, __LINE__);
98
99 if (cmd->data_len) {
100 if (cmd->data_len > ep->maxpacket) {
101 req->length = ep->maxpacket;
102 cmd->data_len -= ep->maxpacket;
103 } else {
104 req->length = cmd->data_len;
105 cmd->data_len = 0;
106 }
107
108 usb_ep_queue(ep, req, GFP_ATOMIC);
109 return;
110 }
111 bot_enqueue_sense_code(fu, cmd);
112 }
113
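/*
 * Report a non-GOOD status: drain or fill the remaining data phase one
 * maxpacket-sized chunk at a time (via bot_err_compl) and then queue the CSW
 * with the residue set.
 */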
static void bot_send_bad_status(struct usbg_cmd *cmd)
115 {
116 struct f_uas *fu = cmd->fu;
117 struct bulk_cs_wrap *csw = &fu->bot_status.csw;
118 struct usb_request *req;
119 struct usb_ep *ep;
120
121 csw->Residue = cpu_to_le32(cmd->data_len);
122
123 if (cmd->data_len) {
124 if (cmd->is_read) {
125 ep = fu->ep_in;
126 req = fu->bot_req_in;
127 } else {
128 ep = fu->ep_out;
129 req = fu->bot_req_out;
130 }
131
132 if (cmd->data_len > fu->ep_in->maxpacket) {
133 req->length = ep->maxpacket;
134 cmd->data_len -= ep->maxpacket;
135 } else {
136 req->length = cmd->data_len;
137 cmd->data_len = 0;
138 }
139 req->complete = bot_err_compl;
140 req->context = cmd;
141 req->buf = fu->cmd[0].buf;
142 usb_ep_queue(ep, req, GFP_KERNEL);
143 } else {
144 bot_enqueue_sense_code(fu, cmd);
145 }
146 }
147
static int bot_send_status(struct usbg_cmd *cmd, bool moved_data)
149 {
150 struct f_uas *fu = cmd->fu;
151 struct bulk_cs_wrap *csw = &fu->bot_status.csw;
152 int ret;
153
154 if (cmd->se_cmd.scsi_status == SAM_STAT_GOOD) {
155 if (!moved_data && cmd->data_len) {
			/*
			 * The host wants to move data but we don't. Fill or
			 * empty the pipe and then send the CSW with the
			 * residue set.
			 */
160 cmd->csw_code = US_BULK_STAT_OK;
161 bot_send_bad_status(cmd);
162 return 0;
163 }
164
165 csw->Tag = cmd->bot_tag;
166 csw->Residue = cpu_to_le32(0);
167 csw->Status = US_BULK_STAT_OK;
168 fu->bot_status.req->context = cmd;
169
170 ret = usb_ep_queue(fu->ep_in, fu->bot_status.req, GFP_KERNEL);
171 if (ret)
172 pr_err("%s(%d) ERR: %d\n", __func__, __LINE__, ret);
173 } else {
174 cmd->csw_code = US_BULK_STAT_FAIL;
175 bot_send_bad_status(cmd);
176 }
177 return 0;
178 }
179
180 /*
181 * Called after command (no data transfer) or after the write (to device)
182 * operation is completed
183 */
static int bot_send_status_response(struct usbg_cmd *cmd)
185 {
186 bool moved_data = false;
187
188 if (!cmd->is_read)
189 moved_data = true;
190 return bot_send_status(cmd, moved_data);
191 }
192
193 /* Read request completed, now we have to send the CSW */
static void bot_read_compl(struct usb_ep *ep, struct usb_request *req)
195 {
196 struct usbg_cmd *cmd = req->context;
197
198 if (req->status < 0)
199 pr_err("ERR %s(%d)\n", __func__, __LINE__);
200
201 if (req->status == -ESHUTDOWN) {
202 transport_generic_free_cmd(&cmd->se_cmd, 0);
203 return;
204 }
205
206 bot_send_status(cmd, true);
207 }
208
static int bot_send_read_response(struct usbg_cmd *cmd)
210 {
211 struct f_uas *fu = cmd->fu;
212 struct se_cmd *se_cmd = &cmd->se_cmd;
213 struct usb_gadget *gadget = fuas_to_gadget(fu);
214 int ret;
215
216 if (!cmd->data_len) {
217 cmd->csw_code = US_BULK_STAT_PHASE;
218 bot_send_bad_status(cmd);
219 return 0;
220 }
221
222 if (!gadget->sg_supported) {
223 cmd->data_buf = kmalloc(se_cmd->data_length, GFP_ATOMIC);
224 if (!cmd->data_buf)
225 return -ENOMEM;
226
227 sg_copy_to_buffer(se_cmd->t_data_sg,
228 se_cmd->t_data_nents,
229 cmd->data_buf,
230 se_cmd->data_length);
231
232 fu->bot_req_in->buf = cmd->data_buf;
233 } else {
234 fu->bot_req_in->buf = NULL;
235 fu->bot_req_in->num_sgs = se_cmd->t_data_nents;
236 fu->bot_req_in->sg = se_cmd->t_data_sg;
237 }
238
239 fu->bot_req_in->complete = bot_read_compl;
240 fu->bot_req_in->length = se_cmd->data_length;
241 fu->bot_req_in->context = cmd;
242 ret = usb_ep_queue(fu->ep_in, fu->bot_req_in, GFP_ATOMIC);
243 if (ret)
244 pr_err("%s(%d)\n", __func__, __LINE__);
245 return 0;
246 }
247
248 static void usbg_data_write_cmpl(struct usb_ep *, struct usb_request *);
249 static int usbg_prepare_w_request(struct usbg_cmd *, struct usb_request *);
250
static int bot_send_write_request(struct usbg_cmd *cmd)
252 {
253 struct f_uas *fu = cmd->fu;
254 int ret;
255
256 cmd->fu = fu;
257
258 if (!cmd->data_len) {
259 cmd->csw_code = US_BULK_STAT_PHASE;
260 return -EINVAL;
261 }
262
263 ret = usbg_prepare_w_request(cmd, fu->bot_req_out);
264 if (ret)
265 goto cleanup;
266 ret = usb_ep_queue(fu->ep_out, fu->bot_req_out, GFP_KERNEL);
267 if (ret)
268 pr_err("%s(%d)\n", __func__, __LINE__);
269
270 cleanup:
271 return ret;
272 }
273
274 static int bot_submit_command(struct f_uas *, void *, unsigned int);
275
static void bot_cmd_complete(struct usb_ep *ep, struct usb_request *req)
277 {
278 struct f_uas *fu = req->context;
279 int ret;
280
281 if (req->status == -ESHUTDOWN)
282 return;
283
284 fu->flags &= ~USBG_BOT_CMD_PEND;
285
286 if (req->status < 0) {
287 struct usb_gadget *gadget = fuas_to_gadget(fu);
288
289 dev_err(&gadget->dev, "BOT command req err (%d)\n", req->status);
290 bot_enqueue_cmd_cbw(fu);
291 return;
292 }
293
294 ret = bot_submit_command(fu, req->buf, req->actual);
295 if (ret) {
296 pr_err("%s(%d): %d\n", __func__, __LINE__, ret);
297 if (!(fu->flags & USBG_BOT_WEDGED))
298 usb_ep_set_wedge(fu->ep_in);
299
300 fu->flags |= USBG_BOT_WEDGED;
301 bot_enqueue_cmd_cbw(fu);
302 } else if (fu->flags & USBG_BOT_WEDGED) {
303 fu->flags &= ~USBG_BOT_WEDGED;
304 usb_ep_clear_halt(fu->ep_in);
305 }
306 }
307
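/*
 * Allocate the BOT requests (data in/out, command, CSW status), prime the CSW
 * signature and queue the first command request to receive a CBW. Everything
 * is torn down again in bot_cleanup_old_alt().
 */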
static int bot_prepare_reqs(struct f_uas *fu)
309 {
310 int ret;
311
312 fu->bot_req_in = usb_ep_alloc_request(fu->ep_in, GFP_KERNEL);
313 if (!fu->bot_req_in)
314 goto err;
315
316 fu->bot_req_out = usb_ep_alloc_request(fu->ep_out, GFP_KERNEL);
317 if (!fu->bot_req_out)
318 goto err_out;
319
320 fu->cmd[0].req = usb_ep_alloc_request(fu->ep_out, GFP_KERNEL);
321 if (!fu->cmd[0].req)
322 goto err_cmd;
323
324 fu->bot_status.req = usb_ep_alloc_request(fu->ep_in, GFP_KERNEL);
325 if (!fu->bot_status.req)
326 goto err_sts;
327
328 fu->bot_status.req->buf = &fu->bot_status.csw;
329 fu->bot_status.req->length = US_BULK_CS_WRAP_LEN;
330 fu->bot_status.req->complete = bot_status_complete;
331 fu->bot_status.csw.Signature = cpu_to_le32(US_BULK_CS_SIGN);
332
333 fu->cmd[0].buf = kmalloc(fu->ep_out->maxpacket, GFP_KERNEL);
334 if (!fu->cmd[0].buf)
335 goto err_buf;
336
337 fu->cmd[0].req->complete = bot_cmd_complete;
338 fu->cmd[0].req->buf = fu->cmd[0].buf;
339 fu->cmd[0].req->length = fu->ep_out->maxpacket;
340 fu->cmd[0].req->context = fu;
341
342 ret = bot_enqueue_cmd_cbw(fu);
343 if (ret)
344 goto err_queue;
345 return 0;
346 err_queue:
347 kfree(fu->cmd[0].buf);
348 fu->cmd[0].buf = NULL;
349 err_buf:
350 usb_ep_free_request(fu->ep_in, fu->bot_status.req);
351 err_sts:
352 usb_ep_free_request(fu->ep_out, fu->cmd[0].req);
353 fu->cmd[0].req = NULL;
354 err_cmd:
355 usb_ep_free_request(fu->ep_out, fu->bot_req_out);
356 fu->bot_req_out = NULL;
357 err_out:
358 usb_ep_free_request(fu->ep_in, fu->bot_req_in);
359 fu->bot_req_in = NULL;
360 err:
361 pr_err("BOT: endpoint setup failed\n");
362 return -ENOMEM;
363 }
364
static void bot_cleanup_old_alt(struct f_uas *fu)
366 {
367 if (!(fu->flags & USBG_ENABLED))
368 return;
369
370 usb_ep_disable(fu->ep_in);
371 usb_ep_disable(fu->ep_out);
372
373 if (!fu->bot_req_in)
374 return;
375
376 usb_ep_free_request(fu->ep_in, fu->bot_req_in);
377 usb_ep_free_request(fu->ep_out, fu->bot_req_out);
378 usb_ep_free_request(fu->ep_out, fu->cmd[0].req);
379 usb_ep_free_request(fu->ep_in, fu->bot_status.req);
380
381 kfree(fu->cmd[0].buf);
382
383 fu->bot_req_in = NULL;
384 fu->bot_req_out = NULL;
385 fu->cmd[0].req = NULL;
386 fu->bot_status.req = NULL;
387 fu->cmd[0].buf = NULL;
388 }
389
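/*
 * Switch to the BOT alternate setting: configure and enable both bulk
 * endpoints, then allocate and queue the BOT requests.
 */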
static void bot_set_alt(struct f_uas *fu)
391 {
392 struct usb_function *f = &fu->function;
393 struct usb_gadget *gadget = f->config->cdev->gadget;
394 int ret;
395
396 fu->flags = USBG_IS_BOT;
397
398 config_ep_by_speed_and_alt(gadget, f, fu->ep_in, USB_G_ALT_INT_BBB);
399 ret = usb_ep_enable(fu->ep_in);
400 if (ret)
401 goto err_b_in;
402
403 config_ep_by_speed_and_alt(gadget, f, fu->ep_out, USB_G_ALT_INT_BBB);
404 ret = usb_ep_enable(fu->ep_out);
405 if (ret)
406 goto err_b_out;
407
408 ret = bot_prepare_reqs(fu);
409 if (ret)
410 goto err_wq;
411 fu->flags |= USBG_ENABLED;
412 pr_info("Using the BOT protocol\n");
413 return;
414 err_wq:
415 usb_ep_disable(fu->ep_out);
416 err_b_out:
417 usb_ep_disable(fu->ep_in);
418 err_b_in:
419 fu->flags = USBG_IS_BOT;
420 }
421
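/*
 * Handle the BOT class-specific control requests: Get Max LUN (report the
 * highest configured LUN number, capped at US_BULK_MAX_LUN_LIMIT) and
 * Bulk-Only Mass Storage Reset (clear a wedged IN endpoint and re-queue the
 * command request).
 */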
static int usbg_bot_setup(struct usb_function *f,
		const struct usb_ctrlrequest *ctrl)
424 {
425 struct f_uas *fu = to_f_uas(f);
426 struct usb_composite_dev *cdev = f->config->cdev;
427 u16 w_value = le16_to_cpu(ctrl->wValue);
428 u16 w_length = le16_to_cpu(ctrl->wLength);
429 int luns;
430 u8 *ret_lun;
431
432 switch (ctrl->bRequest) {
433 case US_BULK_GET_MAX_LUN:
434 if (ctrl->bRequestType != (USB_DIR_IN | USB_TYPE_CLASS |
435 USB_RECIP_INTERFACE))
436 return -ENOTSUPP;
437
438 if (w_length < 1)
439 return -EINVAL;
440 if (w_value != 0)
441 return -EINVAL;
442 luns = atomic_read(&fu->tpg->tpg_port_count);
443 if (!luns) {
444 pr_err("No LUNs configured?\n");
445 return -EINVAL;
446 }
447 luns--;
448 if (luns > US_BULK_MAX_LUN_LIMIT) {
449 pr_info_once("Limiting the number of luns to 16\n");
450 luns = US_BULK_MAX_LUN_LIMIT;
451 }
452 ret_lun = cdev->req->buf;
453 *ret_lun = luns;
454 cdev->req->length = 1;
455 return usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
456
457 case US_BULK_RESET_REQUEST:
458 /* XXX maybe we should remove previous requests for IN + OUT */
459 if (fu->flags & USBG_BOT_WEDGED) {
460 fu->flags &= ~USBG_BOT_WEDGED;
461 usb_ep_clear_halt(fu->ep_in);
462 }
463
464 bot_enqueue_cmd_cbw(fu);
465 return 0;
466 }
467 return -ENOTSUPP;
468 }
469
470 /* Start uas.c code */
471
static int tcm_to_uasp_response(enum tcm_tmrsp_table code)
473 {
474 switch (code) {
475 case TMR_FUNCTION_FAILED:
476 return RC_TMF_FAILED;
477 case TMR_FUNCTION_COMPLETE:
478 case TMR_TASK_DOES_NOT_EXIST:
479 return RC_TMF_COMPLETE;
480 case TMR_LUN_DOES_NOT_EXIST:
481 return RC_INCORRECT_LUN;
482 case TMR_FUNCTION_REJECTED:
483 case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED:
484 default:
485 return RC_TMF_NOT_SUPPORTED;
486 }
487 }
488
static unsigned char uasp_to_tcm_func(int code)
490 {
491 switch (code) {
492 case TMF_ABORT_TASK:
493 return TMR_ABORT_TASK;
494 case TMF_ABORT_TASK_SET:
495 return TMR_ABORT_TASK_SET;
496 case TMF_CLEAR_TASK_SET:
497 return TMR_CLEAR_TASK_SET;
498 case TMF_LOGICAL_UNIT_RESET:
499 return TMR_LUN_RESET;
500 case TMF_CLEAR_ACA:
501 return TMR_CLEAR_ACA;
502 case TMF_I_T_NEXUS_RESET:
503 case TMF_QUERY_TASK:
504 case TMF_QUERY_TASK_SET:
505 case TMF_QUERY_ASYNC_EVENT:
506 default:
507 return TMR_UNKNOWN;
508 }
509 }
510
static void uasp_cleanup_one_stream(struct f_uas *fu, struct uas_stream *stream)
512 {
513 /* We have either all three allocated or none */
514 if (!stream->req_in)
515 return;
516
517 usb_ep_free_request(fu->ep_in, stream->req_in);
518 usb_ep_free_request(fu->ep_out, stream->req_out);
519 usb_ep_free_request(fu->ep_status, stream->req_status);
520
521 stream->req_in = NULL;
522 stream->req_out = NULL;
523 stream->req_status = NULL;
524 }
525
static void uasp_free_cmdreq(struct f_uas *fu)
527 {
528 int i;
529
530 for (i = 0; i < USBG_NUM_CMDS; i++) {
531 usb_ep_free_request(fu->ep_cmd, fu->cmd[i].req);
532 kfree(fu->cmd[i].buf);
533 fu->cmd[i].req = NULL;
534 fu->cmd[i].buf = NULL;
535 }
536 }
537
static void uasp_cleanup_old_alt(struct f_uas *fu)
539 {
540 int i;
541
542 if (!(fu->flags & USBG_ENABLED))
543 return;
544
545 usb_ep_disable(fu->ep_in);
546 usb_ep_disable(fu->ep_out);
547 usb_ep_disable(fu->ep_status);
548 usb_ep_disable(fu->ep_cmd);
549
550 for (i = 0; i < USBG_NUM_CMDS; i++)
551 uasp_cleanup_one_stream(fu, &fu->stream[i]);
552 uasp_free_cmdreq(fu);
553 }
554
555 static void uasp_status_data_cmpl(struct usb_ep *ep, struct usb_request *req);
556
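/*
 * Prepare the data-in request for this command: hand the gadget the SG list
 * directly if it supports scatter-gather, otherwise bounce the data through a
 * linear buffer.
 */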
static int uasp_prepare_r_request(struct usbg_cmd *cmd)
558 {
559 struct se_cmd *se_cmd = &cmd->se_cmd;
560 struct f_uas *fu = cmd->fu;
561 struct usb_gadget *gadget = fuas_to_gadget(fu);
562 struct uas_stream *stream = &fu->stream[se_cmd->map_tag];
563
564 if (!gadget->sg_supported) {
565 cmd->data_buf = kmalloc(se_cmd->data_length, GFP_ATOMIC);
566 if (!cmd->data_buf)
567 return -ENOMEM;
568
569 sg_copy_to_buffer(se_cmd->t_data_sg,
570 se_cmd->t_data_nents,
571 cmd->data_buf,
572 se_cmd->data_length);
573
574 stream->req_in->buf = cmd->data_buf;
575 } else {
576 stream->req_in->buf = NULL;
577 stream->req_in->num_sgs = se_cmd->t_data_nents;
578 stream->req_in->sg = se_cmd->t_data_sg;
579 }
580
581 stream->req_in->is_last = 1;
582 stream->req_in->stream_id = cmd->tag;
583 stream->req_in->complete = uasp_status_data_cmpl;
584 stream->req_in->length = se_cmd->data_length;
585 stream->req_in->context = cmd;
586
587 cmd->state = UASP_SEND_STATUS;
588 return 0;
589 }
590
static void uasp_prepare_status(struct usbg_cmd *cmd)
592 {
593 struct se_cmd *se_cmd = &cmd->se_cmd;
594 struct sense_iu *iu = &cmd->sense_iu;
595 struct uas_stream *stream = &cmd->fu->stream[se_cmd->map_tag];
596
597 cmd->state = UASP_QUEUE_COMMAND;
598 iu->iu_id = IU_ID_STATUS;
599 iu->tag = cpu_to_be16(cmd->tag);
600
	/*
	 * iu->status_qual (SAM-4 STATUS QUALIFIER) is not supported and is
	 * left at zero.
	 */
604 iu->len = cpu_to_be16(se_cmd->scsi_sense_length);
605 iu->status = se_cmd->scsi_status;
606 stream->req_status->is_last = 1;
607 stream->req_status->stream_id = cmd->tag;
608 stream->req_status->context = cmd;
609 stream->req_status->length = se_cmd->scsi_sense_length + 16;
610 stream->req_status->buf = iu;
611 stream->req_status->complete = uasp_status_data_cmpl;
612 }
613
static void uasp_prepare_response(struct usbg_cmd *cmd)
615 {
616 struct se_cmd *se_cmd = &cmd->se_cmd;
617 struct response_iu *rsp_iu = &cmd->response_iu;
618 struct uas_stream *stream = &cmd->fu->stream[se_cmd->map_tag];
619
620 cmd->state = UASP_QUEUE_COMMAND;
621 rsp_iu->iu_id = IU_ID_RESPONSE;
622 rsp_iu->tag = cpu_to_be16(cmd->tag);
623
624 if (cmd->tmr_rsp != RC_RESPONSE_UNKNOWN)
625 rsp_iu->response_code = cmd->tmr_rsp;
626 else
627 rsp_iu->response_code =
628 tcm_to_uasp_response(se_cmd->se_tmr_req->response);
629
	/*
	 * The UASP driver must support all the task management functions listed
	 * in Table 20 of UAS-r04. To remain compliant while indicating that the
	 * TMR did not go through, report RC_TMF_FAILED instead of
	 * RC_TMF_NOT_SUPPORTED and print a warning to the user.
	 */
636 switch (cmd->tmr_func) {
637 case TMF_ABORT_TASK:
638 case TMF_ABORT_TASK_SET:
639 case TMF_CLEAR_TASK_SET:
640 case TMF_LOGICAL_UNIT_RESET:
641 case TMF_CLEAR_ACA:
642 case TMF_I_T_NEXUS_RESET:
643 case TMF_QUERY_TASK:
644 case TMF_QUERY_TASK_SET:
645 case TMF_QUERY_ASYNC_EVENT:
646 if (rsp_iu->response_code == RC_TMF_NOT_SUPPORTED) {
647 struct usb_gadget *gadget = fuas_to_gadget(cmd->fu);
648
649 dev_warn(&gadget->dev, "TMF function %d not supported\n",
650 cmd->tmr_func);
651 rsp_iu->response_code = RC_TMF_FAILED;
652 }
653 break;
654 default:
655 break;
656 }
657
658 stream->req_status->is_last = 1;
659 stream->req_status->stream_id = cmd->tag;
660 stream->req_status->context = cmd;
661 stream->req_status->length = sizeof(struct response_iu);
662 stream->req_status->buf = rsp_iu;
663 stream->req_status->complete = uasp_status_data_cmpl;
664 }
665
666 static void usbg_release_cmd(struct se_cmd *se_cmd);
667 static int uasp_send_tm_response(struct usbg_cmd *cmd);
668
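/*
 * Completion handler driving the UAS state machine: depending on cmd->state it
 * queues the data-in, data-out or status request, or re-queues the command
 * request once the status stage has completed.
 */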
static void uasp_status_data_cmpl(struct usb_ep *ep, struct usb_request *req)
670 {
671 struct usbg_cmd *cmd = req->context;
672 struct f_uas *fu = cmd->fu;
673 struct uas_stream *stream = &fu->stream[cmd->se_cmd.map_tag];
674 int ret;
675
676 if (req->status == -ESHUTDOWN)
677 goto cleanup;
678
679 switch (cmd->state) {
680 case UASP_SEND_DATA:
681 ret = uasp_prepare_r_request(cmd);
682 if (ret)
683 goto cleanup;
684 ret = usb_ep_queue(fu->ep_in, stream->req_in, GFP_ATOMIC);
685 if (ret)
686 pr_err("%s(%d) => %d\n", __func__, __LINE__, ret);
687 break;
688
689 case UASP_RECEIVE_DATA:
690 ret = usbg_prepare_w_request(cmd, stream->req_out);
691 if (ret)
692 goto cleanup;
693 ret = usb_ep_queue(fu->ep_out, stream->req_out, GFP_ATOMIC);
694 if (ret)
695 pr_err("%s(%d) => %d\n", __func__, __LINE__, ret);
696 break;
697
698 case UASP_SEND_STATUS:
699 uasp_prepare_status(cmd);
700 ret = usb_ep_queue(fu->ep_status, stream->req_status,
701 GFP_ATOMIC);
702 if (ret)
703 pr_err("%s(%d) => %d\n", __func__, __LINE__, ret);
704 break;
705
706 case UASP_QUEUE_COMMAND:
		/*
		 * An overlapped command was detected and cancelled, so send
		 * the overlapped tag response now.
		 */
711 if (cmd->tmr_rsp == RC_OVERLAPPED_TAG &&
712 req->status == -ECONNRESET) {
713 uasp_send_tm_response(cmd);
714 return;
715 }
716
717 hash_del(&stream->node);
718
		/*
		 * If no command was submitted to the target core here, just
		 * free the bitmap index. This covers the cases where f_tcm
		 * handles the status response instead of the target core.
		 */
724 if (cmd->tmr_rsp != RC_OVERLAPPED_TAG &&
725 cmd->tmr_rsp != RC_RESPONSE_UNKNOWN) {
726 struct se_session *se_sess;
727
728 se_sess = fu->tpg->tpg_nexus->tvn_se_sess;
729 sbitmap_queue_clear(&se_sess->sess_tag_pool,
730 cmd->se_cmd.map_tag,
731 cmd->se_cmd.map_cpu);
732 } else {
733 transport_generic_free_cmd(&cmd->se_cmd, 0);
734 }
735
736 usb_ep_queue(fu->ep_cmd, cmd->req, GFP_ATOMIC);
737 complete(&stream->cmd_completion);
738 break;
739
740 default:
741 BUG();
742 }
743 return;
744
745 cleanup:
746 hash_del(&stream->node);
747 transport_generic_free_cmd(&cmd->se_cmd, 0);
748 }
749
static int uasp_send_status_response(struct usbg_cmd *cmd)
751 {
752 struct f_uas *fu = cmd->fu;
753 struct uas_stream *stream = &fu->stream[cmd->se_cmd.map_tag];
754 struct sense_iu *iu = &cmd->sense_iu;
755
756 iu->tag = cpu_to_be16(cmd->tag);
757 cmd->fu = fu;
758 uasp_prepare_status(cmd);
759 return usb_ep_queue(fu->ep_status, stream->req_status, GFP_ATOMIC);
760 }
761
static int uasp_send_tm_response(struct usbg_cmd *cmd)
763 {
764 struct f_uas *fu = cmd->fu;
765 struct uas_stream *stream = &fu->stream[cmd->se_cmd.map_tag];
766 struct response_iu *iu = &cmd->response_iu;
767
768 iu->tag = cpu_to_be16(cmd->tag);
769 cmd->fu = fu;
770 uasp_prepare_response(cmd);
771 return usb_ep_queue(fu->ep_status, stream->req_status, GFP_ATOMIC);
772 }
773
static int uasp_send_read_response(struct usbg_cmd *cmd)
775 {
776 struct f_uas *fu = cmd->fu;
777 struct uas_stream *stream = &fu->stream[cmd->se_cmd.map_tag];
778 struct sense_iu *iu = &cmd->sense_iu;
779 int ret;
780
781 cmd->fu = fu;
782
783 iu->tag = cpu_to_be16(cmd->tag);
784 if (fu->flags & USBG_USE_STREAMS) {
785
786 ret = uasp_prepare_r_request(cmd);
787 if (ret)
788 goto out;
789 ret = usb_ep_queue(fu->ep_in, stream->req_in, GFP_ATOMIC);
790 if (ret) {
791 pr_err("%s(%d) => %d\n", __func__, __LINE__, ret);
792 kfree(cmd->data_buf);
793 cmd->data_buf = NULL;
794 }
795
796 } else {
797
798 iu->iu_id = IU_ID_READ_READY;
799 iu->tag = cpu_to_be16(cmd->tag);
800
801 stream->req_status->complete = uasp_status_data_cmpl;
802 stream->req_status->context = cmd;
803
804 cmd->state = UASP_SEND_DATA;
805 stream->req_status->buf = iu;
806 stream->req_status->length = sizeof(struct iu);
807
808 ret = usb_ep_queue(fu->ep_status, stream->req_status,
809 GFP_ATOMIC);
810 if (ret)
811 pr_err("%s(%d) => %d\n", __func__, __LINE__, ret);
812 }
813 out:
814 return ret;
815 }
816
static int uasp_send_write_request(struct usbg_cmd *cmd)
818 {
819 struct f_uas *fu = cmd->fu;
820 struct se_cmd *se_cmd = &cmd->se_cmd;
821 struct uas_stream *stream = &fu->stream[se_cmd->map_tag];
822 struct sense_iu *iu = &cmd->sense_iu;
823 int ret;
824
825 cmd->fu = fu;
826
827 iu->tag = cpu_to_be16(cmd->tag);
828
829 if (fu->flags & USBG_USE_STREAMS) {
830
831 ret = usbg_prepare_w_request(cmd, stream->req_out);
832 if (ret)
833 goto cleanup;
834 ret = usb_ep_queue(fu->ep_out, stream->req_out, GFP_ATOMIC);
835 if (ret)
836 pr_err("%s(%d)\n", __func__, __LINE__);
837
838 } else {
839
840 iu->iu_id = IU_ID_WRITE_READY;
841 iu->tag = cpu_to_be16(cmd->tag);
842
843 stream->req_status->complete = uasp_status_data_cmpl;
844 stream->req_status->context = cmd;
845
846 cmd->state = UASP_RECEIVE_DATA;
847 stream->req_status->buf = iu;
848 stream->req_status->length = sizeof(struct iu);
849
850 ret = usb_ep_queue(fu->ep_status, stream->req_status,
851 GFP_ATOMIC);
852 if (ret)
853 pr_err("%s(%d)\n", __func__, __LINE__);
854 }
855
856 cleanup:
857 return ret;
858 }
859
860 static int usbg_submit_command(struct f_uas *, struct usb_request *);
861
static void uasp_cmd_complete(struct usb_ep *ep, struct usb_request *req)
863 {
864 struct f_uas *fu = req->context;
865
866 if (req->status == -ESHUTDOWN)
867 return;
868
869 if (req->status < 0) {
870 usb_ep_queue(fu->ep_cmd, req, GFP_ATOMIC);
871 return;
872 }
873
874 usbg_submit_command(fu, req);
875 }
876
static int uasp_alloc_stream_res(struct f_uas *fu, struct uas_stream *stream)
878 {
879 init_completion(&stream->cmd_completion);
880
881 stream->req_in = usb_ep_alloc_request(fu->ep_in, GFP_KERNEL);
882 if (!stream->req_in)
883 goto out;
884
885 stream->req_out = usb_ep_alloc_request(fu->ep_out, GFP_KERNEL);
886 if (!stream->req_out)
887 goto err_out;
888
889 stream->req_status = usb_ep_alloc_request(fu->ep_status, GFP_KERNEL);
890 if (!stream->req_status)
891 goto err_sts;
892
893 return 0;
894
895 err_sts:
896 usb_ep_free_request(fu->ep_out, stream->req_out);
897 stream->req_out = NULL;
898 err_out:
899 usb_ep_free_request(fu->ep_in, stream->req_in);
900 stream->req_in = NULL;
901 out:
902 return -ENOMEM;
903 }
904
static int uasp_alloc_cmd(struct f_uas *fu, int i)
906 {
907 fu->cmd[i].req = usb_ep_alloc_request(fu->ep_cmd, GFP_KERNEL);
908 if (!fu->cmd[i].req)
909 goto err;
910
911 fu->cmd[i].buf = kmalloc(fu->ep_cmd->maxpacket, GFP_KERNEL);
912 if (!fu->cmd[i].buf)
913 goto err_buf;
914
915 fu->cmd[i].req->complete = uasp_cmd_complete;
916 fu->cmd[i].req->buf = fu->cmd[i].buf;
917 fu->cmd[i].req->length = fu->ep_cmd->maxpacket;
918 fu->cmd[i].req->context = fu;
919 return 0;
920
921 err_buf:
922 usb_ep_free_request(fu->ep_cmd, fu->cmd[i].req);
923 err:
924 return -ENOMEM;
925 }
926
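/*
 * Allocate the per-stream requests and the command requests, then queue all
 * command requests on the command endpoint. On failure, everything allocated
 * so far is released again.
 */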
static int uasp_prepare_reqs(struct f_uas *fu)
928 {
929 int ret;
930 int i;
931
932 for (i = 0; i < USBG_NUM_CMDS; i++) {
933 ret = uasp_alloc_stream_res(fu, &fu->stream[i]);
934 if (ret)
935 goto err_cleanup;
936 }
937
938 for (i = 0; i < USBG_NUM_CMDS; i++) {
939 ret = uasp_alloc_cmd(fu, i);
940 if (ret)
941 goto err_free_stream;
942
943 ret = usb_ep_queue(fu->ep_cmd, fu->cmd[i].req, GFP_ATOMIC);
944 if (ret)
945 goto err_free_stream;
946 }
947
948 return 0;
949
950 err_free_stream:
951 uasp_free_cmdreq(fu);
952
953 err_cleanup:
954 if (i) {
955 do {
956 uasp_cleanup_one_stream(fu, &fu->stream[i - 1]);
957 i--;
958 } while (i);
959 }
960 pr_err("UASP: endpoint setup failed\n");
961 return ret;
962 }
963
static void uasp_set_alt(struct f_uas *fu)
965 {
966 struct usb_function *f = &fu->function;
967 struct usb_gadget *gadget = f->config->cdev->gadget;
968 int ret;
969
970 fu->flags = USBG_IS_UAS;
971
972 if (gadget->speed >= USB_SPEED_SUPER)
973 fu->flags |= USBG_USE_STREAMS;
974
975 config_ep_by_speed_and_alt(gadget, f, fu->ep_in, USB_G_ALT_INT_UAS);
976 ret = usb_ep_enable(fu->ep_in);
977 if (ret)
978 goto err_b_in;
979
980 config_ep_by_speed_and_alt(gadget, f, fu->ep_out, USB_G_ALT_INT_UAS);
981 ret = usb_ep_enable(fu->ep_out);
982 if (ret)
983 goto err_b_out;
984
985 config_ep_by_speed_and_alt(gadget, f, fu->ep_cmd, USB_G_ALT_INT_UAS);
986 ret = usb_ep_enable(fu->ep_cmd);
987 if (ret)
988 goto err_cmd;
989 config_ep_by_speed_and_alt(gadget, f, fu->ep_status, USB_G_ALT_INT_UAS);
990 ret = usb_ep_enable(fu->ep_status);
991 if (ret)
992 goto err_status;
993
994 ret = uasp_prepare_reqs(fu);
995 if (ret)
996 goto err_wq;
997 fu->flags |= USBG_ENABLED;
998
999 pr_info("Using the UAS protocol\n");
1000 return;
1001 err_wq:
1002 usb_ep_disable(fu->ep_status);
1003 err_status:
1004 usb_ep_disable(fu->ep_cmd);
1005 err_cmd:
1006 usb_ep_disable(fu->ep_out);
1007 err_b_out:
1008 usb_ep_disable(fu->ep_in);
1009 err_b_in:
1010 fu->flags = 0;
1011 }
1012
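/* Derive the expected data transfer direction from the SCSI opcode in the CDB. */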
static int get_cmd_dir(const unsigned char *cdb)
1014 {
1015 int ret;
1016
1017 switch (cdb[0]) {
1018 case READ_6:
1019 case READ_10:
1020 case READ_12:
1021 case READ_16:
1022 case INQUIRY:
1023 case MODE_SENSE:
1024 case MODE_SENSE_10:
1025 case SERVICE_ACTION_IN_16:
1026 case MAINTENANCE_IN:
1027 case PERSISTENT_RESERVE_IN:
1028 case SECURITY_PROTOCOL_IN:
1029 case ACCESS_CONTROL_IN:
1030 case REPORT_LUNS:
1031 case READ_BLOCK_LIMITS:
1032 case READ_POSITION:
1033 case READ_CAPACITY:
1034 case READ_TOC:
1035 case READ_FORMAT_CAPACITIES:
1036 case REQUEST_SENSE:
1037 case ATA_12:
1038 case ATA_16:
1039 ret = DMA_FROM_DEVICE;
1040 break;
1041
1042 case WRITE_6:
1043 case WRITE_10:
1044 case WRITE_12:
1045 case WRITE_16:
1046 case MODE_SELECT:
1047 case MODE_SELECT_10:
1048 case WRITE_VERIFY:
1049 case WRITE_VERIFY_12:
1050 case PERSISTENT_RESERVE_OUT:
1051 case MAINTENANCE_OUT:
1052 case SECURITY_PROTOCOL_OUT:
1053 case ACCESS_CONTROL_OUT:
1054 ret = DMA_TO_DEVICE;
1055 break;
1056 case ALLOW_MEDIUM_REMOVAL:
1057 case TEST_UNIT_READY:
1058 case SYNCHRONIZE_CACHE:
1059 case START_STOP:
1060 case ERASE:
1061 case REZERO_UNIT:
1062 case SEEK_10:
1063 case SPACE:
1064 case VERIFY:
1065 case WRITE_FILEMARKS:
1066 ret = DMA_NONE;
1067 break;
1068 default:
1069 #define CMD_DIR_MSG "target: Unknown data direction for SCSI Opcode 0x%02x\n"
1070 pr_warn(CMD_DIR_MSG, cdb[0]);
1071 #undef CMD_DIR_MSG
1072 ret = -EINVAL;
1073 }
1074 return ret;
1075 }
1076
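/*
 * Data-out (write) completion: on success, copy bounce-buffer data back into
 * the command's SG list when scatter-gather was not used, then hand the
 * command back to the workqueue to execute it in the target core.
 */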
static void usbg_data_write_cmpl(struct usb_ep *ep, struct usb_request *req)
1078 {
1079 struct usbg_cmd *cmd = req->context;
1080 struct se_cmd *se_cmd = &cmd->se_cmd;
1081
1082 cmd->state = UASP_QUEUE_COMMAND;
1083
1084 if (req->status == -ESHUTDOWN) {
1085 struct uas_stream *stream = &cmd->fu->stream[se_cmd->map_tag];
1086
1087 hash_del(&stream->node);
1088 target_put_sess_cmd(se_cmd);
1089 transport_generic_free_cmd(&cmd->se_cmd, 0);
1090 return;
1091 }
1092
1093 if (req->status) {
1094 pr_err("%s() state %d transfer failed\n", __func__, cmd->state);
1095 goto cleanup;
1096 }
1097
1098 if (req->num_sgs == 0) {
1099 sg_copy_from_buffer(se_cmd->t_data_sg,
1100 se_cmd->t_data_nents,
1101 cmd->data_buf,
1102 se_cmd->data_length);
1103 }
1104
1105 cmd->flags |= USBG_CMD_PENDING_DATA_WRITE;
1106 queue_work(cmd->fu->tpg->workqueue, &cmd->work);
1107 return;
1108
1109 cleanup:
1110 target_put_sess_cmd(se_cmd);
1111
1112 /* Command was aborted due to overlapped tag */
1113 if (cmd->state == UASP_QUEUE_COMMAND &&
1114 cmd->tmr_rsp == RC_OVERLAPPED_TAG) {
1115 uasp_send_tm_response(cmd);
1116 return;
1117 }
1118
1119 transport_send_check_condition_and_sense(se_cmd,
1120 TCM_CHECK_CONDITION_ABORT_CMD, 0);
1121 }
1122
static int usbg_prepare_w_request(struct usbg_cmd *cmd, struct usb_request *req)
1124 {
1125 struct se_cmd *se_cmd = &cmd->se_cmd;
1126 struct f_uas *fu = cmd->fu;
1127 struct usb_gadget *gadget = fuas_to_gadget(fu);
1128
1129 if (!gadget->sg_supported) {
1130 cmd->data_buf = kmalloc(se_cmd->data_length, GFP_ATOMIC);
1131 if (!cmd->data_buf)
1132 return -ENOMEM;
1133
1134 req->buf = cmd->data_buf;
1135 } else {
1136 req->buf = NULL;
1137 req->num_sgs = se_cmd->t_data_nents;
1138 req->sg = se_cmd->t_data_sg;
1139 }
1140
1141 req->is_last = 1;
1142 req->stream_id = cmd->tag;
1143 req->complete = usbg_data_write_cmpl;
1144 req->length = se_cmd->data_length;
1145 req->context = cmd;
1146
1147 cmd->state = UASP_SEND_STATUS;
1148 return 0;
1149 }
1150
static int usbg_send_status_response(struct se_cmd *se_cmd)
1152 {
1153 struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd,
1154 se_cmd);
1155 struct f_uas *fu = cmd->fu;
1156
1157 if (fu->flags & USBG_IS_BOT)
1158 return bot_send_status_response(cmd);
1159 else
1160 return uasp_send_status_response(cmd);
1161 }
1162
static int usbg_send_write_request(struct se_cmd *se_cmd)
1164 {
1165 struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd,
1166 se_cmd);
1167 struct f_uas *fu = cmd->fu;
1168
1169 if (fu->flags & USBG_IS_BOT)
1170 return bot_send_write_request(cmd);
1171 else
1172 return uasp_send_write_request(cmd);
1173 }
1174
static int usbg_send_read_response(struct se_cmd *se_cmd)
1176 {
1177 struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd,
1178 se_cmd);
1179 struct f_uas *fu = cmd->fu;
1180
1181 if (fu->flags & USBG_IS_BOT)
1182 return bot_send_read_response(cmd);
1183 else
1184 return uasp_send_read_response(cmd);
1185 }
1186
1187 static void usbg_aborted_task(struct se_cmd *se_cmd);
1188
static void usbg_submit_tmr(struct usbg_cmd *cmd)
1190 {
1191 struct se_session *se_sess;
1192 struct se_cmd *se_cmd;
1193 int flags = TARGET_SCF_ACK_KREF;
1194
1195 se_cmd = &cmd->se_cmd;
1196 se_sess = cmd->fu->tpg->tpg_nexus->tvn_se_sess;
1197
1198 target_submit_tmr(se_cmd, se_sess,
1199 cmd->response_iu.add_response_info,
1200 cmd->unpacked_lun, NULL, uasp_to_tcm_func(cmd->tmr_func),
1201 GFP_ATOMIC, cmd->tag, flags);
1202 }
1203
static void usbg_submit_cmd(struct usbg_cmd *cmd)
1205 {
1206 struct se_cmd *se_cmd;
1207 struct tcm_usbg_nexus *tv_nexus;
1208 struct usbg_tpg *tpg;
1209 int dir, flags = (TARGET_SCF_UNKNOWN_SIZE | TARGET_SCF_ACK_KREF);
1210
1211 /*
1212 * Note: each command will spawn its own process, and each stage of the
1213 * command is processed sequentially. Should this no longer be the case,
1214 * locking is needed.
1215 */
1216 if (cmd->flags & USBG_CMD_PENDING_DATA_WRITE) {
1217 target_execute_cmd(&cmd->se_cmd);
1218 cmd->flags &= ~USBG_CMD_PENDING_DATA_WRITE;
1219 return;
1220 }
1221
1222 se_cmd = &cmd->se_cmd;
1223 tpg = cmd->fu->tpg;
1224 tv_nexus = tpg->tpg_nexus;
1225 dir = get_cmd_dir(cmd->cmd_buf);
1226 if (dir < 0)
1227 goto out;
1228
1229 target_submit_cmd(se_cmd, tv_nexus->tvn_se_sess, cmd->cmd_buf,
1230 cmd->sense_iu.sense, cmd->unpacked_lun, 0,
1231 cmd->prio_attr, dir, flags);
1232
1233 return;
1234
1235 out:
1236 __target_init_cmd(se_cmd,
1237 tv_nexus->tvn_se_sess->se_tpg->se_tpg_tfo,
1238 tv_nexus->tvn_se_sess, cmd->data_len, DMA_NONE,
1239 cmd->prio_attr, cmd->sense_iu.sense,
1240 cmd->unpacked_lun, NULL);
1241 transport_send_check_condition_and_sense(se_cmd,
1242 TCM_UNSUPPORTED_SCSI_OPCODE, 0);
1243 }
1244
static void usbg_cmd_work(struct work_struct *work)
1246 {
1247 struct usbg_cmd *cmd = container_of(work, struct usbg_cmd, work);
1248
	/*
	 * A failure was already detected by f_tcm. If the failing response is
	 * known, skip submitting the command to the target core and send the
	 * USB response to the host directly.
	 */
1254 if (cmd->tmr_rsp != RC_RESPONSE_UNKNOWN)
1255 goto skip;
1256
1257 if (cmd->tmr_func)
1258 usbg_submit_tmr(cmd);
1259 else
1260 usbg_submit_cmd(cmd);
1261
1262 return;
1263
1264 skip:
1265 if (cmd->tmr_rsp == RC_OVERLAPPED_TAG) {
1266 struct f_uas *fu = cmd->fu;
1267 struct se_session *se_sess;
1268 struct uas_stream *stream = NULL;
1269 struct hlist_node *tmp;
1270 struct usbg_cmd *active_cmd = NULL;
1271
1272 se_sess = cmd->fu->tpg->tpg_nexus->tvn_se_sess;
1273
1274 hash_for_each_possible_safe(fu->stream_hash, stream, tmp, node, cmd->tag) {
1275 int i = stream - &fu->stream[0];
1276
1277 active_cmd = &((struct usbg_cmd *)se_sess->sess_cmd_map)[i];
1278 if (active_cmd->tag == cmd->tag)
1279 break;
1280 }
1281
1282 /* Sanity check */
1283 if (!stream || (active_cmd && active_cmd->tag != cmd->tag)) {
1284 usbg_submit_command(cmd->fu, cmd->req);
1285 return;
1286 }
1287
1288 reinit_completion(&stream->cmd_completion);
1289
1290 /*
1291 * A UASP command consists of the command, data, and status
1292 * stages, each operating sequentially from different endpoints.
1293 *
1294 * Each USB endpoint operates independently, and depending on
1295 * hardware implementation, a completion callback for a transfer
1296 * from one endpoint may not reflect the order of completion on
1297 * the wire. This is particularly true for devices with
1298 * endpoints that have independent interrupts and event buffers.
1299 *
1300 * The driver must still detect misbehaving hosts and respond
1301 * with an overlap status. To reduce false overlap failures,
1302 * allow the active and matching stream ID a brief 1ms to
1303 * complete before responding with an overlap command failure.
1304 * Overlap failure should be rare.
1305 */
1306 wait_for_completion_timeout(&stream->cmd_completion, msecs_to_jiffies(1));
1307
1308 /* If the previous stream is completed, retry the command. */
1309 if (!hash_hashed(&stream->node)) {
1310 usbg_submit_command(cmd->fu, cmd->req);
1311 return;
1312 }
1313
1314 /*
1315 * The command isn't submitted to the target core, so we're safe
1316 * to remove the bitmap index from the session tag pool.
1317 */
1318 sbitmap_queue_clear(&se_sess->sess_tag_pool,
1319 cmd->se_cmd.map_tag,
1320 cmd->se_cmd.map_cpu);
1321
1322 /*
1323 * Overlap command tag detected. Cancel any pending transfer of
1324 * the command submitted to target core.
1325 */
1326 active_cmd->tmr_rsp = RC_OVERLAPPED_TAG;
1327 usbg_aborted_task(&active_cmd->se_cmd);
1328
1329 /* Send the response after the transfer is aborted. */
1330 return;
1331 }
1332
1333 uasp_send_tm_response(cmd);
1334 }
1335
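/*
 * Grab a free tag from the session tag pool and initialize the matching
 * pre-allocated usbg_cmd for this SCSI tag.
 */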
static struct usbg_cmd *usbg_get_cmd(struct f_uas *fu,
		struct tcm_usbg_nexus *tv_nexus, u32 scsi_tag)
1338 {
1339 struct se_session *se_sess = tv_nexus->tvn_se_sess;
1340 struct usbg_cmd *cmd;
1341 int tag, cpu;
1342
1343 tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
1344 if (tag < 0)
1345 return ERR_PTR(-ENOMEM);
1346
1347 cmd = &((struct usbg_cmd *)se_sess->sess_cmd_map)[tag];
1348 memset(cmd, 0, sizeof(*cmd));
1349 cmd->se_cmd.map_tag = tag;
1350 cmd->se_cmd.map_cpu = cpu;
1351 cmd->se_cmd.cpuid = cpu;
1352 cmd->se_cmd.tag = cmd->tag = scsi_tag;
1353 cmd->fu = fu;
1354
1355 return cmd;
1356 }
1357
1358 static void usbg_release_cmd(struct se_cmd *);
1359
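/*
 * Parse a command or task management IU received on the command endpoint,
 * detect overlapped tags via the stream hash, and defer the actual submission
 * to the target core to usbg_cmd_work().
 */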
static int usbg_submit_command(struct f_uas *fu, struct usb_request *req)
1361 {
1362 struct iu *iu = req->buf;
1363 struct usbg_cmd *cmd;
1364 struct usbg_tpg *tpg = fu->tpg;
1365 struct tcm_usbg_nexus *tv_nexus;
1366 struct uas_stream *stream;
1367 struct hlist_node *tmp;
1368 struct command_iu *cmd_iu;
1369 u32 cmd_len;
1370 u16 scsi_tag;
1371
1372 tv_nexus = tpg->tpg_nexus;
1373 if (!tv_nexus) {
1374 pr_err("Missing nexus, ignoring command\n");
1375 return -EINVAL;
1376 }
1377
1378 scsi_tag = be16_to_cpup(&iu->tag);
1379 cmd = usbg_get_cmd(fu, tv_nexus, scsi_tag);
1380 if (IS_ERR(cmd)) {
1381 pr_err("usbg_get_cmd failed\n");
1382 return -ENOMEM;
1383 }
1384
1385 cmd->req = req;
1386 cmd->fu = fu;
1387 cmd->tag = scsi_tag;
1388 cmd->se_cmd.tag = scsi_tag;
1389 cmd->tmr_func = 0;
1390 cmd->tmr_rsp = RC_RESPONSE_UNKNOWN;
1391 cmd->flags = 0;
1392
1393 cmd_iu = (struct command_iu *)iu;
1394
1395 /* Command and Task Management IUs share the same LUN offset */
1396 cmd->unpacked_lun = scsilun_to_int(&cmd_iu->lun);
1397
1398 if (iu->iu_id != IU_ID_COMMAND && iu->iu_id != IU_ID_TASK_MGMT) {
1399 cmd->tmr_rsp = RC_INVALID_INFO_UNIT;
1400 goto skip;
1401 }
1402
1403 hash_for_each_possible_safe(fu->stream_hash, stream, tmp, node, scsi_tag) {
1404 struct usbg_cmd *active_cmd;
1405 struct se_session *se_sess;
1406 int i = stream - &fu->stream[0];
1407
1408 se_sess = cmd->fu->tpg->tpg_nexus->tvn_se_sess;
1409 active_cmd = &((struct usbg_cmd *)se_sess->sess_cmd_map)[i];
1410
1411 if (active_cmd->tag == scsi_tag) {
1412 cmd->tmr_rsp = RC_OVERLAPPED_TAG;
1413 goto skip;
1414 }
1415 }
1416
1417 stream = &fu->stream[cmd->se_cmd.map_tag];
1418 hash_add(fu->stream_hash, &stream->node, scsi_tag);
1419
1420 if (iu->iu_id == IU_ID_TASK_MGMT) {
1421 struct task_mgmt_iu *tm_iu;
1422
1423 tm_iu = (struct task_mgmt_iu *)iu;
1424 cmd->tmr_func = tm_iu->function;
1425 goto skip;
1426 }
1427
1428 cmd_len = (cmd_iu->len & ~0x3) + 16;
1429 if (cmd_len > USBG_MAX_CMD) {
1430 target_free_tag(tv_nexus->tvn_se_sess, &cmd->se_cmd);
1431 hash_del(&stream->node);
1432 return -EINVAL;
1433 }
1434 memcpy(cmd->cmd_buf, cmd_iu->cdb, cmd_len);
1435
1436 switch (cmd_iu->prio_attr & 0x7) {
1437 case UAS_HEAD_TAG:
1438 cmd->prio_attr = TCM_HEAD_TAG;
1439 break;
1440 case UAS_ORDERED_TAG:
1441 cmd->prio_attr = TCM_ORDERED_TAG;
1442 break;
1443 case UAS_ACA:
1444 cmd->prio_attr = TCM_ACA_TAG;
1445 break;
1446 default:
1447 pr_debug_once("Unsupported prio_attr: %02x.\n",
1448 cmd_iu->prio_attr);
1449 fallthrough;
1450 case UAS_SIMPLE_TAG:
1451 cmd->prio_attr = TCM_SIMPLE_TAG;
1452 break;
1453 }
1454
1455 skip:
1456 INIT_WORK(&cmd->work, usbg_cmd_work);
1457 queue_work(tpg->workqueue, &cmd->work);
1458
1459 return 0;
1460 }
1461
static void bot_cmd_work(struct work_struct *work)
1463 {
1464 struct usbg_cmd *cmd = container_of(work, struct usbg_cmd, work);
1465 struct se_cmd *se_cmd;
1466 struct tcm_usbg_nexus *tv_nexus;
1467 struct usbg_tpg *tpg;
1468 int flags = TARGET_SCF_ACK_KREF;
1469 int dir;
1470
1471 /*
1472 * Note: each command will spawn its own process, and each stage of the
1473 * command is processed sequentially. Should this no longer be the case,
1474 * locking is needed.
1475 */
1476 if (cmd->flags & USBG_CMD_PENDING_DATA_WRITE) {
1477 target_execute_cmd(&cmd->se_cmd);
1478 cmd->flags &= ~USBG_CMD_PENDING_DATA_WRITE;
1479 return;
1480 }
1481
1482 se_cmd = &cmd->se_cmd;
1483 tpg = cmd->fu->tpg;
1484 tv_nexus = tpg->tpg_nexus;
1485 dir = get_cmd_dir(cmd->cmd_buf);
1486 if (dir < 0)
1487 goto out;
1488
1489 target_submit_cmd(se_cmd, tv_nexus->tvn_se_sess,
1490 cmd->cmd_buf, cmd->sense_iu.sense, cmd->unpacked_lun,
1491 cmd->data_len, cmd->prio_attr, dir, flags);
1492 return;
1493
1494 out:
1495 __target_init_cmd(se_cmd,
1496 tv_nexus->tvn_se_sess->se_tpg->se_tpg_tfo,
1497 tv_nexus->tvn_se_sess, cmd->data_len, DMA_NONE,
1498 cmd->prio_attr, cmd->sense_iu.sense,
1499 cmd->unpacked_lun, NULL);
1500 transport_send_check_condition_and_sense(se_cmd,
1501 TCM_UNSUPPORTED_SCSI_OPCODE, 0);
1502 }
1503
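/*
 * Validate the received CBW (signature, length, CDB size), build a usbg_cmd
 * from it and queue bot_cmd_work() to submit the command to the target core.
 */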
static int bot_submit_command(struct f_uas *fu,
		void *cmdbuf, unsigned int len)
1506 {
1507 struct bulk_cb_wrap *cbw = cmdbuf;
1508 struct usbg_cmd *cmd;
1509 struct usbg_tpg *tpg = fu->tpg;
1510 struct tcm_usbg_nexus *tv_nexus;
1511 u32 cmd_len;
1512
1513 if (cbw->Signature != cpu_to_le32(US_BULK_CB_SIGN)) {
1514 pr_err("Wrong signature on CBW\n");
1515 return -EINVAL;
1516 }
1517 if (len != 31) {
1518 pr_err("Wrong length for CBW\n");
1519 return -EINVAL;
1520 }
1521
1522 cmd_len = cbw->Length;
1523 if (cmd_len < 1 || cmd_len > 16)
1524 return -EINVAL;
1525
1526 tv_nexus = tpg->tpg_nexus;
1527 if (!tv_nexus) {
1528 pr_err("Missing nexus, ignoring command\n");
1529 return -ENODEV;
1530 }
1531
1532 cmd = usbg_get_cmd(fu, tv_nexus, cbw->Tag);
1533 if (IS_ERR(cmd)) {
1534 pr_err("usbg_get_cmd failed\n");
1535 return -ENOMEM;
1536 }
1537 memcpy(cmd->cmd_buf, cbw->CDB, cmd_len);
1538
1539 cmd->bot_tag = cbw->Tag;
1540 cmd->prio_attr = TCM_SIMPLE_TAG;
1541 cmd->unpacked_lun = cbw->Lun;
1542 cmd->is_read = cbw->Flags & US_BULK_FLAG_IN ? 1 : 0;
1543 cmd->data_len = le32_to_cpu(cbw->DataTransferLength);
1544 cmd->se_cmd.tag = le32_to_cpu(cmd->bot_tag);
1545 cmd->flags = 0;
1546
1547 INIT_WORK(&cmd->work, bot_cmd_work);
1548 queue_work(tpg->workqueue, &cmd->work);
1549
1550 return 0;
1551 }
1552
1553 /* Start fabric.c code */
1554
static int usbg_check_true(struct se_portal_group *se_tpg)
1556 {
1557 return 1;
1558 }
1559
static char *usbg_get_fabric_wwn(struct se_portal_group *se_tpg)
1561 {
1562 struct usbg_tpg *tpg = container_of(se_tpg,
1563 struct usbg_tpg, se_tpg);
1564 struct usbg_tport *tport = tpg->tport;
1565
1566 return &tport->tport_name[0];
1567 }
1568
static u16 usbg_get_tag(struct se_portal_group *se_tpg)
1570 {
1571 struct usbg_tpg *tpg = container_of(se_tpg,
1572 struct usbg_tpg, se_tpg);
1573 return tpg->tport_tpgt;
1574 }
1575
static void usbg_release_cmd(struct se_cmd *se_cmd)
1577 {
1578 struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd,
1579 se_cmd);
1580 struct se_session *se_sess = se_cmd->se_sess;
1581
1582 cmd->tag = 0;
1583 kfree(cmd->data_buf);
1584 target_free_tag(se_sess, se_cmd);
1585 }
1586
static void usbg_queue_tm_rsp(struct se_cmd *se_cmd)
1588 {
1589 struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd, se_cmd);
1590
1591 uasp_send_tm_response(cmd);
1592 }
1593
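/*
 * Abort a command by dequeuing whichever of its data or status requests is
 * still in flight on the corresponding endpoint.
 */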
static void usbg_aborted_task(struct se_cmd *se_cmd)
1595 {
1596 struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd, se_cmd);
1597 struct f_uas *fu = cmd->fu;
1598 struct usb_gadget *gadget = fuas_to_gadget(fu);
1599 struct uas_stream *stream = &fu->stream[se_cmd->map_tag];
1600 int ret = 0;
1601
1602 if (stream->req_out->status == -EINPROGRESS)
1603 ret = usb_ep_dequeue(fu->ep_out, stream->req_out);
1604 else if (stream->req_in->status == -EINPROGRESS)
1605 ret = usb_ep_dequeue(fu->ep_in, stream->req_in);
1606 else if (stream->req_status->status == -EINPROGRESS)
1607 ret = usb_ep_dequeue(fu->ep_status, stream->req_status);
1608
1609 if (ret)
1610 dev_err(&gadget->dev, "Failed to abort cmd tag %d, (%d)\n",
1611 cmd->tag, ret);
1612
1613 cmd->state = UASP_QUEUE_COMMAND;
1614 }
1615
static const char *usbg_check_wwn(const char *name)
1617 {
1618 const char *n;
1619 unsigned int len;
1620
1621 n = strstr(name, "naa.");
1622 if (!n)
1623 return NULL;
1624 n += 4;
1625 len = strlen(n);
1626 if (len == 0 || len > USBG_NAMELEN - 1)
1627 return NULL;
1628 return n;
1629 }
1630
static int usbg_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
1632 {
1633 if (!usbg_check_wwn(name))
1634 return -EINVAL;
1635 return 0;
1636 }
1637
static struct se_portal_group *usbg_make_tpg(struct se_wwn *wwn,
		const char *name)
1640 {
1641 struct usbg_tport *tport = container_of(wwn, struct usbg_tport,
1642 tport_wwn);
1643 struct usbg_tpg *tpg;
1644 unsigned long tpgt;
1645 int ret;
1646 struct f_tcm_opts *opts;
1647 unsigned i;
1648
1649 if (strstr(name, "tpgt_") != name)
1650 return ERR_PTR(-EINVAL);
1651 if (kstrtoul(name + 5, 0, &tpgt) || tpgt > UINT_MAX)
1652 return ERR_PTR(-EINVAL);
1653 ret = -ENODEV;
1654 mutex_lock(&tpg_instances_lock);
1655 for (i = 0; i < TPG_INSTANCES; ++i)
1656 if (tpg_instances[i].func_inst && !tpg_instances[i].tpg)
1657 break;
1658 if (i == TPG_INSTANCES)
1659 goto unlock_inst;
1660
1661 opts = container_of(tpg_instances[i].func_inst, struct f_tcm_opts,
1662 func_inst);
1663 mutex_lock(&opts->dep_lock);
1664 if (!opts->ready)
1665 goto unlock_dep;
1666
1667 if (opts->has_dep) {
1668 if (!try_module_get(opts->dependent))
1669 goto unlock_dep;
1670 } else {
1671 ret = configfs_depend_item_unlocked(
1672 wwn->wwn_group.cg_subsys,
1673 &opts->func_inst.group.cg_item);
1674 if (ret)
1675 goto unlock_dep;
1676 }
1677
1678 tpg = kzalloc(sizeof(struct usbg_tpg), GFP_KERNEL);
1679 ret = -ENOMEM;
1680 if (!tpg)
1681 goto unref_dep;
1682 mutex_init(&tpg->tpg_mutex);
1683 atomic_set(&tpg->tpg_port_count, 0);
1684 tpg->workqueue = alloc_workqueue("tcm_usb_gadget",
1685 WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
1686 if (!tpg->workqueue)
1687 goto free_tpg;
1688
1689 tpg->tport = tport;
1690 tpg->tport_tpgt = tpgt;
1691
	/*
	 * SPC doesn't assign a protocol identifier for USB-SCSI, so we
	 * pretend to be SAS.
	 */
1696 ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_SAS);
1697 if (ret < 0)
1698 goto free_workqueue;
1699
1700 tpg_instances[i].tpg = tpg;
1701 tpg->fi = tpg_instances[i].func_inst;
1702 mutex_unlock(&opts->dep_lock);
1703 mutex_unlock(&tpg_instances_lock);
1704 return &tpg->se_tpg;
1705
1706 free_workqueue:
1707 destroy_workqueue(tpg->workqueue);
1708 free_tpg:
1709 kfree(tpg);
1710 unref_dep:
1711 if (opts->has_dep)
1712 module_put(opts->dependent);
1713 else
1714 configfs_undepend_item_unlocked(&opts->func_inst.group.cg_item);
1715 unlock_dep:
1716 mutex_unlock(&opts->dep_lock);
1717 unlock_inst:
1718 mutex_unlock(&tpg_instances_lock);
1719
1720 return ERR_PTR(ret);
1721 }
1722
1723 static int tcm_usbg_drop_nexus(struct usbg_tpg *);
1724
static void usbg_drop_tpg(struct se_portal_group *se_tpg)
1726 {
1727 struct usbg_tpg *tpg = container_of(se_tpg,
1728 struct usbg_tpg, se_tpg);
1729 unsigned i;
1730 struct f_tcm_opts *opts;
1731
1732 tcm_usbg_drop_nexus(tpg);
1733 core_tpg_deregister(se_tpg);
1734 destroy_workqueue(tpg->workqueue);
1735
1736 mutex_lock(&tpg_instances_lock);
1737 for (i = 0; i < TPG_INSTANCES; ++i)
1738 if (tpg_instances[i].tpg == tpg)
1739 break;
1740 if (i < TPG_INSTANCES) {
1741 tpg_instances[i].tpg = NULL;
1742 opts = container_of(tpg_instances[i].func_inst,
1743 struct f_tcm_opts, func_inst);
1744 mutex_lock(&opts->dep_lock);
1745 if (opts->has_dep)
1746 module_put(opts->dependent);
1747 else
1748 configfs_undepend_item_unlocked(
1749 &opts->func_inst.group.cg_item);
1750 mutex_unlock(&opts->dep_lock);
1751 }
1752 mutex_unlock(&tpg_instances_lock);
1753
1754 kfree(tpg);
1755 }
1756
static struct se_wwn *usbg_make_tport(
	struct target_fabric_configfs *tf,
	struct config_group *group,
	const char *name)
1761 {
1762 struct usbg_tport *tport;
1763 const char *wnn_name;
1764 u64 wwpn = 0;
1765
1766 wnn_name = usbg_check_wwn(name);
1767 if (!wnn_name)
1768 return ERR_PTR(-EINVAL);
1769
1770 tport = kzalloc(sizeof(struct usbg_tport), GFP_KERNEL);
1771 if (!(tport))
1772 return ERR_PTR(-ENOMEM);
1773
1774 tport->tport_wwpn = wwpn;
1775 snprintf(tport->tport_name, sizeof(tport->tport_name), "%s", wnn_name);
1776 return &tport->tport_wwn;
1777 }
1778
static void usbg_drop_tport(struct se_wwn *wwn)
1780 {
1781 struct usbg_tport *tport = container_of(wwn,
1782 struct usbg_tport, tport_wwn);
1783 kfree(tport);
1784 }
1785
1786 /*
1787 * If somebody feels like dropping the version property, go ahead.
1788 */
static ssize_t usbg_wwn_version_show(struct config_item *item, char *page)
1790 {
1791 return sprintf(page, "usb-gadget fabric module\n");
1792 }
1793
1794 CONFIGFS_ATTR_RO(usbg_wwn_, version);
1795
1796 static struct configfs_attribute *usbg_wwn_attrs[] = {
1797 &usbg_wwn_attr_version,
1798 NULL,
1799 };
1800
1801 static int usbg_attach(struct usbg_tpg *);
1802 static void usbg_detach(struct usbg_tpg *);
1803
static int usbg_enable_tpg(struct se_portal_group *se_tpg, bool enable)
1805 {
1806 struct usbg_tpg *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg);
1807 int ret = 0;
1808
1809 if (enable)
1810 ret = usbg_attach(tpg);
1811 else
1812 usbg_detach(tpg);
1813 if (ret)
1814 return ret;
1815
1816 tpg->gadget_connect = enable;
1817
1818 return 0;
1819 }
1820
static ssize_t tcm_usbg_tpg_nexus_show(struct config_item *item, char *page)
1822 {
1823 struct se_portal_group *se_tpg = to_tpg(item);
1824 struct usbg_tpg *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg);
1825 struct tcm_usbg_nexus *tv_nexus;
1826 ssize_t ret;
1827
1828 mutex_lock(&tpg->tpg_mutex);
1829 tv_nexus = tpg->tpg_nexus;
1830 if (!tv_nexus) {
1831 ret = -ENODEV;
1832 goto out;
1833 }
1834 ret = sysfs_emit(page, "%s\n",
1835 tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
1836 out:
1837 mutex_unlock(&tpg->tpg_mutex);
1838 return ret;
1839 }
1840
static int usbg_alloc_sess_cb(struct se_portal_group *se_tpg,
		struct se_session *se_sess, void *p)
1843 {
1844 struct usbg_tpg *tpg = container_of(se_tpg,
1845 struct usbg_tpg, se_tpg);
1846
1847 tpg->tpg_nexus = p;
1848 return 0;
1849 }
1850
static int tcm_usbg_make_nexus(struct usbg_tpg *tpg, char *name)
1852 {
1853 struct tcm_usbg_nexus *tv_nexus;
1854 int ret = 0;
1855
1856 mutex_lock(&tpg->tpg_mutex);
1857 if (tpg->tpg_nexus) {
1858 ret = -EEXIST;
1859 pr_debug("tpg->tpg_nexus already exists\n");
1860 goto out_unlock;
1861 }
1862
1863 tv_nexus = kzalloc(sizeof(*tv_nexus), GFP_KERNEL);
1864 if (!tv_nexus) {
1865 ret = -ENOMEM;
1866 goto out_unlock;
1867 }
1868
1869 tv_nexus->tvn_se_sess = target_setup_session(&tpg->se_tpg,
1870 USB_G_DEFAULT_SESSION_TAGS,
1871 sizeof(struct usbg_cmd),
1872 TARGET_PROT_NORMAL, name,
1873 tv_nexus, usbg_alloc_sess_cb);
1874 if (IS_ERR(tv_nexus->tvn_se_sess)) {
1875 #define MAKE_NEXUS_MSG "core_tpg_check_initiator_node_acl() failed for %s\n"
1876 pr_debug(MAKE_NEXUS_MSG, name);
1877 #undef MAKE_NEXUS_MSG
1878 ret = PTR_ERR(tv_nexus->tvn_se_sess);
1879 kfree(tv_nexus);
1880 }
1881
1882 out_unlock:
1883 mutex_unlock(&tpg->tpg_mutex);
1884 return ret;
1885 }
1886
static int tcm_usbg_drop_nexus(struct usbg_tpg *tpg)
1888 {
1889 struct se_session *se_sess;
1890 struct tcm_usbg_nexus *tv_nexus;
1891 int ret = -ENODEV;
1892
1893 mutex_lock(&tpg->tpg_mutex);
1894 tv_nexus = tpg->tpg_nexus;
1895 if (!tv_nexus)
1896 goto out;
1897
1898 se_sess = tv_nexus->tvn_se_sess;
1899 if (!se_sess)
1900 goto out;
1901
1902 if (atomic_read(&tpg->tpg_port_count)) {
1903 ret = -EPERM;
1904 #define MSG "Unable to remove Host I_T Nexus with active TPG port count: %d\n"
1905 pr_err(MSG, atomic_read(&tpg->tpg_port_count));
1906 #undef MSG
1907 goto out;
1908 }
1909
1910 pr_debug("Removing I_T Nexus to Initiator Port: %s\n",
1911 tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
	/*
	 * Release the SCSI I_T Nexus to the emulated USB gadget Target Port
	 */
1915 target_remove_session(se_sess);
1916 tpg->tpg_nexus = NULL;
1917
1918 kfree(tv_nexus);
1919 ret = 0;
1920 out:
1921 mutex_unlock(&tpg->tpg_mutex);
1922 return ret;
1923 }
1924
static ssize_t tcm_usbg_tpg_nexus_store(struct config_item *item,
		const char *page, size_t count)
1927 {
1928 struct se_portal_group *se_tpg = to_tpg(item);
1929 struct usbg_tpg *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg);
1930 unsigned char i_port[USBG_NAMELEN], *ptr;
1931 int ret;
1932
1933 if (!strncmp(page, "NULL", 4)) {
1934 ret = tcm_usbg_drop_nexus(tpg);
1935 return (!ret) ? count : ret;
1936 }
1937 if (strlen(page) >= USBG_NAMELEN) {
1938
1939 #define NEXUS_STORE_MSG "Emulated NAA Sas Address: %s, exceeds max: %d\n"
1940 pr_err(NEXUS_STORE_MSG, page, USBG_NAMELEN);
1941 #undef NEXUS_STORE_MSG
1942 return -EINVAL;
1943 }
1944 snprintf(i_port, USBG_NAMELEN, "%s", page);
1945
1946 ptr = strstr(i_port, "naa.");
1947 if (!ptr) {
1948 pr_err("Missing 'naa.' prefix\n");
1949 return -EINVAL;
1950 }
1951
1952 if (i_port[strlen(i_port) - 1] == '\n')
1953 i_port[strlen(i_port) - 1] = '\0';
1954
1955 ret = tcm_usbg_make_nexus(tpg, &i_port[0]);
1956 if (ret < 0)
1957 return ret;
1958 return count;
1959 }
1960
1961 CONFIGFS_ATTR(tcm_usbg_tpg_, nexus);
1962
1963 static struct configfs_attribute *usbg_base_attrs[] = {
1964 &tcm_usbg_tpg_attr_nexus,
1965 NULL,
1966 };
1967
static int usbg_port_link(struct se_portal_group *se_tpg, struct se_lun *lun)
1969 {
1970 struct usbg_tpg *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg);
1971
1972 atomic_inc(&tpg->tpg_port_count);
1973 smp_mb__after_atomic();
1974 return 0;
1975 }
1976
usbg_port_unlink(struct se_portal_group * se_tpg,struct se_lun * se_lun)1977 static void usbg_port_unlink(struct se_portal_group *se_tpg,
1978 struct se_lun *se_lun)
1979 {
1980 struct usbg_tpg *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg);
1981
1982 atomic_dec(&tpg->tpg_port_count);
1983 smp_mb__after_atomic();
1984 }
1985
usbg_check_stop_free(struct se_cmd * se_cmd)1986 static int usbg_check_stop_free(struct se_cmd *se_cmd)
1987 {
1988 return target_put_sess_cmd(se_cmd);
1989 }
1990
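/*
 * Fabric ops handed to the target core via target_register_template(). The
 * data-path callbacks (write_pending, queue_data_in, queue_status, ...) map
 * TCM completions onto the USB request handlers defined earlier in this file.
 */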
static const struct target_core_fabric_ops usbg_ops = {
	.module = THIS_MODULE,
	.fabric_name = "usb_gadget",
	.tpg_get_wwn = usbg_get_fabric_wwn,
	.tpg_get_tag = usbg_get_tag,
	.tpg_check_demo_mode = usbg_check_true,
	.release_cmd = usbg_release_cmd,
	.sess_get_initiator_sid = NULL,
	.write_pending = usbg_send_write_request,
	.queue_data_in = usbg_send_read_response,
	.queue_status = usbg_send_status_response,
	.queue_tm_rsp = usbg_queue_tm_rsp,
	.aborted_task = usbg_aborted_task,
	.check_stop_free = usbg_check_stop_free,

	.fabric_make_wwn = usbg_make_tport,
	.fabric_drop_wwn = usbg_drop_tport,
	.fabric_make_tpg = usbg_make_tpg,
	.fabric_enable_tpg = usbg_enable_tpg,
	.fabric_drop_tpg = usbg_drop_tpg,
	.fabric_post_link = usbg_port_link,
	.fabric_pre_unlink = usbg_port_unlink,
	.fabric_init_nodeacl = usbg_init_nodeacl,

	.tfc_wwn_attrs = usbg_wwn_attrs,
	.tfc_tpg_base_attrs = usbg_base_attrs,

	.default_submit_type = TARGET_DIRECT_SUBMIT,
	.direct_submit_supp = 1,
};

/* Start gadget.c code */

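/*
 * One interface with two alternate settings: USB_G_ALT_INT_BBB exposes the
 * Bulk-Only Transport with two bulk endpoints, USB_G_ALT_INT_UAS exposes
 * UAS with four endpoints (command, status, data-in, data-out).
 */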
static struct usb_interface_descriptor bot_intf_desc = {
	.bLength = sizeof(bot_intf_desc),
	.bDescriptorType = USB_DT_INTERFACE,
	.bNumEndpoints = 2,
	.bAlternateSetting = USB_G_ALT_INT_BBB,
	.bInterfaceClass = USB_CLASS_MASS_STORAGE,
	.bInterfaceSubClass = USB_SC_SCSI,
	.bInterfaceProtocol = USB_PR_BULK,
};

static struct usb_interface_descriptor uasp_intf_desc = {
	.bLength = sizeof(uasp_intf_desc),
	.bDescriptorType = USB_DT_INTERFACE,
	.bNumEndpoints = 4,
	.bAlternateSetting = USB_G_ALT_INT_UAS,
	.bInterfaceClass = USB_CLASS_MASS_STORAGE,
	.bInterfaceSubClass = USB_SC_SCSI,
	.bInterfaceProtocol = USB_PR_UAS,
};

static struct usb_endpoint_descriptor uasp_bi_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_IN,
	.bmAttributes = USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize = cpu_to_le16(512),
};

static struct usb_endpoint_descriptor uasp_fs_bi_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_IN,
	.bmAttributes = USB_ENDPOINT_XFER_BULK,
};

static struct usb_pipe_usage_descriptor uasp_bi_pipe_desc = {
	.bLength = sizeof(uasp_bi_pipe_desc),
	.bDescriptorType = USB_DT_PIPE_USAGE,
	.bPipeID = DATA_IN_PIPE_ID,
};

static struct usb_endpoint_descriptor uasp_ss_bi_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_IN,
	.bmAttributes = USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize = cpu_to_le16(1024),
};

static struct usb_ss_ep_comp_descriptor uasp_bi_ep_comp_desc = {
	.bLength = sizeof(uasp_bi_ep_comp_desc),
	.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
	.bMaxBurst = 15,
	.bmAttributes = UASP_SS_EP_COMP_LOG_STREAMS,
	.wBytesPerInterval = 0,
};

static struct usb_ss_ep_comp_descriptor bot_bi_ep_comp_desc = {
	.bLength = sizeof(bot_bi_ep_comp_desc),
	.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
	.bMaxBurst = 15,
};

static struct usb_endpoint_descriptor uasp_bo_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_OUT,
	.bmAttributes = USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize = cpu_to_le16(512),
};

static struct usb_endpoint_descriptor uasp_fs_bo_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_OUT,
	.bmAttributes = USB_ENDPOINT_XFER_BULK,
};

static struct usb_pipe_usage_descriptor uasp_bo_pipe_desc = {
	.bLength = sizeof(uasp_bo_pipe_desc),
	.bDescriptorType = USB_DT_PIPE_USAGE,
	.bPipeID = DATA_OUT_PIPE_ID,
};

static struct usb_endpoint_descriptor uasp_ss_bo_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_OUT,
	.bmAttributes = USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize = cpu_to_le16(1024),
};

static struct usb_ss_ep_comp_descriptor uasp_bo_ep_comp_desc = {
	.bLength = sizeof(uasp_bo_ep_comp_desc),
	.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
	.bMaxBurst = 15,
	.bmAttributes = UASP_SS_EP_COMP_LOG_STREAMS,
};

static struct usb_ss_ep_comp_descriptor bot_bo_ep_comp_desc = {
	.bLength = sizeof(bot_bo_ep_comp_desc),
	.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
	.bMaxBurst = 15,
};

static struct usb_endpoint_descriptor uasp_status_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_IN,
	.bmAttributes = USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize = cpu_to_le16(512),
};

static struct usb_endpoint_descriptor uasp_fs_status_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_IN,
	.bmAttributes = USB_ENDPOINT_XFER_BULK,
};

static struct usb_pipe_usage_descriptor uasp_status_pipe_desc = {
	.bLength = sizeof(uasp_status_pipe_desc),
	.bDescriptorType = USB_DT_PIPE_USAGE,
	.bPipeID = STATUS_PIPE_ID,
};

static struct usb_endpoint_descriptor uasp_ss_status_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_IN,
	.bmAttributes = USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize = cpu_to_le16(1024),
};

static struct usb_ss_ep_comp_descriptor uasp_status_in_ep_comp_desc = {
	.bLength = sizeof(uasp_status_in_ep_comp_desc),
	.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
	.bmAttributes = UASP_SS_EP_COMP_LOG_STREAMS,
};

static struct usb_endpoint_descriptor uasp_cmd_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_OUT,
	.bmAttributes = USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize = cpu_to_le16(512),
};

static struct usb_endpoint_descriptor uasp_fs_cmd_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_OUT,
	.bmAttributes = USB_ENDPOINT_XFER_BULK,
};

static struct usb_pipe_usage_descriptor uasp_cmd_pipe_desc = {
	.bLength = sizeof(uasp_cmd_pipe_desc),
	.bDescriptorType = USB_DT_PIPE_USAGE,
	.bPipeID = CMD_PIPE_ID,
};

static struct usb_endpoint_descriptor uasp_ss_cmd_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_OUT,
	.bmAttributes = USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize = cpu_to_le16(1024),
};

static struct usb_ss_ep_comp_descriptor uasp_cmd_comp_desc = {
	.bLength = sizeof(uasp_cmd_comp_desc),
	.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
};

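/*
 * Per-speed descriptor sets. Each set lists the BOT alternate setting first,
 * followed by the UAS alternate setting with a pipe usage descriptor after
 * every endpoint; the SuperSpeed set additionally carries endpoint companion
 * descriptors.
 */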
static struct usb_descriptor_header *uasp_fs_function_desc[] = {
	(struct usb_descriptor_header *) &bot_intf_desc,
	(struct usb_descriptor_header *) &uasp_fs_bi_desc,
	(struct usb_descriptor_header *) &uasp_fs_bo_desc,

	(struct usb_descriptor_header *) &uasp_intf_desc,
	(struct usb_descriptor_header *) &uasp_fs_bi_desc,
	(struct usb_descriptor_header *) &uasp_bi_pipe_desc,
	(struct usb_descriptor_header *) &uasp_fs_bo_desc,
	(struct usb_descriptor_header *) &uasp_bo_pipe_desc,
	(struct usb_descriptor_header *) &uasp_fs_status_desc,
	(struct usb_descriptor_header *) &uasp_status_pipe_desc,
	(struct usb_descriptor_header *) &uasp_fs_cmd_desc,
	(struct usb_descriptor_header *) &uasp_cmd_pipe_desc,
	NULL,
};

static struct usb_descriptor_header *uasp_hs_function_desc[] = {
	(struct usb_descriptor_header *) &bot_intf_desc,
	(struct usb_descriptor_header *) &uasp_bi_desc,
	(struct usb_descriptor_header *) &uasp_bo_desc,

	(struct usb_descriptor_header *) &uasp_intf_desc,
	(struct usb_descriptor_header *) &uasp_bi_desc,
	(struct usb_descriptor_header *) &uasp_bi_pipe_desc,
	(struct usb_descriptor_header *) &uasp_bo_desc,
	(struct usb_descriptor_header *) &uasp_bo_pipe_desc,
	(struct usb_descriptor_header *) &uasp_status_desc,
	(struct usb_descriptor_header *) &uasp_status_pipe_desc,
	(struct usb_descriptor_header *) &uasp_cmd_desc,
	(struct usb_descriptor_header *) &uasp_cmd_pipe_desc,
	NULL,
};

static struct usb_descriptor_header *uasp_ss_function_desc[] = {
	(struct usb_descriptor_header *) &bot_intf_desc,
	(struct usb_descriptor_header *) &uasp_ss_bi_desc,
	(struct usb_descriptor_header *) &bot_bi_ep_comp_desc,
	(struct usb_descriptor_header *) &uasp_ss_bo_desc,
	(struct usb_descriptor_header *) &bot_bo_ep_comp_desc,

	(struct usb_descriptor_header *) &uasp_intf_desc,
	(struct usb_descriptor_header *) &uasp_ss_bi_desc,
	(struct usb_descriptor_header *) &uasp_bi_ep_comp_desc,
	(struct usb_descriptor_header *) &uasp_bi_pipe_desc,
	(struct usb_descriptor_header *) &uasp_ss_bo_desc,
	(struct usb_descriptor_header *) &uasp_bo_ep_comp_desc,
	(struct usb_descriptor_header *) &uasp_bo_pipe_desc,
	(struct usb_descriptor_header *) &uasp_ss_status_desc,
	(struct usb_descriptor_header *) &uasp_status_in_ep_comp_desc,
	(struct usb_descriptor_header *) &uasp_status_pipe_desc,
	(struct usb_descriptor_header *) &uasp_ss_cmd_desc,
	(struct usb_descriptor_header *) &uasp_cmd_comp_desc,
	(struct usb_descriptor_header *) &uasp_cmd_pipe_desc,
	NULL,
};

static struct usb_string tcm_us_strings[] = {
	[USB_G_STR_INT_UAS].s = "USB Attached SCSI",
	[USB_G_STR_INT_BBB].s = "Bulk Only Transport",
	{ },
};

static struct usb_gadget_strings tcm_stringtab = {
	.language = 0x0409,
	.strings = tcm_us_strings,
};

static struct usb_gadget_strings *tcm_strings[] = {
	&tcm_stringtab,
	NULL,
};

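/*
 * Bind the function to a configuration: attach the strings, allocate the
 * interface number, auto-configure the four endpoints based on the
 * full-speed descriptors and propagate the chosen endpoint addresses to the
 * high-speed and SuperSpeed descriptors before registering them.
 */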
static int tcm_bind(struct usb_configuration *c, struct usb_function *f)
{
	struct f_uas *fu = to_f_uas(f);
	struct usb_string *us;
	struct usb_gadget *gadget = c->cdev->gadget;
	struct usb_ep *ep;
	struct f_tcm_opts *opts;
	int iface;
	int ret;

	opts = container_of(f->fi, struct f_tcm_opts, func_inst);

	mutex_lock(&opts->dep_lock);
	if (!opts->can_attach) {
		mutex_unlock(&opts->dep_lock);
		return -ENODEV;
	}
	mutex_unlock(&opts->dep_lock);
	us = usb_gstrings_attach(c->cdev, tcm_strings,
				 ARRAY_SIZE(tcm_us_strings));
	if (IS_ERR(us))
		return PTR_ERR(us);
	bot_intf_desc.iInterface = us[USB_G_STR_INT_BBB].id;
	uasp_intf_desc.iInterface = us[USB_G_STR_INT_UAS].id;

	iface = usb_interface_id(c, f);
	if (iface < 0)
		return iface;

	bot_intf_desc.bInterfaceNumber = iface;
	uasp_intf_desc.bInterfaceNumber = iface;
	fu->iface = iface;
	ep = usb_ep_autoconfig(gadget, &uasp_fs_bi_desc);
	if (!ep)
		goto ep_fail;

	fu->ep_in = ep;

	ep = usb_ep_autoconfig(gadget, &uasp_fs_bo_desc);
	if (!ep)
		goto ep_fail;
	fu->ep_out = ep;

	ep = usb_ep_autoconfig(gadget, &uasp_fs_status_desc);
	if (!ep)
		goto ep_fail;
	fu->ep_status = ep;

	ep = usb_ep_autoconfig(gadget, &uasp_fs_cmd_desc);
	if (!ep)
		goto ep_fail;
	fu->ep_cmd = ep;

	/* Assume endpoint addresses are the same for both speeds */
	uasp_bi_desc.bEndpointAddress = uasp_fs_bi_desc.bEndpointAddress;
	uasp_bo_desc.bEndpointAddress = uasp_fs_bo_desc.bEndpointAddress;
	uasp_status_desc.bEndpointAddress =
		uasp_fs_status_desc.bEndpointAddress;
	uasp_cmd_desc.bEndpointAddress = uasp_fs_cmd_desc.bEndpointAddress;

	uasp_ss_bi_desc.bEndpointAddress = uasp_fs_bi_desc.bEndpointAddress;
	uasp_ss_bo_desc.bEndpointAddress = uasp_fs_bo_desc.bEndpointAddress;
	uasp_ss_status_desc.bEndpointAddress =
		uasp_fs_status_desc.bEndpointAddress;
	uasp_ss_cmd_desc.bEndpointAddress = uasp_fs_cmd_desc.bEndpointAddress;

	ret = usb_assign_descriptors(f, uasp_fs_function_desc,
			uasp_hs_function_desc, uasp_ss_function_desc,
			uasp_ss_function_desc);
	if (ret)
		goto ep_fail;

	return 0;
ep_fail:
	pr_err("Can't claim all required eps\n");

	return -ENOTSUPP;
}

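/*
 * SET_INTERFACE is handled in atomic (setup) context, so switching between
 * the BOT and UAS alternate settings is deferred to a workqueue:
 * tcm_set_alt() returns USB_GADGET_DELAYED_STATUS and tcm_delayed_set_alt()
 * finishes the control transfer via usb_composite_setup_continue().
 */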
struct guas_setup_wq {
	struct work_struct work;
	struct f_uas *fu;
	unsigned int alt;
};

static void tcm_delayed_set_alt(struct work_struct *wq)
{
	struct guas_setup_wq *work = container_of(wq, struct guas_setup_wq,
						  work);
	struct f_uas *fu = work->fu;
	int alt = work->alt;

	kfree(work);

	if (fu->flags & USBG_IS_BOT)
		bot_cleanup_old_alt(fu);
	if (fu->flags & USBG_IS_UAS)
		uasp_cleanup_old_alt(fu);

	if (alt == USB_G_ALT_INT_BBB)
		bot_set_alt(fu);
	else if (alt == USB_G_ALT_INT_UAS)
		uasp_set_alt(fu);
	usb_composite_setup_continue(fu->function.config->cdev);
}

static int tcm_get_alt(struct usb_function *f, unsigned intf)
{
	struct f_uas *fu = to_f_uas(f);

	if (fu->iface != intf)
		return -EOPNOTSUPP;

	if (fu->flags & USBG_IS_BOT)
		return USB_G_ALT_INT_BBB;
	else if (fu->flags & USBG_IS_UAS)
		return USB_G_ALT_INT_UAS;

	return -EOPNOTSUPP;
}

static int tcm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
	struct f_uas *fu = to_f_uas(f);

	if (fu->iface != intf)
		return -EOPNOTSUPP;

	if ((alt == USB_G_ALT_INT_BBB) || (alt == USB_G_ALT_INT_UAS)) {
		struct guas_setup_wq *work;

		work = kmalloc(sizeof(*work), GFP_ATOMIC);
		if (!work)
			return -ENOMEM;
		INIT_WORK(&work->work, tcm_delayed_set_alt);
		work->fu = fu;
		work->alt = alt;
		schedule_work(&work->work);
		return USB_GADGET_DELAYED_STATUS;
	}
	return -EOPNOTSUPP;
}

static void tcm_disable(struct usb_function *f)
{
	struct f_uas *fu = to_f_uas(f);

	if (fu->flags & USBG_IS_UAS)
		uasp_cleanup_old_alt(fu);
	else if (fu->flags & USBG_IS_BOT)
		bot_cleanup_old_alt(fu);
	fu->flags = 0;
}

static int tcm_setup(struct usb_function *f,
		     const struct usb_ctrlrequest *ctrl)
{
	struct f_uas *fu = to_f_uas(f);

	if (!(fu->flags & USBG_IS_BOT))
		return -EOPNOTSUPP;

	return usbg_bot_setup(f, ctrl);
}

static inline struct f_tcm_opts *to_f_tcm_opts(struct config_item *item)
{
	return container_of(to_config_group(item), struct f_tcm_opts,
			    func_inst.group);
}

static void tcm_attr_release(struct config_item *item)
{
	struct f_tcm_opts *opts = to_f_tcm_opts(item);

	usb_put_function_instance(&opts->func_inst);
}

static struct configfs_item_operations tcm_item_ops = {
	.release = tcm_attr_release,
};

static const struct config_item_type tcm_func_type = {
	.ct_item_ops = &tcm_item_ops,
	.ct_owner = THIS_MODULE,
};

static void tcm_free_inst(struct usb_function_instance *f)
{
	struct f_tcm_opts *opts;
	unsigned i;

	opts = container_of(f, struct f_tcm_opts, func_inst);

	mutex_lock(&tpg_instances_lock);
	for (i = 0; i < TPG_INSTANCES; ++i)
		if (tpg_instances[i].func_inst == f)
			break;
	if (i < TPG_INSTANCES)
		tpg_instances[i].func_inst = NULL;
	mutex_unlock(&tpg_instances_lock);

	kfree(opts);
}

static int tcm_register_callback(struct usb_function_instance *f)
{
	struct f_tcm_opts *opts = container_of(f, struct f_tcm_opts, func_inst);

	mutex_lock(&opts->dep_lock);
	opts->can_attach = true;
	mutex_unlock(&opts->dep_lock);

	return 0;
}

static void tcm_unregister_callback(struct usb_function_instance *f)
{
	struct f_tcm_opts *opts = container_of(f, struct f_tcm_opts, func_inst);

	mutex_lock(&opts->dep_lock);
	unregister_gadget_item(opts->
		func_inst.group.cg_item.ci_parent->ci_parent);
	opts->can_attach = false;
	mutex_unlock(&opts->dep_lock);
}

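/*
 * Forward a TPG attach/detach to the function instance's register/unregister
 * callbacks: tcm_register_callback() gates tcm_bind() via opts->can_attach,
 * tcm_unregister_callback() additionally calls unregister_gadget_item() on
 * the owning gadget.
 */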
static int usbg_attach(struct usbg_tpg *tpg)
{
	struct usb_function_instance *f = tpg->fi;
	struct f_tcm_opts *opts = container_of(f, struct f_tcm_opts, func_inst);

	if (opts->tcm_register_callback)
		return opts->tcm_register_callback(f);

	return 0;
}

static void usbg_detach(struct usbg_tpg *tpg)
{
	struct usb_function_instance *f = tpg->fi;
	struct f_tcm_opts *opts = container_of(f, struct f_tcm_opts, func_inst);

	if (opts->tcm_unregister_callback)
		opts->tcm_unregister_callback(f);
}

static int tcm_set_name(struct usb_function_instance *f, const char *name)
{
	struct f_tcm_opts *opts = container_of(f, struct f_tcm_opts, func_inst);

	pr_debug("tcm: Activating %s\n", name);

	mutex_lock(&opts->dep_lock);
	opts->ready = true;
	mutex_unlock(&opts->dep_lock);

	return 0;
}

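/*
 * Allocate a function instance. Only TPG_INSTANCES (currently one) slot is
 * available, so a second f_tcm instance fails with -EBUSY until the first
 * one is released by tcm_free_inst().
 */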
static struct usb_function_instance *tcm_alloc_inst(void)
{
	struct f_tcm_opts *opts;
	int i;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&tpg_instances_lock);
	for (i = 0; i < TPG_INSTANCES; ++i)
		if (!tpg_instances[i].func_inst)
			break;

	if (i == TPG_INSTANCES) {
		mutex_unlock(&tpg_instances_lock);
		kfree(opts);
		return ERR_PTR(-EBUSY);
	}
	tpg_instances[i].func_inst = &opts->func_inst;
	mutex_unlock(&tpg_instances_lock);

	mutex_init(&opts->dep_lock);
	opts->func_inst.set_inst_name = tcm_set_name;
	opts->func_inst.free_func_inst = tcm_free_inst;
	opts->tcm_register_callback = tcm_register_callback;
	opts->tcm_unregister_callback = tcm_unregister_callback;

	config_group_init_type_name(&opts->func_inst.group, "",
				    &tcm_func_type);

	return &opts->func_inst;
}

static void tcm_free(struct usb_function *f)
{
	struct f_uas *tcm = to_f_uas(f);

	kfree(tcm);
}

static void tcm_unbind(struct usb_configuration *c, struct usb_function *f)
{
	usb_free_all_descriptors(f);
}

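/*
 * Allocate the usb_function for a previously created instance and wire it
 * to the TPG registered in tpg_instances[]; fails with -ENODEV if the
 * instance is unknown.
 */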
static struct usb_function *tcm_alloc(struct usb_function_instance *fi)
{
	struct f_uas *fu;
	unsigned i;

	mutex_lock(&tpg_instances_lock);
	for (i = 0; i < TPG_INSTANCES; ++i)
		if (tpg_instances[i].func_inst == fi)
			break;
	if (i == TPG_INSTANCES) {
		mutex_unlock(&tpg_instances_lock);
		return ERR_PTR(-ENODEV);
	}

	fu = kzalloc(sizeof(*fu), GFP_KERNEL);
	if (!fu) {
		mutex_unlock(&tpg_instances_lock);
		return ERR_PTR(-ENOMEM);
	}

	fu->function.name = "Target Function";
	fu->function.bind = tcm_bind;
	fu->function.unbind = tcm_unbind;
	fu->function.set_alt = tcm_set_alt;
	fu->function.get_alt = tcm_get_alt;
	fu->function.setup = tcm_setup;
	fu->function.disable = tcm_disable;
	fu->function.free_func = tcm_free;
	fu->tpg = tpg_instances[i].tpg;

	hash_init(fu->stream_hash);
	mutex_unlock(&tpg_instances_lock);

	return &fu->function;
}

DECLARE_USB_FUNCTION(tcm, tcm_alloc_inst, tcm_alloc);

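/*
 * Module init/exit: register the "tcm" USB function and the "usb_gadget"
 * target fabric template; the function registration is rolled back if the
 * template cannot be registered.
 */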
static int __init tcm_init(void)
{
	int ret;

	ret = usb_function_register(&tcmusb_func);
	if (ret)
		return ret;

	ret = target_register_template(&usbg_ops);
	if (ret)
		usb_function_unregister(&tcmusb_func);

	return ret;
}
module_init(tcm_init);

static void __exit tcm_exit(void)
{
	target_unregister_template(&usbg_ops);
	usb_function_unregister(&tcmusb_func);
}
module_exit(tcm_exit);

MODULE_DESCRIPTION("Target based USB-Gadget");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sebastian Andrzej Siewior");