xref: /linux/drivers/usb/gadget/function/f_tcm.c (revision 23b0f90ba871f096474e1c27c3d14f455189d2d9)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Target based USB-Gadget
3  *
4  * UAS protocol handling, target callbacks, configfs handling,
5  * BBB (USB Mass Storage Class Bulk-Only (BBB) and Transport protocol handling.
6  *
7  * Author: Sebastian Andrzej Siewior <bigeasy at linutronix dot de>
8  */
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/types.h>
12 #include <linux/string.h>
13 #include <linux/configfs.h>
14 #include <linux/ctype.h>
15 #include <linux/delay.h>
16 #include <linux/usb/ch9.h>
17 #include <linux/usb/composite.h>
18 #include <linux/usb/gadget.h>
19 #include <linux/usb/storage.h>
20 #include <scsi/scsi_tcq.h>
21 #include <target/target_core_base.h>
22 #include <target/target_core_fabric.h>
23 #include <linux/unaligned.h>
24 
25 #include "tcm.h"
26 #include "u_tcm.h"
27 #include "configfs.h"
28 
29 #define TPG_INSTANCES		1
30 
31 struct tpg_instance {
32 	struct usb_function_instance	*func_inst;
33 	struct usbg_tpg			*tpg;
34 };
35 
36 static struct tpg_instance tpg_instances[TPG_INSTANCES];
37 
38 static DEFINE_MUTEX(tpg_instances_lock);
39 
40 static inline struct f_uas *to_f_uas(struct usb_function *f)
41 {
42 	return container_of(f, struct f_uas, function);
43 }
44 
45 /* Start bot.c code */
46 
47 static int bot_enqueue_cmd_cbw(struct f_uas *fu)
48 {
49 	int ret;
50 
51 	if (fu->flags & USBG_BOT_CMD_PEND)
52 		return 0;
53 
54 	ret = usb_ep_queue(fu->ep_out, fu->cmd[0].req, GFP_ATOMIC);
55 	if (!ret)
56 		fu->flags |= USBG_BOT_CMD_PEND;
57 	return ret;
58 }
59 
60 static void bot_status_complete(struct usb_ep *ep, struct usb_request *req)
61 {
62 	struct usbg_cmd *cmd = req->context;
63 	struct f_uas *fu = cmd->fu;
64 
65 	transport_generic_free_cmd(&cmd->se_cmd, 0);
66 	if (req->status == -ESHUTDOWN)
67 		return;
68 
69 	if (req->status < 0)
70 		pr_err("ERR %s(%d)\n", __func__, __LINE__);
71 
72 	/* CSW completed, wait for next CBW */
73 	bot_enqueue_cmd_cbw(fu);
74 }
75 
76 static void bot_enqueue_sense_code(struct f_uas *fu, struct usbg_cmd *cmd)
77 {
78 	struct bulk_cs_wrap *csw = &fu->bot_status.csw;
79 	int ret;
80 	unsigned int csw_stat;
81 
82 	csw_stat = cmd->csw_code;
83 	csw->Tag = cmd->bot_tag;
84 	csw->Status = csw_stat;
85 	fu->bot_status.req->context = cmd;
86 	ret = usb_ep_queue(fu->ep_in, fu->bot_status.req, GFP_ATOMIC);
87 	if (ret)
88 		pr_err("%s(%d) ERR: %d\n", __func__, __LINE__, ret);
89 }
90 
91 static void bot_err_compl(struct usb_ep *ep, struct usb_request *req)
92 {
93 	struct usbg_cmd *cmd = req->context;
94 	struct f_uas *fu = cmd->fu;
95 
96 	if (req->status < 0)
97 		pr_err("ERR %s(%d)\n", __func__, __LINE__);
98 
99 	if (cmd->data_len) {
100 		if (cmd->data_len > ep->maxpacket) {
101 			req->length = ep->maxpacket;
102 			cmd->data_len -= ep->maxpacket;
103 		} else {
104 			req->length = cmd->data_len;
105 			cmd->data_len = 0;
106 		}
107 
108 		usb_ep_queue(ep, req, GFP_ATOMIC);
109 		return;
110 	}
111 	bot_enqueue_sense_code(fu, cmd);
112 }
113 
114 static void bot_send_bad_status(struct usbg_cmd *cmd)
115 {
116 	struct f_uas *fu = cmd->fu;
117 	struct bulk_cs_wrap *csw = &fu->bot_status.csw;
118 	struct usb_request *req;
119 	struct usb_ep *ep;
120 
121 	csw->Residue = cpu_to_le32(cmd->data_len);
122 
123 	if (cmd->data_len) {
124 		if (cmd->is_read) {
125 			ep = fu->ep_in;
126 			req = fu->bot_req_in;
127 		} else {
128 			ep = fu->ep_out;
129 			req = fu->bot_req_out;
130 		}
131 
132 		if (cmd->data_len > fu->ep_in->maxpacket) {
133 			req->length = ep->maxpacket;
134 			cmd->data_len -= ep->maxpacket;
135 		} else {
136 			req->length = cmd->data_len;
137 			cmd->data_len = 0;
138 		}
139 		req->complete = bot_err_compl;
140 		req->context = cmd;
141 		req->buf = fu->cmd[0].buf;
142 		usb_ep_queue(ep, req, GFP_KERNEL);
143 	} else {
144 		bot_enqueue_sense_code(fu, cmd);
145 	}
146 }
147 
148 static int bot_send_status(struct usbg_cmd *cmd, bool moved_data)
149 {
150 	struct f_uas *fu = cmd->fu;
151 	struct bulk_cs_wrap *csw = &fu->bot_status.csw;
152 	int ret;
153 
154 	if (cmd->se_cmd.scsi_status == SAM_STAT_GOOD) {
155 		if (!moved_data && cmd->data_len) {
156 			/*
157 			 * the host wants to move data, we don't. Fill / empty
158 			 * the pipe and then send the csw with reside set.
159 			 */
160 			cmd->csw_code = US_BULK_STAT_OK;
161 			bot_send_bad_status(cmd);
162 			return 0;
163 		}
164 
165 		csw->Tag = cmd->bot_tag;
166 		csw->Residue = cpu_to_le32(0);
167 		csw->Status = US_BULK_STAT_OK;
168 		fu->bot_status.req->context = cmd;
169 
170 		ret = usb_ep_queue(fu->ep_in, fu->bot_status.req, GFP_KERNEL);
171 		if (ret)
172 			pr_err("%s(%d) ERR: %d\n", __func__, __LINE__, ret);
173 	} else {
174 		cmd->csw_code = US_BULK_STAT_FAIL;
175 		bot_send_bad_status(cmd);
176 	}
177 	return 0;
178 }
179 
180 /*
181  * Called after command (no data transfer) or after the write (to device)
182  * operation is completed
183  */
184 static int bot_send_status_response(struct usbg_cmd *cmd)
185 {
186 	bool moved_data = false;
187 
188 	if (!cmd->is_read)
189 		moved_data = true;
190 	return bot_send_status(cmd, moved_data);
191 }
192 
193 /* Read request completed, now we have to send the CSW */
194 static void bot_read_compl(struct usb_ep *ep, struct usb_request *req)
195 {
196 	struct usbg_cmd *cmd = req->context;
197 
198 	if (req->status < 0)
199 		pr_err("ERR %s(%d)\n", __func__, __LINE__);
200 
201 	if (req->status == -ESHUTDOWN) {
202 		transport_generic_free_cmd(&cmd->se_cmd, 0);
203 		return;
204 	}
205 
206 	bot_send_status(cmd, true);
207 }
208 
209 static int bot_send_read_response(struct usbg_cmd *cmd)
210 {
211 	struct f_uas *fu = cmd->fu;
212 	struct se_cmd *se_cmd = &cmd->se_cmd;
213 	struct usb_gadget *gadget = fuas_to_gadget(fu);
214 	int ret;
215 
216 	if (!cmd->data_len) {
217 		cmd->csw_code = US_BULK_STAT_PHASE;
218 		bot_send_bad_status(cmd);
219 		return 0;
220 	}
221 
222 	if (!gadget->sg_supported) {
223 		cmd->data_buf = kmalloc(se_cmd->data_length, GFP_ATOMIC);
224 		if (!cmd->data_buf)
225 			return -ENOMEM;
226 
227 		sg_copy_to_buffer(se_cmd->t_data_sg,
228 				se_cmd->t_data_nents,
229 				cmd->data_buf,
230 				se_cmd->data_length);
231 
232 		fu->bot_req_in->buf = cmd->data_buf;
233 	} else {
234 		fu->bot_req_in->buf = NULL;
235 		fu->bot_req_in->num_sgs = se_cmd->t_data_nents;
236 		fu->bot_req_in->sg = se_cmd->t_data_sg;
237 	}
238 
239 	fu->bot_req_in->complete = bot_read_compl;
240 	fu->bot_req_in->length = se_cmd->data_length;
241 	fu->bot_req_in->context = cmd;
242 	ret = usb_ep_queue(fu->ep_in, fu->bot_req_in, GFP_ATOMIC);
243 	if (ret)
244 		pr_err("%s(%d)\n", __func__, __LINE__);
245 	return 0;
246 }
247 
248 static void usbg_data_write_cmpl(struct usb_ep *, struct usb_request *);
249 static int usbg_prepare_w_request(struct usbg_cmd *, struct usb_request *);
250 
251 static int bot_send_write_request(struct usbg_cmd *cmd)
252 {
253 	struct f_uas *fu = cmd->fu;
254 	int ret;
255 
256 	cmd->fu = fu;
257 
258 	if (!cmd->data_len) {
259 		cmd->csw_code = US_BULK_STAT_PHASE;
260 		return -EINVAL;
261 	}
262 
263 	ret = usbg_prepare_w_request(cmd, fu->bot_req_out);
264 	if (ret)
265 		goto cleanup;
266 	ret = usb_ep_queue(fu->ep_out, fu->bot_req_out, GFP_KERNEL);
267 	if (ret)
268 		pr_err("%s(%d)\n", __func__, __LINE__);
269 
270 cleanup:
271 	return ret;
272 }
273 
274 static int bot_submit_command(struct f_uas *, void *, unsigned int);
275 
276 static void bot_cmd_complete(struct usb_ep *ep, struct usb_request *req)
277 {
278 	struct f_uas *fu = req->context;
279 	int ret;
280 
281 	if (req->status == -ESHUTDOWN)
282 		return;
283 
284 	fu->flags &= ~USBG_BOT_CMD_PEND;
285 
286 	if (req->status < 0) {
287 		struct usb_gadget *gadget = fuas_to_gadget(fu);
288 
289 		dev_err(&gadget->dev, "BOT command req err (%d)\n", req->status);
290 		bot_enqueue_cmd_cbw(fu);
291 		return;
292 	}
293 
294 	ret = bot_submit_command(fu, req->buf, req->actual);
295 	if (ret) {
296 		pr_err("%s(%d): %d\n", __func__, __LINE__, ret);
297 		if (!(fu->flags & USBG_BOT_WEDGED))
298 			usb_ep_set_wedge(fu->ep_in);
299 
300 		fu->flags |= USBG_BOT_WEDGED;
301 		bot_enqueue_cmd_cbw(fu);
302 	} else if (fu->flags & USBG_BOT_WEDGED) {
303 		fu->flags &= ~USBG_BOT_WEDGED;
304 		usb_ep_clear_halt(fu->ep_in);
305 	}
306 }
307 
308 static int bot_prepare_reqs(struct f_uas *fu)
309 {
310 	int ret;
311 
312 	fu->bot_req_in = usb_ep_alloc_request(fu->ep_in, GFP_KERNEL);
313 	if (!fu->bot_req_in)
314 		goto err;
315 
316 	fu->bot_req_out = usb_ep_alloc_request(fu->ep_out, GFP_KERNEL);
317 	if (!fu->bot_req_out)
318 		goto err_out;
319 
320 	fu->cmd[0].req = usb_ep_alloc_request(fu->ep_out, GFP_KERNEL);
321 	if (!fu->cmd[0].req)
322 		goto err_cmd;
323 
324 	fu->bot_status.req = usb_ep_alloc_request(fu->ep_in, GFP_KERNEL);
325 	if (!fu->bot_status.req)
326 		goto err_sts;
327 
328 	fu->bot_status.req->buf = &fu->bot_status.csw;
329 	fu->bot_status.req->length = US_BULK_CS_WRAP_LEN;
330 	fu->bot_status.req->complete = bot_status_complete;
331 	fu->bot_status.csw.Signature = cpu_to_le32(US_BULK_CS_SIGN);
332 
333 	fu->cmd[0].buf = kmalloc(fu->ep_out->maxpacket, GFP_KERNEL);
334 	if (!fu->cmd[0].buf)
335 		goto err_buf;
336 
337 	fu->cmd[0].req->complete = bot_cmd_complete;
338 	fu->cmd[0].req->buf = fu->cmd[0].buf;
339 	fu->cmd[0].req->length = fu->ep_out->maxpacket;
340 	fu->cmd[0].req->context = fu;
341 
342 	ret = bot_enqueue_cmd_cbw(fu);
343 	if (ret)
344 		goto err_queue;
345 	return 0;
346 err_queue:
347 	kfree(fu->cmd[0].buf);
348 	fu->cmd[0].buf = NULL;
349 err_buf:
350 	usb_ep_free_request(fu->ep_in, fu->bot_status.req);
351 err_sts:
352 	usb_ep_free_request(fu->ep_out, fu->cmd[0].req);
353 	fu->cmd[0].req = NULL;
354 err_cmd:
355 	usb_ep_free_request(fu->ep_out, fu->bot_req_out);
356 	fu->bot_req_out = NULL;
357 err_out:
358 	usb_ep_free_request(fu->ep_in, fu->bot_req_in);
359 	fu->bot_req_in = NULL;
360 err:
361 	pr_err("BOT: endpoint setup failed\n");
362 	return -ENOMEM;
363 }
364 
365 static void bot_cleanup_old_alt(struct f_uas *fu)
366 {
367 	if (!(fu->flags & USBG_ENABLED))
368 		return;
369 
370 	usb_ep_disable(fu->ep_in);
371 	usb_ep_disable(fu->ep_out);
372 
373 	if (!fu->bot_req_in)
374 		return;
375 
376 	usb_ep_free_request(fu->ep_in, fu->bot_req_in);
377 	usb_ep_free_request(fu->ep_out, fu->bot_req_out);
378 	usb_ep_free_request(fu->ep_out, fu->cmd[0].req);
379 	usb_ep_free_request(fu->ep_in, fu->bot_status.req);
380 
381 	kfree(fu->cmd[0].buf);
382 
383 	fu->bot_req_in = NULL;
384 	fu->bot_req_out = NULL;
385 	fu->cmd[0].req = NULL;
386 	fu->bot_status.req = NULL;
387 	fu->cmd[0].buf = NULL;
388 }
389 
390 static void bot_set_alt(struct f_uas *fu)
391 {
392 	struct usb_function *f = &fu->function;
393 	struct usb_gadget *gadget = f->config->cdev->gadget;
394 	int ret;
395 
396 	fu->flags = USBG_IS_BOT;
397 
398 	config_ep_by_speed_and_alt(gadget, f, fu->ep_in, USB_G_ALT_INT_BBB);
399 	ret = usb_ep_enable(fu->ep_in);
400 	if (ret)
401 		goto err_b_in;
402 
403 	config_ep_by_speed_and_alt(gadget, f, fu->ep_out, USB_G_ALT_INT_BBB);
404 	ret = usb_ep_enable(fu->ep_out);
405 	if (ret)
406 		goto err_b_out;
407 
408 	ret = bot_prepare_reqs(fu);
409 	if (ret)
410 		goto err_wq;
411 	fu->flags |= USBG_ENABLED;
412 	pr_info("Using the BOT protocol\n");
413 	return;
414 err_wq:
415 	usb_ep_disable(fu->ep_out);
416 err_b_out:
417 	usb_ep_disable(fu->ep_in);
418 err_b_in:
419 	fu->flags = USBG_IS_BOT;
420 }
421 
422 static int usbg_bot_setup(struct usb_function *f,
423 		const struct usb_ctrlrequest *ctrl)
424 {
425 	struct f_uas *fu = to_f_uas(f);
426 	struct usb_composite_dev *cdev = f->config->cdev;
427 	u16 w_value = le16_to_cpu(ctrl->wValue);
428 	u16 w_length = le16_to_cpu(ctrl->wLength);
429 	int luns;
430 	u8 *ret_lun;
431 
432 	switch (ctrl->bRequest) {
433 	case US_BULK_GET_MAX_LUN:
434 		if (ctrl->bRequestType != (USB_DIR_IN | USB_TYPE_CLASS |
435 					USB_RECIP_INTERFACE))
436 			return -ENOTSUPP;
437 
438 		if (w_length < 1)
439 			return -EINVAL;
440 		if (w_value != 0)
441 			return -EINVAL;
442 		luns = atomic_read(&fu->tpg->tpg_port_count);
443 		if (!luns) {
444 			pr_err("No LUNs configured?\n");
445 			return -EINVAL;
446 		}
447 		luns--;
448 		if (luns > US_BULK_MAX_LUN_LIMIT) {
449 			pr_info_once("Limiting the number of luns to 16\n");
450 			luns = US_BULK_MAX_LUN_LIMIT;
451 		}
452 		ret_lun = cdev->req->buf;
453 		*ret_lun = luns;
454 		cdev->req->length = 1;
455 		return usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
456 
457 	case US_BULK_RESET_REQUEST:
458 		/* XXX maybe we should remove previous requests for IN + OUT */
459 		if (fu->flags & USBG_BOT_WEDGED) {
460 			fu->flags &= ~USBG_BOT_WEDGED;
461 			usb_ep_clear_halt(fu->ep_in);
462 		}
463 
464 		bot_enqueue_cmd_cbw(fu);
465 		return 0;
466 	}
467 	return -ENOTSUPP;
468 }
469 
470 /* Start uas.c code */
471 
472 static int tcm_to_uasp_response(enum tcm_tmrsp_table code)
473 {
474 	switch (code) {
475 	case TMR_FUNCTION_FAILED:
476 		return RC_TMF_FAILED;
477 	case TMR_FUNCTION_COMPLETE:
478 	case TMR_TASK_DOES_NOT_EXIST:
479 		return RC_TMF_COMPLETE;
480 	case TMR_LUN_DOES_NOT_EXIST:
481 		return RC_INCORRECT_LUN;
482 	case TMR_FUNCTION_REJECTED:
483 	case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED:
484 	default:
485 		return RC_TMF_NOT_SUPPORTED;
486 	}
487 }
488 
489 static unsigned char uasp_to_tcm_func(int code)
490 {
491 	switch (code) {
492 	case TMF_ABORT_TASK:
493 		return TMR_ABORT_TASK;
494 	case TMF_ABORT_TASK_SET:
495 		return TMR_ABORT_TASK_SET;
496 	case TMF_CLEAR_TASK_SET:
497 		return TMR_CLEAR_TASK_SET;
498 	case TMF_LOGICAL_UNIT_RESET:
499 		return TMR_LUN_RESET;
500 	case TMF_CLEAR_ACA:
501 		return TMR_CLEAR_ACA;
502 	case TMF_I_T_NEXUS_RESET:
503 	case TMF_QUERY_TASK:
504 	case TMF_QUERY_TASK_SET:
505 	case TMF_QUERY_ASYNC_EVENT:
506 	default:
507 		return TMR_UNKNOWN;
508 	}
509 }
510 
511 static void uasp_cleanup_one_stream(struct f_uas *fu, struct uas_stream *stream)
512 {
513 	/* We have either all three allocated or none */
514 	if (!stream->req_in)
515 		return;
516 
517 	usb_ep_free_request(fu->ep_in, stream->req_in);
518 	usb_ep_free_request(fu->ep_out, stream->req_out);
519 	usb_ep_free_request(fu->ep_status, stream->req_status);
520 
521 	stream->req_in = NULL;
522 	stream->req_out = NULL;
523 	stream->req_status = NULL;
524 }
525 
526 static void uasp_free_cmdreq(struct f_uas *fu)
527 {
528 	int i;
529 
530 	for (i = 0; i < USBG_NUM_CMDS; i++) {
531 		usb_ep_free_request(fu->ep_cmd, fu->cmd[i].req);
532 		kfree(fu->cmd[i].buf);
533 		fu->cmd[i].req = NULL;
534 		fu->cmd[i].buf = NULL;
535 	}
536 }
537 
538 static void uasp_cleanup_old_alt(struct f_uas *fu)
539 {
540 	int i;
541 
542 	if (!(fu->flags & USBG_ENABLED))
543 		return;
544 
545 	usb_ep_disable(fu->ep_in);
546 	usb_ep_disable(fu->ep_out);
547 	usb_ep_disable(fu->ep_status);
548 	usb_ep_disable(fu->ep_cmd);
549 
550 	for (i = 0; i < USBG_NUM_CMDS; i++)
551 		uasp_cleanup_one_stream(fu, &fu->stream[i]);
552 	uasp_free_cmdreq(fu);
553 }
554 
555 static void uasp_status_data_cmpl(struct usb_ep *ep, struct usb_request *req);
556 
557 static int uasp_prepare_r_request(struct usbg_cmd *cmd)
558 {
559 	struct se_cmd *se_cmd = &cmd->se_cmd;
560 	struct f_uas *fu = cmd->fu;
561 	struct usb_gadget *gadget = fuas_to_gadget(fu);
562 	struct uas_stream *stream = &fu->stream[se_cmd->map_tag];
563 
564 	if (!gadget->sg_supported) {
565 		cmd->data_buf = kmalloc(se_cmd->data_length, GFP_ATOMIC);
566 		if (!cmd->data_buf)
567 			return -ENOMEM;
568 
569 		sg_copy_to_buffer(se_cmd->t_data_sg,
570 				se_cmd->t_data_nents,
571 				cmd->data_buf,
572 				se_cmd->data_length);
573 
574 		stream->req_in->buf = cmd->data_buf;
575 	} else {
576 		stream->req_in->buf = NULL;
577 		stream->req_in->num_sgs = se_cmd->t_data_nents;
578 		stream->req_in->sg = se_cmd->t_data_sg;
579 	}
580 
581 	stream->req_in->is_last = 1;
582 	stream->req_in->stream_id = cmd->tag;
583 	stream->req_in->complete = uasp_status_data_cmpl;
584 	stream->req_in->length = se_cmd->data_length;
585 	stream->req_in->context = cmd;
586 
587 	cmd->state = UASP_SEND_STATUS;
588 	return 0;
589 }
590 
591 static void uasp_prepare_status(struct usbg_cmd *cmd)
592 {
593 	struct se_cmd *se_cmd = &cmd->se_cmd;
594 	struct sense_iu *iu = &cmd->sense_iu;
595 	struct uas_stream *stream = &cmd->fu->stream[se_cmd->map_tag];
596 
597 	cmd->state = UASP_QUEUE_COMMAND;
598 	iu->iu_id = IU_ID_STATUS;
599 	iu->tag = cpu_to_be16(cmd->tag);
600 
601 	/*
602 	 * iu->status_qual = cpu_to_be16(STATUS QUALIFIER SAM-4. Where R U?);
603 	 */
604 	iu->len = cpu_to_be16(se_cmd->scsi_sense_length);
605 	iu->status = se_cmd->scsi_status;
606 	stream->req_status->is_last = 1;
607 	stream->req_status->stream_id = cmd->tag;
608 	stream->req_status->context = cmd;
609 	stream->req_status->length = se_cmd->scsi_sense_length + 16;
610 	stream->req_status->buf = iu;
611 	stream->req_status->complete = uasp_status_data_cmpl;
612 }
613 
614 static void uasp_prepare_response(struct usbg_cmd *cmd)
615 {
616 	struct se_cmd *se_cmd = &cmd->se_cmd;
617 	struct response_iu *rsp_iu = &cmd->response_iu;
618 	struct uas_stream *stream = &cmd->fu->stream[se_cmd->map_tag];
619 
620 	cmd->state = UASP_QUEUE_COMMAND;
621 	rsp_iu->iu_id = IU_ID_RESPONSE;
622 	rsp_iu->tag = cpu_to_be16(cmd->tag);
623 
624 	if (cmd->tmr_rsp != RC_RESPONSE_UNKNOWN)
625 		rsp_iu->response_code = cmd->tmr_rsp;
626 	else
627 		rsp_iu->response_code =
628 			tcm_to_uasp_response(se_cmd->se_tmr_req->response);
629 
630 	/*
631 	 * The UASP driver must support all the task management functions listed
632 	 * in Table 20 of UAS-r04. To remain compliant while indicate that the
633 	 * TMR did not go through, report RC_TMF_FAILED instead of
634 	 * RC_TMF_NOT_SUPPORTED and print a warning to the user.
635 	 */
636 	switch (cmd->tmr_func) {
637 	case TMF_ABORT_TASK:
638 	case TMF_ABORT_TASK_SET:
639 	case TMF_CLEAR_TASK_SET:
640 	case TMF_LOGICAL_UNIT_RESET:
641 	case TMF_CLEAR_ACA:
642 	case TMF_I_T_NEXUS_RESET:
643 	case TMF_QUERY_TASK:
644 	case TMF_QUERY_TASK_SET:
645 	case TMF_QUERY_ASYNC_EVENT:
646 		if (rsp_iu->response_code == RC_TMF_NOT_SUPPORTED) {
647 			struct usb_gadget *gadget = fuas_to_gadget(cmd->fu);
648 
649 			dev_warn(&gadget->dev, "TMF function %d not supported\n",
650 				 cmd->tmr_func);
651 			rsp_iu->response_code = RC_TMF_FAILED;
652 		}
653 		break;
654 	default:
655 		break;
656 	}
657 
658 	stream->req_status->is_last = 1;
659 	stream->req_status->stream_id = cmd->tag;
660 	stream->req_status->context = cmd;
661 	stream->req_status->length = sizeof(struct response_iu);
662 	stream->req_status->buf = rsp_iu;
663 	stream->req_status->complete = uasp_status_data_cmpl;
664 }
665 
666 static void usbg_release_cmd(struct se_cmd *se_cmd);
667 static int uasp_send_tm_response(struct usbg_cmd *cmd);
668 
669 static void uasp_status_data_cmpl(struct usb_ep *ep, struct usb_request *req)
670 {
671 	struct usbg_cmd *cmd = req->context;
672 	struct f_uas *fu = cmd->fu;
673 	struct uas_stream *stream = &fu->stream[cmd->se_cmd.map_tag];
674 	int ret;
675 
676 	if (req->status == -ESHUTDOWN)
677 		goto cleanup;
678 
679 	switch (cmd->state) {
680 	case UASP_SEND_DATA:
681 		ret = uasp_prepare_r_request(cmd);
682 		if (ret)
683 			goto cleanup;
684 		ret = usb_ep_queue(fu->ep_in, stream->req_in, GFP_ATOMIC);
685 		if (ret)
686 			pr_err("%s(%d) => %d\n", __func__, __LINE__, ret);
687 		break;
688 
689 	case UASP_RECEIVE_DATA:
690 		ret = usbg_prepare_w_request(cmd, stream->req_out);
691 		if (ret)
692 			goto cleanup;
693 		ret = usb_ep_queue(fu->ep_out, stream->req_out, GFP_ATOMIC);
694 		if (ret)
695 			pr_err("%s(%d) => %d\n", __func__, __LINE__, ret);
696 		break;
697 
698 	case UASP_SEND_STATUS:
699 		uasp_prepare_status(cmd);
700 		ret = usb_ep_queue(fu->ep_status, stream->req_status,
701 				GFP_ATOMIC);
702 		if (ret)
703 			pr_err("%s(%d) => %d\n", __func__, __LINE__, ret);
704 		break;
705 
706 	case UASP_QUEUE_COMMAND:
707 		/*
708 		 * Overlapped command detected and cancelled.
709 		 * So send overlapped attempted status.
710 		 */
711 		if (cmd->tmr_rsp == RC_OVERLAPPED_TAG &&
712 		    req->status == -ECONNRESET) {
713 			uasp_send_tm_response(cmd);
714 			return;
715 		}
716 
717 		hash_del(&stream->node);
718 
719 		/*
720 		 * If no command submitted to target core here, just free the
721 		 * bitmap index. This is for the cases where f_tcm handles
722 		 * status response instead of the target core.
723 		 */
724 		if (cmd->tmr_rsp != RC_OVERLAPPED_TAG &&
725 		    cmd->tmr_rsp != RC_RESPONSE_UNKNOWN) {
726 			struct se_session *se_sess;
727 
728 			se_sess = fu->tpg->tpg_nexus->tvn_se_sess;
729 			sbitmap_queue_clear(&se_sess->sess_tag_pool,
730 					    cmd->se_cmd.map_tag,
731 					    cmd->se_cmd.map_cpu);
732 		} else {
733 			transport_generic_free_cmd(&cmd->se_cmd, 0);
734 		}
735 
736 		usb_ep_queue(fu->ep_cmd, cmd->req, GFP_ATOMIC);
737 		complete(&stream->cmd_completion);
738 		break;
739 
740 	default:
741 		BUG();
742 	}
743 	return;
744 
745 cleanup:
746 	hash_del(&stream->node);
747 	transport_generic_free_cmd(&cmd->se_cmd, 0);
748 }
749 
750 static int uasp_send_status_response(struct usbg_cmd *cmd)
751 {
752 	struct f_uas *fu = cmd->fu;
753 	struct uas_stream *stream = &fu->stream[cmd->se_cmd.map_tag];
754 	struct sense_iu *iu = &cmd->sense_iu;
755 
756 	iu->tag = cpu_to_be16(cmd->tag);
757 	cmd->fu = fu;
758 	uasp_prepare_status(cmd);
759 	return usb_ep_queue(fu->ep_status, stream->req_status, GFP_ATOMIC);
760 }
761 
762 static int uasp_send_tm_response(struct usbg_cmd *cmd)
763 {
764 	struct f_uas *fu = cmd->fu;
765 	struct uas_stream *stream = &fu->stream[cmd->se_cmd.map_tag];
766 	struct response_iu *iu = &cmd->response_iu;
767 
768 	iu->tag = cpu_to_be16(cmd->tag);
769 	cmd->fu = fu;
770 	uasp_prepare_response(cmd);
771 	return usb_ep_queue(fu->ep_status, stream->req_status, GFP_ATOMIC);
772 }
773 
774 static int uasp_send_read_response(struct usbg_cmd *cmd)
775 {
776 	struct f_uas *fu = cmd->fu;
777 	struct uas_stream *stream = &fu->stream[cmd->se_cmd.map_tag];
778 	struct sense_iu *iu = &cmd->sense_iu;
779 	int ret;
780 
781 	cmd->fu = fu;
782 
783 	iu->tag = cpu_to_be16(cmd->tag);
784 	if (fu->flags & USBG_USE_STREAMS) {
785 
786 		ret = uasp_prepare_r_request(cmd);
787 		if (ret)
788 			goto out;
789 		ret = usb_ep_queue(fu->ep_in, stream->req_in, GFP_ATOMIC);
790 		if (ret) {
791 			pr_err("%s(%d) => %d\n", __func__, __LINE__, ret);
792 			kfree(cmd->data_buf);
793 			cmd->data_buf = NULL;
794 		}
795 
796 	} else {
797 
798 		iu->iu_id = IU_ID_READ_READY;
799 		iu->tag = cpu_to_be16(cmd->tag);
800 
801 		stream->req_status->complete = uasp_status_data_cmpl;
802 		stream->req_status->context = cmd;
803 
804 		cmd->state = UASP_SEND_DATA;
805 		stream->req_status->buf = iu;
806 		stream->req_status->length = sizeof(struct iu);
807 
808 		ret = usb_ep_queue(fu->ep_status, stream->req_status,
809 				GFP_ATOMIC);
810 		if (ret)
811 			pr_err("%s(%d) => %d\n", __func__, __LINE__, ret);
812 	}
813 out:
814 	return ret;
815 }
816 
817 static int uasp_send_write_request(struct usbg_cmd *cmd)
818 {
819 	struct f_uas *fu = cmd->fu;
820 	struct se_cmd *se_cmd = &cmd->se_cmd;
821 	struct uas_stream *stream = &fu->stream[se_cmd->map_tag];
822 	struct sense_iu *iu = &cmd->sense_iu;
823 	int ret;
824 
825 	cmd->fu = fu;
826 
827 	iu->tag = cpu_to_be16(cmd->tag);
828 
829 	if (fu->flags & USBG_USE_STREAMS) {
830 
831 		ret = usbg_prepare_w_request(cmd, stream->req_out);
832 		if (ret)
833 			goto cleanup;
834 		ret = usb_ep_queue(fu->ep_out, stream->req_out, GFP_ATOMIC);
835 		if (ret)
836 			pr_err("%s(%d)\n", __func__, __LINE__);
837 
838 	} else {
839 
840 		iu->iu_id = IU_ID_WRITE_READY;
841 		iu->tag = cpu_to_be16(cmd->tag);
842 
843 		stream->req_status->complete = uasp_status_data_cmpl;
844 		stream->req_status->context = cmd;
845 
846 		cmd->state = UASP_RECEIVE_DATA;
847 		stream->req_status->buf = iu;
848 		stream->req_status->length = sizeof(struct iu);
849 
850 		ret = usb_ep_queue(fu->ep_status, stream->req_status,
851 				GFP_ATOMIC);
852 		if (ret)
853 			pr_err("%s(%d)\n", __func__, __LINE__);
854 	}
855 
856 cleanup:
857 	return ret;
858 }
859 
860 static int usbg_submit_command(struct f_uas *, struct usb_request *);
861 
862 static void uasp_cmd_complete(struct usb_ep *ep, struct usb_request *req)
863 {
864 	struct f_uas *fu = req->context;
865 
866 	if (req->status == -ESHUTDOWN)
867 		return;
868 
869 	if (req->status < 0) {
870 		usb_ep_queue(fu->ep_cmd, req, GFP_ATOMIC);
871 		return;
872 	}
873 
874 	usbg_submit_command(fu, req);
875 }
876 
877 static int uasp_alloc_stream_res(struct f_uas *fu, struct uas_stream *stream)
878 {
879 	init_completion(&stream->cmd_completion);
880 
881 	stream->req_in = usb_ep_alloc_request(fu->ep_in, GFP_KERNEL);
882 	if (!stream->req_in)
883 		goto out;
884 
885 	stream->req_out = usb_ep_alloc_request(fu->ep_out, GFP_KERNEL);
886 	if (!stream->req_out)
887 		goto err_out;
888 
889 	stream->req_status = usb_ep_alloc_request(fu->ep_status, GFP_KERNEL);
890 	if (!stream->req_status)
891 		goto err_sts;
892 
893 	return 0;
894 
895 err_sts:
896 	usb_ep_free_request(fu->ep_out, stream->req_out);
897 	stream->req_out = NULL;
898 err_out:
899 	usb_ep_free_request(fu->ep_in, stream->req_in);
900 	stream->req_in = NULL;
901 out:
902 	return -ENOMEM;
903 }
904 
905 static int uasp_alloc_cmd(struct f_uas *fu, int i)
906 {
907 	fu->cmd[i].req = usb_ep_alloc_request(fu->ep_cmd, GFP_KERNEL);
908 	if (!fu->cmd[i].req)
909 		goto err;
910 
911 	fu->cmd[i].buf = kmalloc(fu->ep_cmd->maxpacket, GFP_KERNEL);
912 	if (!fu->cmd[i].buf)
913 		goto err_buf;
914 
915 	fu->cmd[i].req->complete = uasp_cmd_complete;
916 	fu->cmd[i].req->buf = fu->cmd[i].buf;
917 	fu->cmd[i].req->length = fu->ep_cmd->maxpacket;
918 	fu->cmd[i].req->context = fu;
919 	return 0;
920 
921 err_buf:
922 	usb_ep_free_request(fu->ep_cmd, fu->cmd[i].req);
923 err:
924 	return -ENOMEM;
925 }
926 
927 static int uasp_prepare_reqs(struct f_uas *fu)
928 {
929 	int ret;
930 	int i;
931 
932 	for (i = 0; i < USBG_NUM_CMDS; i++) {
933 		ret = uasp_alloc_stream_res(fu, &fu->stream[i]);
934 		if (ret)
935 			goto err_cleanup;
936 	}
937 
938 	for (i = 0; i < USBG_NUM_CMDS; i++) {
939 		ret = uasp_alloc_cmd(fu, i);
940 		if (ret)
941 			goto err_free_stream;
942 
943 		ret = usb_ep_queue(fu->ep_cmd, fu->cmd[i].req, GFP_ATOMIC);
944 		if (ret)
945 			goto err_free_stream;
946 	}
947 
948 	return 0;
949 
950 err_free_stream:
951 	uasp_free_cmdreq(fu);
952 
953 err_cleanup:
954 	if (i) {
955 		do {
956 			uasp_cleanup_one_stream(fu, &fu->stream[i - 1]);
957 			i--;
958 		} while (i);
959 	}
960 	pr_err("UASP: endpoint setup failed\n");
961 	return ret;
962 }
963 
964 static void uasp_set_alt(struct f_uas *fu)
965 {
966 	struct usb_function *f = &fu->function;
967 	struct usb_gadget *gadget = f->config->cdev->gadget;
968 	int ret;
969 
970 	fu->flags = USBG_IS_UAS;
971 
972 	if (gadget->speed >= USB_SPEED_SUPER)
973 		fu->flags |= USBG_USE_STREAMS;
974 
975 	config_ep_by_speed_and_alt(gadget, f, fu->ep_in, USB_G_ALT_INT_UAS);
976 	ret = usb_ep_enable(fu->ep_in);
977 	if (ret)
978 		goto err_b_in;
979 
980 	config_ep_by_speed_and_alt(gadget, f, fu->ep_out, USB_G_ALT_INT_UAS);
981 	ret = usb_ep_enable(fu->ep_out);
982 	if (ret)
983 		goto err_b_out;
984 
985 	config_ep_by_speed_and_alt(gadget, f, fu->ep_cmd, USB_G_ALT_INT_UAS);
986 	ret = usb_ep_enable(fu->ep_cmd);
987 	if (ret)
988 		goto err_cmd;
989 	config_ep_by_speed_and_alt(gadget, f, fu->ep_status, USB_G_ALT_INT_UAS);
990 	ret = usb_ep_enable(fu->ep_status);
991 	if (ret)
992 		goto err_status;
993 
994 	ret = uasp_prepare_reqs(fu);
995 	if (ret)
996 		goto err_wq;
997 	fu->flags |= USBG_ENABLED;
998 
999 	pr_info("Using the UAS protocol\n");
1000 	return;
1001 err_wq:
1002 	usb_ep_disable(fu->ep_status);
1003 err_status:
1004 	usb_ep_disable(fu->ep_cmd);
1005 err_cmd:
1006 	usb_ep_disable(fu->ep_out);
1007 err_b_out:
1008 	usb_ep_disable(fu->ep_in);
1009 err_b_in:
1010 	fu->flags = 0;
1011 }
1012 
1013 static int get_cmd_dir(const unsigned char *cdb)
1014 {
1015 	int ret;
1016 
1017 	switch (cdb[0]) {
1018 	case READ_6:
1019 	case READ_10:
1020 	case READ_12:
1021 	case READ_16:
1022 	case INQUIRY:
1023 	case MODE_SENSE:
1024 	case MODE_SENSE_10:
1025 	case SERVICE_ACTION_IN_16:
1026 	case MAINTENANCE_IN:
1027 	case PERSISTENT_RESERVE_IN:
1028 	case SECURITY_PROTOCOL_IN:
1029 	case ACCESS_CONTROL_IN:
1030 	case REPORT_LUNS:
1031 	case READ_BLOCK_LIMITS:
1032 	case READ_POSITION:
1033 	case READ_CAPACITY:
1034 	case READ_TOC:
1035 	case READ_FORMAT_CAPACITIES:
1036 	case REQUEST_SENSE:
1037 	case ATA_12:
1038 	case ATA_16:
1039 		ret = DMA_FROM_DEVICE;
1040 		break;
1041 
1042 	case WRITE_6:
1043 	case WRITE_10:
1044 	case WRITE_12:
1045 	case WRITE_16:
1046 	case MODE_SELECT:
1047 	case MODE_SELECT_10:
1048 	case WRITE_VERIFY:
1049 	case WRITE_VERIFY_12:
1050 	case PERSISTENT_RESERVE_OUT:
1051 	case MAINTENANCE_OUT:
1052 	case SECURITY_PROTOCOL_OUT:
1053 	case ACCESS_CONTROL_OUT:
1054 		ret = DMA_TO_DEVICE;
1055 		break;
1056 	case ALLOW_MEDIUM_REMOVAL:
1057 	case TEST_UNIT_READY:
1058 	case SYNCHRONIZE_CACHE:
1059 	case START_STOP:
1060 	case ERASE:
1061 	case REZERO_UNIT:
1062 	case SEEK_10:
1063 	case SPACE:
1064 	case VERIFY:
1065 	case WRITE_FILEMARKS:
1066 		ret = DMA_NONE;
1067 		break;
1068 	default:
1069 #define CMD_DIR_MSG "target: Unknown data direction for SCSI Opcode 0x%02x\n"
1070 		pr_warn(CMD_DIR_MSG, cdb[0]);
1071 #undef CMD_DIR_MSG
1072 		ret = -EINVAL;
1073 	}
1074 	return ret;
1075 }
1076 
1077 static void usbg_data_write_cmpl(struct usb_ep *ep, struct usb_request *req)
1078 {
1079 	struct usbg_cmd *cmd = req->context;
1080 	struct se_cmd *se_cmd = &cmd->se_cmd;
1081 
1082 	cmd->state = UASP_QUEUE_COMMAND;
1083 
1084 	if (req->status == -ESHUTDOWN) {
1085 		struct uas_stream *stream = &cmd->fu->stream[se_cmd->map_tag];
1086 
1087 		hash_del(&stream->node);
1088 		target_put_sess_cmd(se_cmd);
1089 		transport_generic_free_cmd(&cmd->se_cmd, 0);
1090 		return;
1091 	}
1092 
1093 	if (req->status) {
1094 		pr_err("%s() state %d transfer failed\n", __func__, cmd->state);
1095 		goto cleanup;
1096 	}
1097 
1098 	if (req->num_sgs == 0) {
1099 		sg_copy_from_buffer(se_cmd->t_data_sg,
1100 				se_cmd->t_data_nents,
1101 				cmd->data_buf,
1102 				se_cmd->data_length);
1103 	}
1104 
1105 	cmd->flags |= USBG_CMD_PENDING_DATA_WRITE;
1106 	queue_work(cmd->fu->tpg->workqueue, &cmd->work);
1107 	return;
1108 
1109 cleanup:
1110 	target_put_sess_cmd(se_cmd);
1111 
1112 	/* Command was aborted due to overlapped tag */
1113 	if (cmd->state == UASP_QUEUE_COMMAND &&
1114 	    cmd->tmr_rsp == RC_OVERLAPPED_TAG) {
1115 		uasp_send_tm_response(cmd);
1116 		return;
1117 	}
1118 
1119 	transport_send_check_condition_and_sense(se_cmd,
1120 			TCM_CHECK_CONDITION_ABORT_CMD, 0);
1121 }
1122 
1123 static int usbg_prepare_w_request(struct usbg_cmd *cmd, struct usb_request *req)
1124 {
1125 	struct se_cmd *se_cmd = &cmd->se_cmd;
1126 	struct f_uas *fu = cmd->fu;
1127 	struct usb_gadget *gadget = fuas_to_gadget(fu);
1128 
1129 	if (!gadget->sg_supported) {
1130 		cmd->data_buf = kmalloc(se_cmd->data_length, GFP_ATOMIC);
1131 		if (!cmd->data_buf)
1132 			return -ENOMEM;
1133 
1134 		req->buf = cmd->data_buf;
1135 	} else {
1136 		req->buf = NULL;
1137 		req->num_sgs = se_cmd->t_data_nents;
1138 		req->sg = se_cmd->t_data_sg;
1139 	}
1140 
1141 	req->is_last = 1;
1142 	req->stream_id = cmd->tag;
1143 	req->complete = usbg_data_write_cmpl;
1144 	req->length = se_cmd->data_length;
1145 	req->context = cmd;
1146 
1147 	cmd->state = UASP_SEND_STATUS;
1148 	return 0;
1149 }
1150 
1151 static int usbg_send_status_response(struct se_cmd *se_cmd)
1152 {
1153 	struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd,
1154 			se_cmd);
1155 	struct f_uas *fu = cmd->fu;
1156 
1157 	if (fu->flags & USBG_IS_BOT)
1158 		return bot_send_status_response(cmd);
1159 	else
1160 		return uasp_send_status_response(cmd);
1161 }
1162 
1163 static int usbg_send_write_request(struct se_cmd *se_cmd)
1164 {
1165 	struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd,
1166 			se_cmd);
1167 	struct f_uas *fu = cmd->fu;
1168 
1169 	if (fu->flags & USBG_IS_BOT)
1170 		return bot_send_write_request(cmd);
1171 	else
1172 		return uasp_send_write_request(cmd);
1173 }
1174 
1175 static int usbg_send_read_response(struct se_cmd *se_cmd)
1176 {
1177 	struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd,
1178 			se_cmd);
1179 	struct f_uas *fu = cmd->fu;
1180 
1181 	if (fu->flags & USBG_IS_BOT)
1182 		return bot_send_read_response(cmd);
1183 	else
1184 		return uasp_send_read_response(cmd);
1185 }
1186 
1187 static void usbg_aborted_task(struct se_cmd *se_cmd);
1188 
1189 static void usbg_submit_tmr(struct usbg_cmd *cmd)
1190 {
1191 	struct se_session *se_sess;
1192 	struct se_cmd *se_cmd;
1193 	int flags = TARGET_SCF_ACK_KREF;
1194 
1195 	se_cmd = &cmd->se_cmd;
1196 	se_sess = cmd->fu->tpg->tpg_nexus->tvn_se_sess;
1197 
1198 	target_submit_tmr(se_cmd, se_sess,
1199 			  cmd->response_iu.add_response_info,
1200 			  cmd->unpacked_lun, NULL, uasp_to_tcm_func(cmd->tmr_func),
1201 			  GFP_ATOMIC, cmd->tag, flags);
1202 }
1203 
1204 static void usbg_submit_cmd(struct usbg_cmd *cmd)
1205 {
1206 	struct se_cmd *se_cmd;
1207 	struct tcm_usbg_nexus *tv_nexus;
1208 	struct usbg_tpg *tpg;
1209 	int dir, flags = (TARGET_SCF_UNKNOWN_SIZE | TARGET_SCF_ACK_KREF);
1210 
1211 	/*
1212 	 * Note: each command will spawn its own process, and each stage of the
1213 	 * command is processed sequentially. Should this no longer be the case,
1214 	 * locking is needed.
1215 	 */
1216 	if (cmd->flags & USBG_CMD_PENDING_DATA_WRITE) {
1217 		target_execute_cmd(&cmd->se_cmd);
1218 		cmd->flags &= ~USBG_CMD_PENDING_DATA_WRITE;
1219 		return;
1220 	}
1221 
1222 	se_cmd = &cmd->se_cmd;
1223 	tpg = cmd->fu->tpg;
1224 	tv_nexus = tpg->tpg_nexus;
1225 	dir = get_cmd_dir(cmd->cmd_buf);
1226 	if (dir < 0)
1227 		goto out;
1228 
1229 	target_submit_cmd(se_cmd, tv_nexus->tvn_se_sess, cmd->cmd_buf,
1230 			  cmd->sense_iu.sense, cmd->unpacked_lun, cmd->data_len,
1231 			  cmd->prio_attr, dir, flags);
1232 
1233 	return;
1234 
1235 out:
1236 	__target_init_cmd(se_cmd,
1237 			  tv_nexus->tvn_se_sess->se_tpg->se_tpg_tfo,
1238 			  tv_nexus->tvn_se_sess, cmd->data_len, DMA_NONE,
1239 			  cmd->prio_attr, cmd->sense_iu.sense,
1240 			  cmd->unpacked_lun, NULL);
1241 	transport_send_check_condition_and_sense(se_cmd,
1242 			TCM_UNSUPPORTED_SCSI_OPCODE, 0);
1243 }
1244 
1245 static void usbg_cmd_work(struct work_struct *work)
1246 {
1247 	struct usbg_cmd *cmd = container_of(work, struct usbg_cmd, work);
1248 
1249 	/*
1250 	 * Failure is detected by f_tcm here. Skip submitting the command to the
1251 	 * target core if we already know the failing response and send the usb
1252 	 * response to the host directly.
1253 	 */
1254 	if (cmd->tmr_rsp != RC_RESPONSE_UNKNOWN)
1255 		goto skip;
1256 
1257 	if (cmd->tmr_func)
1258 		usbg_submit_tmr(cmd);
1259 	else
1260 		usbg_submit_cmd(cmd);
1261 
1262 	return;
1263 
1264 skip:
1265 	if (cmd->tmr_rsp == RC_OVERLAPPED_TAG) {
1266 		struct f_uas *fu = cmd->fu;
1267 		struct se_session *se_sess;
1268 		struct uas_stream *stream = NULL;
1269 		struct hlist_node *tmp;
1270 		struct usbg_cmd *active_cmd = NULL;
1271 
1272 		se_sess = cmd->fu->tpg->tpg_nexus->tvn_se_sess;
1273 
1274 		hash_for_each_possible_safe(fu->stream_hash, stream, tmp, node, cmd->tag) {
1275 			int i = stream - &fu->stream[0];
1276 
1277 			active_cmd = &((struct usbg_cmd *)se_sess->sess_cmd_map)[i];
1278 			if (active_cmd->tag == cmd->tag)
1279 				break;
1280 		}
1281 
1282 		/* Sanity check */
1283 		if (!stream || (active_cmd && active_cmd->tag != cmd->tag)) {
1284 			usbg_submit_command(cmd->fu, cmd->req);
1285 			return;
1286 		}
1287 
1288 		reinit_completion(&stream->cmd_completion);
1289 
1290 		/*
1291 		 * A UASP command consists of the command, data, and status
1292 		 * stages, each operating sequentially from different endpoints.
1293 		 *
1294 		 * Each USB endpoint operates independently, and depending on
1295 		 * hardware implementation, a completion callback for a transfer
1296 		 * from one endpoint may not reflect the order of completion on
1297 		 * the wire. This is particularly true for devices with
1298 		 * endpoints that have independent interrupts and event buffers.
1299 		 *
1300 		 * The driver must still detect misbehaving hosts and respond
1301 		 * with an overlap status. To reduce false overlap failures,
1302 		 * allow the active and matching stream ID a brief 1ms to
1303 		 * complete before responding with an overlap command failure.
1304 		 * Overlap failure should be rare.
1305 		 */
1306 		wait_for_completion_timeout(&stream->cmd_completion, msecs_to_jiffies(1));
1307 
1308 		/* If the previous stream is completed, retry the command. */
1309 		if (!hash_hashed(&stream->node)) {
1310 			usbg_submit_command(cmd->fu, cmd->req);
1311 			return;
1312 		}
1313 
1314 		/*
1315 		 * The command isn't submitted to the target core, so we're safe
1316 		 * to remove the bitmap index from the session tag pool.
1317 		 */
1318 		sbitmap_queue_clear(&se_sess->sess_tag_pool,
1319 				    cmd->se_cmd.map_tag,
1320 				    cmd->se_cmd.map_cpu);
1321 
1322 		/*
1323 		 * Overlap command tag detected. Cancel any pending transfer of
1324 		 * the command submitted to target core.
1325 		 */
1326 		active_cmd->tmr_rsp = RC_OVERLAPPED_TAG;
1327 		usbg_aborted_task(&active_cmd->se_cmd);
1328 
1329 		/* Send the response after the transfer is aborted. */
1330 		return;
1331 	}
1332 
1333 	uasp_send_tm_response(cmd);
1334 }
1335 
1336 static struct usbg_cmd *usbg_get_cmd(struct f_uas *fu,
1337 		struct tcm_usbg_nexus *tv_nexus, u32 scsi_tag)
1338 {
1339 	struct se_session *se_sess = tv_nexus->tvn_se_sess;
1340 	struct usbg_cmd *cmd;
1341 	int tag, cpu;
1342 
1343 	tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
1344 	if (tag < 0)
1345 		return ERR_PTR(-ENOMEM);
1346 
1347 	cmd = &((struct usbg_cmd *)se_sess->sess_cmd_map)[tag];
1348 	memset(cmd, 0, sizeof(*cmd));
1349 	cmd->se_cmd.map_tag = tag;
1350 	cmd->se_cmd.map_cpu = cpu;
1351 	cmd->se_cmd.cpuid = cpu;
1352 	cmd->se_cmd.tag = cmd->tag = scsi_tag;
1353 	cmd->fu = fu;
1354 
1355 	return cmd;
1356 }
1357 
1358 static void usbg_release_cmd(struct se_cmd *);
1359 
1360 static int usbg_submit_command(struct f_uas *fu, struct usb_request *req)
1361 {
1362 	struct iu *iu = req->buf;
1363 	struct usbg_cmd *cmd;
1364 	struct usbg_tpg *tpg = fu->tpg;
1365 	struct tcm_usbg_nexus *tv_nexus;
1366 	struct uas_stream *stream;
1367 	struct hlist_node *tmp;
1368 	struct command_iu *cmd_iu;
1369 	u32 cmd_len;
1370 	u16 scsi_tag;
1371 
1372 	tv_nexus = tpg->tpg_nexus;
1373 	if (!tv_nexus) {
1374 		pr_err("Missing nexus, ignoring command\n");
1375 		return -EINVAL;
1376 	}
1377 
1378 	scsi_tag = be16_to_cpup(&iu->tag);
1379 	cmd = usbg_get_cmd(fu, tv_nexus, scsi_tag);
1380 	if (IS_ERR(cmd)) {
1381 		pr_err("usbg_get_cmd failed\n");
1382 		return -ENOMEM;
1383 	}
1384 
1385 	cmd->req = req;
1386 	cmd->fu = fu;
1387 	cmd->tag = scsi_tag;
1388 	cmd->se_cmd.tag = scsi_tag;
1389 	cmd->tmr_func = 0;
1390 	cmd->tmr_rsp = RC_RESPONSE_UNKNOWN;
1391 	cmd->flags = 0;
1392 	cmd->data_len = 0;
1393 
1394 	cmd_iu = (struct command_iu *)iu;
1395 
1396 	/* Command and Task Management IUs share the same LUN offset */
1397 	cmd->unpacked_lun = scsilun_to_int(&cmd_iu->lun);
1398 
1399 	if (iu->iu_id != IU_ID_COMMAND && iu->iu_id != IU_ID_TASK_MGMT) {
1400 		cmd->tmr_rsp = RC_INVALID_INFO_UNIT;
1401 		goto skip;
1402 	}
1403 
1404 	hash_for_each_possible_safe(fu->stream_hash, stream, tmp, node, scsi_tag) {
1405 		struct usbg_cmd *active_cmd;
1406 		struct se_session *se_sess;
1407 		int i = stream - &fu->stream[0];
1408 
1409 		se_sess = cmd->fu->tpg->tpg_nexus->tvn_se_sess;
1410 		active_cmd = &((struct usbg_cmd *)se_sess->sess_cmd_map)[i];
1411 
1412 		if (active_cmd->tag == scsi_tag) {
1413 			cmd->tmr_rsp = RC_OVERLAPPED_TAG;
1414 			goto skip;
1415 		}
1416 	}
1417 
1418 	stream = &fu->stream[cmd->se_cmd.map_tag];
1419 	hash_add(fu->stream_hash, &stream->node, scsi_tag);
1420 
1421 	if (iu->iu_id == IU_ID_TASK_MGMT) {
1422 		struct task_mgmt_iu *tm_iu;
1423 
1424 		tm_iu = (struct task_mgmt_iu *)iu;
1425 		cmd->tmr_func = tm_iu->function;
1426 		goto skip;
1427 	}
1428 
1429 	cmd_len = (cmd_iu->len & ~0x3) + 16;
1430 	if (cmd_len > USBG_MAX_CMD) {
1431 		target_free_tag(tv_nexus->tvn_se_sess, &cmd->se_cmd);
1432 		hash_del(&stream->node);
1433 		return -EINVAL;
1434 	}
1435 	memcpy(cmd->cmd_buf, cmd_iu->cdb, cmd_len);
1436 
1437 	switch (cmd_iu->prio_attr & 0x7) {
1438 	case UAS_HEAD_TAG:
1439 		cmd->prio_attr = TCM_HEAD_TAG;
1440 		break;
1441 	case UAS_ORDERED_TAG:
1442 		cmd->prio_attr = TCM_ORDERED_TAG;
1443 		break;
1444 	case UAS_ACA:
1445 		cmd->prio_attr = TCM_ACA_TAG;
1446 		break;
1447 	default:
1448 		pr_debug_once("Unsupported prio_attr: %02x.\n",
1449 				cmd_iu->prio_attr);
1450 		fallthrough;
1451 	case UAS_SIMPLE_TAG:
1452 		cmd->prio_attr = TCM_SIMPLE_TAG;
1453 		break;
1454 	}
1455 
1456 skip:
1457 	INIT_WORK(&cmd->work, usbg_cmd_work);
1458 	queue_work(tpg->workqueue, &cmd->work);
1459 
1460 	return 0;
1461 }
1462 
1463 static void bot_cmd_work(struct work_struct *work)
1464 {
1465 	struct usbg_cmd *cmd = container_of(work, struct usbg_cmd, work);
1466 	struct se_cmd *se_cmd;
1467 	struct tcm_usbg_nexus *tv_nexus;
1468 	struct usbg_tpg *tpg;
1469 	int flags = TARGET_SCF_ACK_KREF;
1470 	int dir;
1471 
1472 	/*
1473 	 * Note: each command will spawn its own process, and each stage of the
1474 	 * command is processed sequentially. Should this no longer be the case,
1475 	 * locking is needed.
1476 	 */
1477 	if (cmd->flags & USBG_CMD_PENDING_DATA_WRITE) {
1478 		target_execute_cmd(&cmd->se_cmd);
1479 		cmd->flags &= ~USBG_CMD_PENDING_DATA_WRITE;
1480 		return;
1481 	}
1482 
1483 	se_cmd = &cmd->se_cmd;
1484 	tpg = cmd->fu->tpg;
1485 	tv_nexus = tpg->tpg_nexus;
1486 	dir = get_cmd_dir(cmd->cmd_buf);
1487 	if (dir < 0)
1488 		goto out;
1489 
1490 	target_submit_cmd(se_cmd, tv_nexus->tvn_se_sess,
1491 			  cmd->cmd_buf, cmd->sense_iu.sense, cmd->unpacked_lun,
1492 			  cmd->data_len, cmd->prio_attr, dir, flags);
1493 	return;
1494 
1495 out:
1496 	__target_init_cmd(se_cmd,
1497 			  tv_nexus->tvn_se_sess->se_tpg->se_tpg_tfo,
1498 			  tv_nexus->tvn_se_sess, cmd->data_len, DMA_NONE,
1499 			  cmd->prio_attr, cmd->sense_iu.sense,
1500 			  cmd->unpacked_lun, NULL);
1501 	transport_send_check_condition_and_sense(se_cmd,
1502 				TCM_UNSUPPORTED_SCSI_OPCODE, 0);
1503 }
1504 
1505 static int bot_submit_command(struct f_uas *fu,
1506 		void *cmdbuf, unsigned int len)
1507 {
1508 	struct bulk_cb_wrap *cbw = cmdbuf;
1509 	struct usbg_cmd *cmd;
1510 	struct usbg_tpg *tpg = fu->tpg;
1511 	struct tcm_usbg_nexus *tv_nexus;
1512 	u32 cmd_len;
1513 
1514 	if (cbw->Signature != cpu_to_le32(US_BULK_CB_SIGN)) {
1515 		pr_err("Wrong signature on CBW\n");
1516 		return -EINVAL;
1517 	}
1518 	if (len != 31) {
1519 		pr_err("Wrong length for CBW\n");
1520 		return -EINVAL;
1521 	}
1522 
1523 	cmd_len = cbw->Length;
1524 	if (cmd_len < 1 || cmd_len > 16)
1525 		return -EINVAL;
1526 
1527 	tv_nexus = tpg->tpg_nexus;
1528 	if (!tv_nexus) {
1529 		pr_err("Missing nexus, ignoring command\n");
1530 		return -ENODEV;
1531 	}
1532 
1533 	cmd = usbg_get_cmd(fu, tv_nexus, cbw->Tag);
1534 	if (IS_ERR(cmd)) {
1535 		pr_err("usbg_get_cmd failed\n");
1536 		return -ENOMEM;
1537 	}
1538 	memcpy(cmd->cmd_buf, cbw->CDB, cmd_len);
1539 
1540 	cmd->bot_tag = cbw->Tag;
1541 	cmd->prio_attr = TCM_SIMPLE_TAG;
1542 	cmd->unpacked_lun = cbw->Lun;
1543 	cmd->is_read = cbw->Flags & US_BULK_FLAG_IN ? 1 : 0;
1544 	cmd->data_len = le32_to_cpu(cbw->DataTransferLength);
1545 	cmd->se_cmd.tag = le32_to_cpu(cmd->bot_tag);
1546 	cmd->flags = 0;
1547 
1548 	INIT_WORK(&cmd->work, bot_cmd_work);
1549 	queue_work(tpg->workqueue, &cmd->work);
1550 
1551 	return 0;
1552 }
1553 
1554 /* Start fabric.c code */
1555 
1556 static int usbg_check_true(struct se_portal_group *se_tpg)
1557 {
1558 	return 1;
1559 }
1560 
1561 static char *usbg_get_fabric_wwn(struct se_portal_group *se_tpg)
1562 {
1563 	struct usbg_tpg *tpg = container_of(se_tpg,
1564 				struct usbg_tpg, se_tpg);
1565 	struct usbg_tport *tport = tpg->tport;
1566 
1567 	return &tport->tport_name[0];
1568 }
1569 
1570 static u16 usbg_get_tag(struct se_portal_group *se_tpg)
1571 {
1572 	struct usbg_tpg *tpg = container_of(se_tpg,
1573 				struct usbg_tpg, se_tpg);
1574 	return tpg->tport_tpgt;
1575 }
1576 
1577 static void usbg_release_cmd(struct se_cmd *se_cmd)
1578 {
1579 	struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd,
1580 			se_cmd);
1581 	struct se_session *se_sess = se_cmd->se_sess;
1582 
1583 	cmd->tag = 0;
1584 	kfree(cmd->data_buf);
1585 	target_free_tag(se_sess, se_cmd);
1586 }
1587 
1588 static void usbg_queue_tm_rsp(struct se_cmd *se_cmd)
1589 {
1590 	struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd, se_cmd);
1591 
1592 	uasp_send_tm_response(cmd);
1593 }
1594 
1595 static void usbg_aborted_task(struct se_cmd *se_cmd)
1596 {
1597 	struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd, se_cmd);
1598 	struct f_uas *fu = cmd->fu;
1599 	struct usb_gadget *gadget = fuas_to_gadget(fu);
1600 	struct uas_stream *stream = &fu->stream[se_cmd->map_tag];
1601 	int ret = 0;
1602 
1603 	if (stream->req_out->status == -EINPROGRESS)
1604 		ret = usb_ep_dequeue(fu->ep_out, stream->req_out);
1605 	else if (stream->req_in->status == -EINPROGRESS)
1606 		ret = usb_ep_dequeue(fu->ep_in, stream->req_in);
1607 	else if (stream->req_status->status == -EINPROGRESS)
1608 		ret = usb_ep_dequeue(fu->ep_status, stream->req_status);
1609 
1610 	if (ret)
1611 		dev_err(&gadget->dev, "Failed to abort cmd tag %d, (%d)\n",
1612 			cmd->tag, ret);
1613 
1614 	cmd->state = UASP_QUEUE_COMMAND;
1615 }
1616 
1617 static const char *usbg_check_wwn(const char *name)
1618 {
1619 	const char *n;
1620 	unsigned int len;
1621 
1622 	n = strstr(name, "naa.");
1623 	if (!n)
1624 		return NULL;
1625 	n += 4;
1626 	len = strlen(n);
1627 	if (len == 0 || len > USBG_NAMELEN - 1)
1628 		return NULL;
1629 	return n;
1630 }
1631 
1632 static int usbg_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
1633 {
1634 	if (!usbg_check_wwn(name))
1635 		return -EINVAL;
1636 	return 0;
1637 }
1638 
1639 static struct se_portal_group *usbg_make_tpg(struct se_wwn *wwn,
1640 					     const char *name)
1641 {
1642 	struct usbg_tport *tport = container_of(wwn, struct usbg_tport,
1643 			tport_wwn);
1644 	struct usbg_tpg *tpg;
1645 	u16 tpgt;
1646 	int ret;
1647 	struct f_tcm_opts *opts;
1648 	unsigned i;
1649 
1650 	if (strstr(name, "tpgt_") != name)
1651 		return ERR_PTR(-EINVAL);
1652 	if (kstrtou16(name + 5, 0, &tpgt))
1653 		return ERR_PTR(-EINVAL);
1654 	ret = -ENODEV;
1655 	mutex_lock(&tpg_instances_lock);
1656 	for (i = 0; i < TPG_INSTANCES; ++i)
1657 		if (tpg_instances[i].func_inst && !tpg_instances[i].tpg)
1658 			break;
1659 	if (i == TPG_INSTANCES)
1660 		goto unlock_inst;
1661 
1662 	opts = container_of(tpg_instances[i].func_inst, struct f_tcm_opts,
1663 		func_inst);
1664 	mutex_lock(&opts->dep_lock);
1665 	if (!opts->ready)
1666 		goto unlock_dep;
1667 
1668 	if (opts->has_dep) {
1669 		if (!try_module_get(opts->dependent))
1670 			goto unlock_dep;
1671 	} else {
1672 		ret = configfs_depend_item_unlocked(
1673 			wwn->wwn_group.cg_subsys,
1674 			&opts->func_inst.group.cg_item);
1675 		if (ret)
1676 			goto unlock_dep;
1677 	}
1678 
1679 	tpg = kzalloc(sizeof(struct usbg_tpg), GFP_KERNEL);
1680 	ret = -ENOMEM;
1681 	if (!tpg)
1682 		goto unref_dep;
1683 	mutex_init(&tpg->tpg_mutex);
1684 	atomic_set(&tpg->tpg_port_count, 0);
1685 	tpg->workqueue = alloc_workqueue("tcm_usb_gadget",
1686 					 WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
1687 	if (!tpg->workqueue)
1688 		goto free_tpg;
1689 
1690 	tpg->tport = tport;
1691 	tpg->tport_tpgt = tpgt;
1692 
1693 	/*
1694 	 * SPC doesn't assign a protocol identifier for USB-SCSI, so we
1695 	 * pretend to be SAS..
1696 	 */
1697 	ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_SAS);
1698 	if (ret < 0)
1699 		goto free_workqueue;
1700 
1701 	tpg_instances[i].tpg = tpg;
1702 	tpg->fi = tpg_instances[i].func_inst;
1703 	mutex_unlock(&opts->dep_lock);
1704 	mutex_unlock(&tpg_instances_lock);
1705 	return &tpg->se_tpg;
1706 
1707 free_workqueue:
1708 	destroy_workqueue(tpg->workqueue);
1709 free_tpg:
1710 	kfree(tpg);
1711 unref_dep:
1712 	if (opts->has_dep)
1713 		module_put(opts->dependent);
1714 	else
1715 		configfs_undepend_item_unlocked(&opts->func_inst.group.cg_item);
1716 unlock_dep:
1717 	mutex_unlock(&opts->dep_lock);
1718 unlock_inst:
1719 	mutex_unlock(&tpg_instances_lock);
1720 
1721 	return ERR_PTR(ret);
1722 }
1723 
1724 static int tcm_usbg_drop_nexus(struct usbg_tpg *);
1725 
1726 static void usbg_drop_tpg(struct se_portal_group *se_tpg)
1727 {
1728 	struct usbg_tpg *tpg = container_of(se_tpg,
1729 				struct usbg_tpg, se_tpg);
1730 	unsigned i;
1731 	struct f_tcm_opts *opts;
1732 
1733 	tcm_usbg_drop_nexus(tpg);
1734 	core_tpg_deregister(se_tpg);
1735 	destroy_workqueue(tpg->workqueue);
1736 
1737 	mutex_lock(&tpg_instances_lock);
1738 	for (i = 0; i < TPG_INSTANCES; ++i)
1739 		if (tpg_instances[i].tpg == tpg)
1740 			break;
1741 	if (i < TPG_INSTANCES) {
1742 		tpg_instances[i].tpg = NULL;
1743 		opts = container_of(tpg_instances[i].func_inst,
1744 			struct f_tcm_opts, func_inst);
1745 		mutex_lock(&opts->dep_lock);
1746 		if (opts->has_dep)
1747 			module_put(opts->dependent);
1748 		else
1749 			configfs_undepend_item_unlocked(
1750 				&opts->func_inst.group.cg_item);
1751 		mutex_unlock(&opts->dep_lock);
1752 	}
1753 	mutex_unlock(&tpg_instances_lock);
1754 
1755 	kfree(tpg);
1756 }
1757 
1758 static struct se_wwn *usbg_make_tport(
1759 	struct target_fabric_configfs *tf,
1760 	struct config_group *group,
1761 	const char *name)
1762 {
1763 	struct usbg_tport *tport;
1764 	const char *wnn_name;
1765 	u64 wwpn = 0;
1766 
1767 	wnn_name = usbg_check_wwn(name);
1768 	if (!wnn_name)
1769 		return ERR_PTR(-EINVAL);
1770 
1771 	tport = kzalloc(sizeof(struct usbg_tport), GFP_KERNEL);
1772 	if (!(tport))
1773 		return ERR_PTR(-ENOMEM);
1774 
1775 	tport->tport_wwpn = wwpn;
1776 	snprintf(tport->tport_name, sizeof(tport->tport_name), "%s", wnn_name);
1777 	return &tport->tport_wwn;
1778 }
1779 
1780 static void usbg_drop_tport(struct se_wwn *wwn)
1781 {
1782 	struct usbg_tport *tport = container_of(wwn,
1783 				struct usbg_tport, tport_wwn);
1784 	kfree(tport);
1785 }
1786 
1787 /*
1788  * If somebody feels like dropping the version property, go ahead.
1789  */
1790 static ssize_t usbg_wwn_version_show(struct config_item *item,  char *page)
1791 {
1792 	return sprintf(page, "usb-gadget fabric module\n");
1793 }
1794 
1795 CONFIGFS_ATTR_RO(usbg_wwn_, version);
1796 
1797 static struct configfs_attribute *usbg_wwn_attrs[] = {
1798 	&usbg_wwn_attr_version,
1799 	NULL,
1800 };
1801 
1802 static int usbg_attach(struct usbg_tpg *);
1803 static void usbg_detach(struct usbg_tpg *);
1804 
1805 static int usbg_enable_tpg(struct se_portal_group *se_tpg, bool enable)
1806 {
1807 	struct usbg_tpg  *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg);
1808 	int ret = 0;
1809 
1810 	if (enable)
1811 		ret = usbg_attach(tpg);
1812 	else
1813 		usbg_detach(tpg);
1814 	if (ret)
1815 		return ret;
1816 
1817 	tpg->gadget_connect = enable;
1818 
1819 	return 0;
1820 }
1821 
1822 static ssize_t tcm_usbg_tpg_nexus_show(struct config_item *item, char *page)
1823 {
1824 	struct se_portal_group *se_tpg = to_tpg(item);
1825 	struct usbg_tpg *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg);
1826 	struct tcm_usbg_nexus *tv_nexus;
1827 	ssize_t ret;
1828 
1829 	mutex_lock(&tpg->tpg_mutex);
1830 	tv_nexus = tpg->tpg_nexus;
1831 	if (!tv_nexus) {
1832 		ret = -ENODEV;
1833 		goto out;
1834 	}
1835 	ret = sysfs_emit(page, "%s\n",
1836 			 tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
1837 out:
1838 	mutex_unlock(&tpg->tpg_mutex);
1839 	return ret;
1840 }
1841 
1842 static int usbg_alloc_sess_cb(struct se_portal_group *se_tpg,
1843 			      struct se_session *se_sess, void *p)
1844 {
1845 	struct usbg_tpg *tpg = container_of(se_tpg,
1846 				struct usbg_tpg, se_tpg);
1847 
1848 	tpg->tpg_nexus = p;
1849 	return 0;
1850 }
1851 
1852 static int tcm_usbg_make_nexus(struct usbg_tpg *tpg, char *name)
1853 {
1854 	struct tcm_usbg_nexus *tv_nexus;
1855 	int ret = 0;
1856 
1857 	mutex_lock(&tpg->tpg_mutex);
1858 	if (tpg->tpg_nexus) {
1859 		ret = -EEXIST;
1860 		pr_debug("tpg->tpg_nexus already exists\n");
1861 		goto out_unlock;
1862 	}
1863 
1864 	tv_nexus = kzalloc(sizeof(*tv_nexus), GFP_KERNEL);
1865 	if (!tv_nexus) {
1866 		ret = -ENOMEM;
1867 		goto out_unlock;
1868 	}
1869 
1870 	tv_nexus->tvn_se_sess = target_setup_session(&tpg->se_tpg,
1871 						     USB_G_DEFAULT_SESSION_TAGS,
1872 						     sizeof(struct usbg_cmd),
1873 						     TARGET_PROT_NORMAL, name,
1874 						     tv_nexus, usbg_alloc_sess_cb);
1875 	if (IS_ERR(tv_nexus->tvn_se_sess)) {
1876 #define MAKE_NEXUS_MSG "core_tpg_check_initiator_node_acl() failed for %s\n"
1877 		pr_debug(MAKE_NEXUS_MSG, name);
1878 #undef MAKE_NEXUS_MSG
1879 		ret = PTR_ERR(tv_nexus->tvn_se_sess);
1880 		kfree(tv_nexus);
1881 	}
1882 
1883 out_unlock:
1884 	mutex_unlock(&tpg->tpg_mutex);
1885 	return ret;
1886 }
1887 
1888 static int tcm_usbg_drop_nexus(struct usbg_tpg *tpg)
1889 {
1890 	struct se_session *se_sess;
1891 	struct tcm_usbg_nexus *tv_nexus;
1892 	int ret = -ENODEV;
1893 
1894 	mutex_lock(&tpg->tpg_mutex);
1895 	tv_nexus = tpg->tpg_nexus;
1896 	if (!tv_nexus)
1897 		goto out;
1898 
1899 	se_sess = tv_nexus->tvn_se_sess;
1900 	if (!se_sess)
1901 		goto out;
1902 
1903 	if (atomic_read(&tpg->tpg_port_count)) {
1904 		ret = -EPERM;
1905 #define MSG "Unable to remove Host I_T Nexus with active TPG port count: %d\n"
1906 		pr_err(MSG, atomic_read(&tpg->tpg_port_count));
1907 #undef MSG
1908 		goto out;
1909 	}
1910 
1911 	pr_debug("Removing I_T Nexus to Initiator Port: %s\n",
1912 			tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
1913 	/*
1914 	 * Release the SCSI I_T Nexus to the emulated vHost Target Port
1915 	 */
1916 	target_remove_session(se_sess);
1917 	tpg->tpg_nexus = NULL;
1918 
1919 	kfree(tv_nexus);
1920 	ret = 0;
1921 out:
1922 	mutex_unlock(&tpg->tpg_mutex);
1923 	return ret;
1924 }
1925 
1926 static ssize_t tcm_usbg_tpg_nexus_store(struct config_item *item,
1927 		const char *page, size_t count)
1928 {
1929 	struct se_portal_group *se_tpg = to_tpg(item);
1930 	struct usbg_tpg *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg);
1931 	unsigned char i_port[USBG_NAMELEN], *ptr;
1932 	int ret;
1933 
1934 	if (!strncmp(page, "NULL", 4)) {
1935 		ret = tcm_usbg_drop_nexus(tpg);
1936 		return (!ret) ? count : ret;
1937 	}
1938 	if (strlen(page) >= USBG_NAMELEN) {
1939 
1940 #define NEXUS_STORE_MSG "Emulated NAA Sas Address: %s, exceeds max: %d\n"
1941 		pr_err(NEXUS_STORE_MSG, page, USBG_NAMELEN);
1942 #undef NEXUS_STORE_MSG
1943 		return -EINVAL;
1944 	}
1945 	snprintf(i_port, USBG_NAMELEN, "%s", page);
1946 
1947 	ptr = strstr(i_port, "naa.");
1948 	if (!ptr) {
1949 		pr_err("Missing 'naa.' prefix\n");
1950 		return -EINVAL;
1951 	}
1952 
1953 	if (i_port[strlen(i_port) - 1] == '\n')
1954 		i_port[strlen(i_port) - 1] = '\0';
1955 
1956 	ret = tcm_usbg_make_nexus(tpg, &i_port[0]);
1957 	if (ret < 0)
1958 		return ret;
1959 	return count;
1960 }
1961 
1962 CONFIGFS_ATTR(tcm_usbg_tpg_, nexus);
1963 
1964 static struct configfs_attribute *usbg_base_attrs[] = {
1965 	&tcm_usbg_tpg_attr_nexus,
1966 	NULL,
1967 };
1968 
1969 static int usbg_port_link(struct se_portal_group *se_tpg, struct se_lun *lun)
1970 {
1971 	struct usbg_tpg *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg);
1972 
1973 	atomic_inc(&tpg->tpg_port_count);
1974 	smp_mb__after_atomic();
1975 	return 0;
1976 }
1977 
1978 static void usbg_port_unlink(struct se_portal_group *se_tpg,
1979 		struct se_lun *se_lun)
1980 {
1981 	struct usbg_tpg *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg);
1982 
1983 	atomic_dec(&tpg->tpg_port_count);
1984 	smp_mb__after_atomic();
1985 }
1986 
1987 static int usbg_check_stop_free(struct se_cmd *se_cmd)
1988 {
1989 	return target_put_sess_cmd(se_cmd);
1990 }
1991 
1992 static const struct target_core_fabric_ops usbg_ops = {
1993 	.module				= THIS_MODULE,
1994 	.fabric_name			= "usb_gadget",
1995 	.tpg_get_wwn			= usbg_get_fabric_wwn,
1996 	.tpg_get_tag			= usbg_get_tag,
1997 	.tpg_check_demo_mode		= usbg_check_true,
1998 	.release_cmd			= usbg_release_cmd,
1999 	.sess_get_initiator_sid		= NULL,
2000 	.write_pending			= usbg_send_write_request,
2001 	.queue_data_in			= usbg_send_read_response,
2002 	.queue_status			= usbg_send_status_response,
2003 	.queue_tm_rsp			= usbg_queue_tm_rsp,
2004 	.aborted_task			= usbg_aborted_task,
2005 	.check_stop_free		= usbg_check_stop_free,
2006 
2007 	.fabric_make_wwn		= usbg_make_tport,
2008 	.fabric_drop_wwn		= usbg_drop_tport,
2009 	.fabric_make_tpg		= usbg_make_tpg,
2010 	.fabric_enable_tpg		= usbg_enable_tpg,
2011 	.fabric_drop_tpg		= usbg_drop_tpg,
2012 	.fabric_post_link		= usbg_port_link,
2013 	.fabric_pre_unlink		= usbg_port_unlink,
2014 	.fabric_init_nodeacl		= usbg_init_nodeacl,
2015 
2016 	.tfc_wwn_attrs			= usbg_wwn_attrs,
2017 	.tfc_tpg_base_attrs		= usbg_base_attrs,
2018 
2019 	.default_submit_type		= TARGET_DIRECT_SUBMIT,
2020 	.direct_submit_supp		= 1,
2021 };
2022 
2023 /* Start gadget.c code */
2024 
2025 static struct usb_interface_descriptor bot_intf_desc = {
2026 	.bLength =              sizeof(bot_intf_desc),
2027 	.bDescriptorType =      USB_DT_INTERFACE,
2028 	.bNumEndpoints =        2,
2029 	.bAlternateSetting =	USB_G_ALT_INT_BBB,
2030 	.bInterfaceClass =      USB_CLASS_MASS_STORAGE,
2031 	.bInterfaceSubClass =   USB_SC_SCSI,
2032 	.bInterfaceProtocol =   USB_PR_BULK,
2033 };
2034 
2035 static struct usb_interface_descriptor uasp_intf_desc = {
2036 	.bLength =		sizeof(uasp_intf_desc),
2037 	.bDescriptorType =	USB_DT_INTERFACE,
2038 	.bNumEndpoints =	4,
2039 	.bAlternateSetting =	USB_G_ALT_INT_UAS,
2040 	.bInterfaceClass =	USB_CLASS_MASS_STORAGE,
2041 	.bInterfaceSubClass =	USB_SC_SCSI,
2042 	.bInterfaceProtocol =	USB_PR_UAS,
2043 };
2044 
2045 static struct usb_endpoint_descriptor uasp_bi_desc = {
2046 	.bLength =		USB_DT_ENDPOINT_SIZE,
2047 	.bDescriptorType =	USB_DT_ENDPOINT,
2048 	.bEndpointAddress =	USB_DIR_IN,
2049 	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
2050 	.wMaxPacketSize =	cpu_to_le16(512),
2051 };
2052 
2053 static struct usb_endpoint_descriptor uasp_fs_bi_desc = {
2054 	.bLength =		USB_DT_ENDPOINT_SIZE,
2055 	.bDescriptorType =	USB_DT_ENDPOINT,
2056 	.bEndpointAddress =	USB_DIR_IN,
2057 	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
2058 };
2059 
2060 static struct usb_pipe_usage_descriptor uasp_bi_pipe_desc = {
2061 	.bLength =		sizeof(uasp_bi_pipe_desc),
2062 	.bDescriptorType =	USB_DT_PIPE_USAGE,
2063 	.bPipeID =		DATA_IN_PIPE_ID,
2064 };
2065 
2066 static struct usb_endpoint_descriptor uasp_ss_bi_desc = {
2067 	.bLength =		USB_DT_ENDPOINT_SIZE,
2068 	.bDescriptorType =	USB_DT_ENDPOINT,
2069 	.bEndpointAddress =	USB_DIR_IN,
2070 	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
2071 	.wMaxPacketSize =	cpu_to_le16(1024),
2072 };
2073 
2074 static struct usb_ss_ep_comp_descriptor uasp_bi_ep_comp_desc = {
2075 	.bLength =		sizeof(uasp_bi_ep_comp_desc),
2076 	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
2077 	.bMaxBurst =		15,
2078 	.bmAttributes =		UASP_SS_EP_COMP_LOG_STREAMS,
2079 	.wBytesPerInterval =	0,
2080 };
2081 
2082 static struct usb_ss_ep_comp_descriptor bot_bi_ep_comp_desc = {
2083 	.bLength =		sizeof(bot_bi_ep_comp_desc),
2084 	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
2085 	.bMaxBurst =		15,
2086 };
2087 
2088 static struct usb_endpoint_descriptor uasp_bo_desc = {
2089 	.bLength =		USB_DT_ENDPOINT_SIZE,
2090 	.bDescriptorType =	USB_DT_ENDPOINT,
2091 	.bEndpointAddress =	USB_DIR_OUT,
2092 	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
2093 	.wMaxPacketSize =	cpu_to_le16(512),
2094 };
2095 
2096 static struct usb_endpoint_descriptor uasp_fs_bo_desc = {
2097 	.bLength =		USB_DT_ENDPOINT_SIZE,
2098 	.bDescriptorType =	USB_DT_ENDPOINT,
2099 	.bEndpointAddress =	USB_DIR_OUT,
2100 	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
2101 };
2102 
2103 static struct usb_pipe_usage_descriptor uasp_bo_pipe_desc = {
2104 	.bLength =		sizeof(uasp_bo_pipe_desc),
2105 	.bDescriptorType =	USB_DT_PIPE_USAGE,
2106 	.bPipeID =		DATA_OUT_PIPE_ID,
2107 };
2108 
2109 static struct usb_endpoint_descriptor uasp_ss_bo_desc = {
2110 	.bLength =		USB_DT_ENDPOINT_SIZE,
2111 	.bDescriptorType =	USB_DT_ENDPOINT,
2112 	.bEndpointAddress =	USB_DIR_OUT,
2113 	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
2114 	.wMaxPacketSize =	cpu_to_le16(0x400),
2115 };
2116 
2117 static struct usb_ss_ep_comp_descriptor uasp_bo_ep_comp_desc = {
2118 	.bLength =		sizeof(uasp_bo_ep_comp_desc),
2119 	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
2120 	.bMaxBurst =		15,
2121 	.bmAttributes =		UASP_SS_EP_COMP_LOG_STREAMS,
2122 };
2123 
2124 static struct usb_ss_ep_comp_descriptor bot_bo_ep_comp_desc = {
2125 	.bLength =		sizeof(bot_bo_ep_comp_desc),
2126 	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
2127 	.bMaxBurst =		15,
2128 };
2129 
2130 static struct usb_endpoint_descriptor uasp_status_desc = {
2131 	.bLength =		USB_DT_ENDPOINT_SIZE,
2132 	.bDescriptorType =	USB_DT_ENDPOINT,
2133 	.bEndpointAddress =	USB_DIR_IN,
2134 	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
2135 	.wMaxPacketSize =	cpu_to_le16(512),
2136 };
2137 
2138 static struct usb_endpoint_descriptor uasp_fs_status_desc = {
2139 	.bLength =		USB_DT_ENDPOINT_SIZE,
2140 	.bDescriptorType =	USB_DT_ENDPOINT,
2141 	.bEndpointAddress =	USB_DIR_IN,
2142 	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
2143 };
2144 
2145 static struct usb_pipe_usage_descriptor uasp_status_pipe_desc = {
2146 	.bLength =		sizeof(uasp_status_pipe_desc),
2147 	.bDescriptorType =	USB_DT_PIPE_USAGE,
2148 	.bPipeID =		STATUS_PIPE_ID,
2149 };
2150 
2151 static struct usb_endpoint_descriptor uasp_ss_status_desc = {
2152 	.bLength =		USB_DT_ENDPOINT_SIZE,
2153 	.bDescriptorType =	USB_DT_ENDPOINT,
2154 	.bEndpointAddress =	USB_DIR_IN,
2155 	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
2156 	.wMaxPacketSize =	cpu_to_le16(1024),
2157 };
2158 
2159 static struct usb_ss_ep_comp_descriptor uasp_status_in_ep_comp_desc = {
2160 	.bLength =		sizeof(uasp_status_in_ep_comp_desc),
2161 	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
2162 	.bmAttributes =		UASP_SS_EP_COMP_LOG_STREAMS,
2163 };
2164 
2165 static struct usb_endpoint_descriptor uasp_cmd_desc = {
2166 	.bLength =		USB_DT_ENDPOINT_SIZE,
2167 	.bDescriptorType =	USB_DT_ENDPOINT,
2168 	.bEndpointAddress =	USB_DIR_OUT,
2169 	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
2170 	.wMaxPacketSize =	cpu_to_le16(512),
2171 };
2172 
2173 static struct usb_endpoint_descriptor uasp_fs_cmd_desc = {
2174 	.bLength =		USB_DT_ENDPOINT_SIZE,
2175 	.bDescriptorType =	USB_DT_ENDPOINT,
2176 	.bEndpointAddress =	USB_DIR_OUT,
2177 	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
2178 };
2179 
2180 static struct usb_pipe_usage_descriptor uasp_cmd_pipe_desc = {
2181 	.bLength =		sizeof(uasp_cmd_pipe_desc),
2182 	.bDescriptorType =	USB_DT_PIPE_USAGE,
2183 	.bPipeID =		CMD_PIPE_ID,
2184 };
2185 
2186 static struct usb_endpoint_descriptor uasp_ss_cmd_desc = {
2187 	.bLength =		USB_DT_ENDPOINT_SIZE,
2188 	.bDescriptorType =	USB_DT_ENDPOINT,
2189 	.bEndpointAddress =	USB_DIR_OUT,
2190 	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
2191 	.wMaxPacketSize =	cpu_to_le16(1024),
2192 };
2193 
2194 static struct usb_ss_ep_comp_descriptor uasp_cmd_comp_desc = {
2195 	.bLength =		sizeof(uasp_cmd_comp_desc),
2196 	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
2197 };
2198 
2199 static struct usb_descriptor_header *uasp_fs_function_desc[] = {
2200 	(struct usb_descriptor_header *) &bot_intf_desc,
2201 	(struct usb_descriptor_header *) &uasp_fs_bi_desc,
2202 	(struct usb_descriptor_header *) &uasp_fs_bo_desc,
2203 
2204 	(struct usb_descriptor_header *) &uasp_intf_desc,
2205 	(struct usb_descriptor_header *) &uasp_fs_bi_desc,
2206 	(struct usb_descriptor_header *) &uasp_bi_pipe_desc,
2207 	(struct usb_descriptor_header *) &uasp_fs_bo_desc,
2208 	(struct usb_descriptor_header *) &uasp_bo_pipe_desc,
2209 	(struct usb_descriptor_header *) &uasp_fs_status_desc,
2210 	(struct usb_descriptor_header *) &uasp_status_pipe_desc,
2211 	(struct usb_descriptor_header *) &uasp_fs_cmd_desc,
2212 	(struct usb_descriptor_header *) &uasp_cmd_pipe_desc,
2213 	NULL,
2214 };
2215 
2216 static struct usb_descriptor_header *uasp_hs_function_desc[] = {
2217 	(struct usb_descriptor_header *) &bot_intf_desc,
2218 	(struct usb_descriptor_header *) &uasp_bi_desc,
2219 	(struct usb_descriptor_header *) &uasp_bo_desc,
2220 
2221 	(struct usb_descriptor_header *) &uasp_intf_desc,
2222 	(struct usb_descriptor_header *) &uasp_bi_desc,
2223 	(struct usb_descriptor_header *) &uasp_bi_pipe_desc,
2224 	(struct usb_descriptor_header *) &uasp_bo_desc,
2225 	(struct usb_descriptor_header *) &uasp_bo_pipe_desc,
2226 	(struct usb_descriptor_header *) &uasp_status_desc,
2227 	(struct usb_descriptor_header *) &uasp_status_pipe_desc,
2228 	(struct usb_descriptor_header *) &uasp_cmd_desc,
2229 	(struct usb_descriptor_header *) &uasp_cmd_pipe_desc,
2230 	NULL,
2231 };
2232 
2233 static struct usb_descriptor_header *uasp_ss_function_desc[] = {
2234 	(struct usb_descriptor_header *) &bot_intf_desc,
2235 	(struct usb_descriptor_header *) &uasp_ss_bi_desc,
2236 	(struct usb_descriptor_header *) &bot_bi_ep_comp_desc,
2237 	(struct usb_descriptor_header *) &uasp_ss_bo_desc,
2238 	(struct usb_descriptor_header *) &bot_bo_ep_comp_desc,
2239 
2240 	(struct usb_descriptor_header *) &uasp_intf_desc,
2241 	(struct usb_descriptor_header *) &uasp_ss_bi_desc,
2242 	(struct usb_descriptor_header *) &uasp_bi_ep_comp_desc,
2243 	(struct usb_descriptor_header *) &uasp_bi_pipe_desc,
2244 	(struct usb_descriptor_header *) &uasp_ss_bo_desc,
2245 	(struct usb_descriptor_header *) &uasp_bo_ep_comp_desc,
2246 	(struct usb_descriptor_header *) &uasp_bo_pipe_desc,
2247 	(struct usb_descriptor_header *) &uasp_ss_status_desc,
2248 	(struct usb_descriptor_header *) &uasp_status_in_ep_comp_desc,
2249 	(struct usb_descriptor_header *) &uasp_status_pipe_desc,
2250 	(struct usb_descriptor_header *) &uasp_ss_cmd_desc,
2251 	(struct usb_descriptor_header *) &uasp_cmd_comp_desc,
2252 	(struct usb_descriptor_header *) &uasp_cmd_pipe_desc,
2253 	NULL,
2254 };
2255 
2256 static struct usb_string	tcm_us_strings[] = {
2257 	[USB_G_STR_INT_UAS].s		= "USB Attached SCSI",
2258 	[USB_G_STR_INT_BBB].s		= "Bulk Only Transport",
2259 	{ },
2260 };
2261 
2262 static struct usb_gadget_strings tcm_stringtab = {
2263 	.language = 0x0409,
2264 	.strings = tcm_us_strings,
2265 };
2266 
2267 static struct usb_gadget_strings *tcm_strings[] = {
2268 	&tcm_stringtab,
2269 	NULL,
2270 };
2271 
2272 static int tcm_bind(struct usb_configuration *c, struct usb_function *f)
2273 {
2274 	struct f_uas		*fu = to_f_uas(f);
2275 	struct usb_string	*us;
2276 	struct usb_gadget	*gadget = c->cdev->gadget;
2277 	struct usb_ep		*ep;
2278 	struct f_tcm_opts	*opts;
2279 	int			iface;
2280 	int			ret;
2281 
2282 	opts = container_of(f->fi, struct f_tcm_opts, func_inst);
2283 
2284 	mutex_lock(&opts->dep_lock);
2285 	if (!opts->can_attach) {
2286 		mutex_unlock(&opts->dep_lock);
2287 		return -ENODEV;
2288 	}
2289 	mutex_unlock(&opts->dep_lock);
2290 	us = usb_gstrings_attach(c->cdev, tcm_strings,
2291 		ARRAY_SIZE(tcm_us_strings));
2292 	if (IS_ERR(us))
2293 		return PTR_ERR(us);
2294 	bot_intf_desc.iInterface = us[USB_G_STR_INT_BBB].id;
2295 	uasp_intf_desc.iInterface = us[USB_G_STR_INT_UAS].id;
2296 
2297 	iface = usb_interface_id(c, f);
2298 	if (iface < 0)
2299 		return iface;
2300 
2301 	bot_intf_desc.bInterfaceNumber = iface;
2302 	uasp_intf_desc.bInterfaceNumber = iface;
2303 	fu->iface = iface;
2304 	ep = usb_ep_autoconfig(gadget, &uasp_fs_bi_desc);
2305 	if (!ep)
2306 		goto ep_fail;
2307 
2308 	fu->ep_in = ep;
2309 
2310 	ep = usb_ep_autoconfig(gadget, &uasp_fs_bo_desc);
2311 	if (!ep)
2312 		goto ep_fail;
2313 	fu->ep_out = ep;
2314 
2315 	ep = usb_ep_autoconfig(gadget, &uasp_fs_status_desc);
2316 	if (!ep)
2317 		goto ep_fail;
2318 	fu->ep_status = ep;
2319 
2320 	ep = usb_ep_autoconfig(gadget, &uasp_fs_cmd_desc);
2321 	if (!ep)
2322 		goto ep_fail;
2323 	fu->ep_cmd = ep;
2324 
2325 	/* Assume endpoint addresses are the same for both speeds */
2326 	uasp_bi_desc.bEndpointAddress =	uasp_fs_bi_desc.bEndpointAddress;
2327 	uasp_bo_desc.bEndpointAddress = uasp_fs_bo_desc.bEndpointAddress;
2328 	uasp_status_desc.bEndpointAddress =
2329 		uasp_fs_status_desc.bEndpointAddress;
2330 	uasp_cmd_desc.bEndpointAddress = uasp_fs_cmd_desc.bEndpointAddress;
2331 
2332 	uasp_ss_bi_desc.bEndpointAddress = uasp_fs_bi_desc.bEndpointAddress;
2333 	uasp_ss_bo_desc.bEndpointAddress = uasp_fs_bo_desc.bEndpointAddress;
2334 	uasp_ss_status_desc.bEndpointAddress =
2335 		uasp_fs_status_desc.bEndpointAddress;
2336 	uasp_ss_cmd_desc.bEndpointAddress = uasp_fs_cmd_desc.bEndpointAddress;
2337 
2338 	ret = usb_assign_descriptors(f, uasp_fs_function_desc,
2339 			uasp_hs_function_desc, uasp_ss_function_desc,
2340 			uasp_ss_function_desc);
2341 	if (ret)
2342 		goto ep_fail;
2343 
2344 	return 0;
2345 ep_fail:
2346 	pr_err("Can't claim all required eps\n");
2347 
2348 	return -ENOTSUPP;
2349 }
2350 
2351 struct guas_setup_wq {
2352 	struct work_struct work;
2353 	struct f_uas *fu;
2354 	unsigned int alt;
2355 };
2356 
2357 static void tcm_delayed_set_alt(struct work_struct *wq)
2358 {
2359 	struct guas_setup_wq *work = container_of(wq, struct guas_setup_wq,
2360 			work);
2361 	struct f_uas *fu = work->fu;
2362 	int alt = work->alt;
2363 
2364 	kfree(work);
2365 
2366 	if (fu->flags & USBG_IS_BOT)
2367 		bot_cleanup_old_alt(fu);
2368 	if (fu->flags & USBG_IS_UAS)
2369 		uasp_cleanup_old_alt(fu);
2370 
2371 	if (alt == USB_G_ALT_INT_BBB)
2372 		bot_set_alt(fu);
2373 	else if (alt == USB_G_ALT_INT_UAS)
2374 		uasp_set_alt(fu);
2375 	usb_composite_setup_continue(fu->function.config->cdev);
2376 }
2377 
2378 static int tcm_get_alt(struct usb_function *f, unsigned intf)
2379 {
2380 	struct f_uas *fu = to_f_uas(f);
2381 
2382 	if (fu->iface != intf)
2383 		return -EOPNOTSUPP;
2384 
2385 	if (fu->flags & USBG_IS_BOT)
2386 		return USB_G_ALT_INT_BBB;
2387 	else if (fu->flags & USBG_IS_UAS)
2388 		return USB_G_ALT_INT_UAS;
2389 
2390 	return -EOPNOTSUPP;
2391 }
2392 
2393 static int tcm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
2394 {
2395 	struct f_uas *fu = to_f_uas(f);
2396 
2397 	if (fu->iface != intf)
2398 		return -EOPNOTSUPP;
2399 
2400 	if ((alt == USB_G_ALT_INT_BBB) || (alt == USB_G_ALT_INT_UAS)) {
2401 		struct guas_setup_wq *work;
2402 
2403 		work = kmalloc(sizeof(*work), GFP_ATOMIC);
2404 		if (!work)
2405 			return -ENOMEM;
2406 		INIT_WORK(&work->work, tcm_delayed_set_alt);
2407 		work->fu = fu;
2408 		work->alt = alt;
2409 		schedule_work(&work->work);
2410 		return USB_GADGET_DELAYED_STATUS;
2411 	}
2412 	return -EOPNOTSUPP;
2413 }
2414 
2415 static void tcm_disable(struct usb_function *f)
2416 {
2417 	struct f_uas *fu = to_f_uas(f);
2418 
2419 	if (fu->flags & USBG_IS_UAS)
2420 		uasp_cleanup_old_alt(fu);
2421 	else if (fu->flags & USBG_IS_BOT)
2422 		bot_cleanup_old_alt(fu);
2423 	fu->flags = 0;
2424 }
2425 
2426 static int tcm_setup(struct usb_function *f,
2427 		const struct usb_ctrlrequest *ctrl)
2428 {
2429 	struct f_uas *fu = to_f_uas(f);
2430 
2431 	if (!(fu->flags & USBG_IS_BOT))
2432 		return -EOPNOTSUPP;
2433 
2434 	return usbg_bot_setup(f, ctrl);
2435 }
2436 
2437 static inline struct f_tcm_opts *to_f_tcm_opts(struct config_item *item)
2438 {
2439 	return container_of(to_config_group(item), struct f_tcm_opts,
2440 		func_inst.group);
2441 }
2442 
2443 static void tcm_attr_release(struct config_item *item)
2444 {
2445 	struct f_tcm_opts *opts = to_f_tcm_opts(item);
2446 
2447 	usb_put_function_instance(&opts->func_inst);
2448 }
2449 
2450 static const struct configfs_item_operations tcm_item_ops = {
2451 	.release		= tcm_attr_release,
2452 };
2453 
2454 static const struct config_item_type tcm_func_type = {
2455 	.ct_item_ops	= &tcm_item_ops,
2456 	.ct_owner	= THIS_MODULE,
2457 };
2458 
2459 static void tcm_free_inst(struct usb_function_instance *f)
2460 {
2461 	struct f_tcm_opts *opts;
2462 	unsigned i;
2463 
2464 	opts = container_of(f, struct f_tcm_opts, func_inst);
2465 
2466 	mutex_lock(&tpg_instances_lock);
2467 	for (i = 0; i < TPG_INSTANCES; ++i)
2468 		if (tpg_instances[i].func_inst == f)
2469 			break;
2470 	if (i < TPG_INSTANCES)
2471 		tpg_instances[i].func_inst = NULL;
2472 	mutex_unlock(&tpg_instances_lock);
2473 
2474 	kfree(opts);
2475 }
2476 
2477 static int tcm_register_callback(struct usb_function_instance *f)
2478 {
2479 	struct f_tcm_opts *opts = container_of(f, struct f_tcm_opts, func_inst);
2480 
2481 	mutex_lock(&opts->dep_lock);
2482 	opts->can_attach = true;
2483 	mutex_unlock(&opts->dep_lock);
2484 
2485 	return 0;
2486 }
2487 
2488 static void tcm_unregister_callback(struct usb_function_instance *f)
2489 {
2490 	struct f_tcm_opts *opts = container_of(f, struct f_tcm_opts, func_inst);
2491 
2492 	mutex_lock(&opts->dep_lock);
2493 	unregister_gadget_item(opts->
2494 		func_inst.group.cg_item.ci_parent->ci_parent);
2495 	opts->can_attach = false;
2496 	mutex_unlock(&opts->dep_lock);
2497 }
2498 
2499 static int usbg_attach(struct usbg_tpg *tpg)
2500 {
2501 	struct usb_function_instance *f = tpg->fi;
2502 	struct f_tcm_opts *opts = container_of(f, struct f_tcm_opts, func_inst);
2503 
2504 	if (opts->tcm_register_callback)
2505 		return opts->tcm_register_callback(f);
2506 
2507 	return 0;
2508 }
2509 
2510 static void usbg_detach(struct usbg_tpg *tpg)
2511 {
2512 	struct usb_function_instance *f = tpg->fi;
2513 	struct f_tcm_opts *opts = container_of(f, struct f_tcm_opts, func_inst);
2514 
2515 	if (opts->tcm_unregister_callback)
2516 		opts->tcm_unregister_callback(f);
2517 }
2518 
2519 static int tcm_set_name(struct usb_function_instance *f, const char *name)
2520 {
2521 	struct f_tcm_opts *opts = container_of(f, struct f_tcm_opts, func_inst);
2522 
2523 	pr_debug("tcm: Activating %s\n", name);
2524 
2525 	mutex_lock(&opts->dep_lock);
2526 	opts->ready = true;
2527 	mutex_unlock(&opts->dep_lock);
2528 
2529 	return 0;
2530 }
2531 
2532 static struct usb_function_instance *tcm_alloc_inst(void)
2533 {
2534 	struct f_tcm_opts *opts;
2535 	int i;
2536 
2537 
2538 	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
2539 	if (!opts)
2540 		return ERR_PTR(-ENOMEM);
2541 
2542 	mutex_lock(&tpg_instances_lock);
2543 	for (i = 0; i < TPG_INSTANCES; ++i)
2544 		if (!tpg_instances[i].func_inst)
2545 			break;
2546 
2547 	if (i == TPG_INSTANCES) {
2548 		mutex_unlock(&tpg_instances_lock);
2549 		kfree(opts);
2550 		return ERR_PTR(-EBUSY);
2551 	}
2552 	tpg_instances[i].func_inst = &opts->func_inst;
2553 	mutex_unlock(&tpg_instances_lock);
2554 
2555 	mutex_init(&opts->dep_lock);
2556 	opts->func_inst.set_inst_name = tcm_set_name;
2557 	opts->func_inst.free_func_inst = tcm_free_inst;
2558 	opts->tcm_register_callback = tcm_register_callback;
2559 	opts->tcm_unregister_callback = tcm_unregister_callback;
2560 
2561 	config_group_init_type_name(&opts->func_inst.group, "",
2562 			&tcm_func_type);
2563 
2564 	return &opts->func_inst;
2565 }
2566 
2567 static void tcm_free(struct usb_function *f)
2568 {
2569 	struct f_uas *tcm = to_f_uas(f);
2570 
2571 	kfree(tcm);
2572 }
2573 
2574 static void tcm_unbind(struct usb_configuration *c, struct usb_function *f)
2575 {
2576 	usb_free_all_descriptors(f);
2577 }
2578 
2579 static struct usb_function *tcm_alloc(struct usb_function_instance *fi)
2580 {
2581 	struct f_uas *fu;
2582 	unsigned i;
2583 
2584 	mutex_lock(&tpg_instances_lock);
2585 	for (i = 0; i < TPG_INSTANCES; ++i)
2586 		if (tpg_instances[i].func_inst == fi)
2587 			break;
2588 	if (i == TPG_INSTANCES) {
2589 		mutex_unlock(&tpg_instances_lock);
2590 		return ERR_PTR(-ENODEV);
2591 	}
2592 
2593 	fu = kzalloc(sizeof(*fu), GFP_KERNEL);
2594 	if (!fu) {
2595 		mutex_unlock(&tpg_instances_lock);
2596 		return ERR_PTR(-ENOMEM);
2597 	}
2598 
2599 	fu->function.name = "Target Function";
2600 	fu->function.bind = tcm_bind;
2601 	fu->function.unbind = tcm_unbind;
2602 	fu->function.set_alt = tcm_set_alt;
2603 	fu->function.get_alt = tcm_get_alt;
2604 	fu->function.setup = tcm_setup;
2605 	fu->function.disable = tcm_disable;
2606 	fu->function.free_func = tcm_free;
2607 	fu->tpg = tpg_instances[i].tpg;
2608 
2609 	hash_init(fu->stream_hash);
2610 	mutex_unlock(&tpg_instances_lock);
2611 
2612 	return &fu->function;
2613 }
2614 
2615 DECLARE_USB_FUNCTION(tcm, tcm_alloc_inst, tcm_alloc);
2616 
2617 static int __init tcm_init(void)
2618 {
2619 	int ret;
2620 
2621 	ret = usb_function_register(&tcmusb_func);
2622 	if (ret)
2623 		return ret;
2624 
2625 	ret = target_register_template(&usbg_ops);
2626 	if (ret)
2627 		usb_function_unregister(&tcmusb_func);
2628 
2629 	return ret;
2630 }
2631 module_init(tcm_init);
2632 
2633 static void __exit tcm_exit(void)
2634 {
2635 	target_unregister_template(&usbg_ops);
2636 	usb_function_unregister(&tcmusb_func);
2637 }
2638 module_exit(tcm_exit);
2639 
2640 MODULE_DESCRIPTION("Target based USB-Gadget");
2641 MODULE_LICENSE("GPL");
2642 MODULE_AUTHOR("Sebastian Andrzej Siewior");
2643