xref: /linux/drivers/target/loopback/tcm_loop.c (revision ca55b2fef3a9373fcfc30f82fd26bc7fccbda732)
1 /*******************************************************************************
2  *
3  * This file contains the Linux/SCSI LLD virtual SCSI initiator driver
4  * for emulated SAS initiator ports
5  *
6  * © Copyright 2011-2013 Datera, Inc.
7  *
8  * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
9  *
10  * Author: Nicholas A. Bellinger <nab@risingtidesystems.com>
11  *
12  * This program is free software; you can redistribute it and/or modify
13  * it under the terms of the GNU General Public License as published by
14  * the Free Software Foundation; either version 2 of the License, or
15  * (at your option) any later version.
16  *
17  * This program is distributed in the hope that it will be useful,
18  * but WITHOUT ANY WARRANTY; without even the implied warranty of
19  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20  * GNU General Public License for more details.
21  ****************************************************************************/
22 
23 #include <linux/module.h>
24 #include <linux/moduleparam.h>
25 #include <linux/init.h>
26 #include <linux/slab.h>
27 #include <linux/types.h>
28 #include <linux/configfs.h>
29 #include <scsi/scsi.h>
30 #include <scsi/scsi_tcq.h>
31 #include <scsi/scsi_host.h>
32 #include <scsi/scsi_device.h>
33 #include <scsi/scsi_cmnd.h>
34 
35 #include <target/target_core_base.h>
36 #include <target/target_core_fabric.h>
37 #include <target/target_core_fabric_configfs.h>
38 
39 #include "tcm_loop.h"
40 
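/*
 * Rough configfs layout exposed by this module (illustrative sketch; the
 * exact directory and attribute names come from the standard
 * target_core_fabric_configfs wiring and the tcm_loop_*_attrs tables below):
 *
 *   /sys/kernel/config/target/loopback/naa.<wwn>/           tcm_loop_make_scsi_hba()
 *   /sys/kernel/config/target/loopback/naa.<wwn>/tpgt_<N>/  tcm_loop_make_naa_tpg()
 *       nexus, transport_status                             tcm_loop_tpg_attrs[]
 *       attrib/fabric_prot_type                             tcm_loop_tpg_attrib_attrs[]
 *       lun/lun_<M>/                                        tcm_loop_port_link()/unlink()
 */
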
41 #define to_tcm_loop_hba(hba)	container_of(hba, struct tcm_loop_hba, dev)
42 
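/*
 * Workqueue used to defer SCSI command submission out of ->queuecommand()
 * context, plus a slab cache for the per-command struct tcm_loop_cmd
 * descriptors allocated in tcm_loop_queuecommand().
 */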
43 static struct workqueue_struct *tcm_loop_workqueue;
44 static struct kmem_cache *tcm_loop_cmd_cache;
45 
46 static int tcm_loop_hba_no_cnt;
47 
48 static int tcm_loop_queue_status(struct se_cmd *se_cmd);
49 
50 /*
51  * Called from struct target_core_fabric_ops->check_stop_free()
52  */
53 static int tcm_loop_check_stop_free(struct se_cmd *se_cmd)
54 {
55 	/*
56 	 * Do not release struct se_cmd's containing a valid TMR
57 	 * pointer.  These will be released directly in tcm_loop_issue_tmr()
58 	 * with transport_generic_free_cmd().
59 	 */
60 	if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
61 		return 0;
62 	/*
63 	 * Release the struct se_cmd, which will make a callback to release
64 	 * struct tcm_loop_cmd * in tcm_loop_release_cmd()
65 	 */
66 	transport_generic_free_cmd(se_cmd, 0);
67 	return 1;
68 }
69 
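/*
 * Called via target_core_fabric_ops->release_cmd() to return the
 * struct tcm_loop_cmd descriptor to tcm_loop_cmd_cache.
 */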
70 static void tcm_loop_release_cmd(struct se_cmd *se_cmd)
71 {
72 	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
73 				struct tcm_loop_cmd, tl_se_cmd);
74 
75 	kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
76 }
77 
78 static int tcm_loop_show_info(struct seq_file *m, struct Scsi_Host *host)
79 {
80 	seq_puts(m, "tcm_loop_proc_info()\n");
81 	return 0;
82 }
83 
84 static int tcm_loop_driver_probe(struct device *);
85 static int tcm_loop_driver_remove(struct device *);
86 
87 static int pseudo_lld_bus_match(struct device *dev,
88 				struct device_driver *dev_driver)
89 {
90 	return 1;
91 }
92 
93 static struct bus_type tcm_loop_lld_bus = {
94 	.name			= "tcm_loop_bus",
95 	.match			= pseudo_lld_bus_match,
96 	.probe			= tcm_loop_driver_probe,
97 	.remove			= tcm_loop_driver_remove,
98 };
99 
100 static struct device_driver tcm_loop_driverfs = {
101 	.name			= "tcm_loop",
102 	.bus			= &tcm_loop_lld_bus,
103 };
104 /*
105  * Used with root_device_register() in tcm_loop_alloc_core_bus() below
106  */
107 static struct device *tcm_loop_primary;
108 
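/*
 * Deferred from tcm_loop_queuecommand() via tcm_loop_workqueue: map the
 * incoming struct scsi_cmnd payload (including any BIDI and protection
 * scatterlists) onto the struct se_cmd and hand it to target core with
 * target_submit_cmd_map_sgls().
 */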
109 static void tcm_loop_submission_work(struct work_struct *work)
110 {
111 	struct tcm_loop_cmd *tl_cmd =
112 		container_of(work, struct tcm_loop_cmd, work);
113 	struct se_cmd *se_cmd = &tl_cmd->tl_se_cmd;
114 	struct scsi_cmnd *sc = tl_cmd->sc;
115 	struct tcm_loop_nexus *tl_nexus;
116 	struct tcm_loop_hba *tl_hba;
117 	struct tcm_loop_tpg *tl_tpg;
118 	struct scatterlist *sgl_bidi = NULL;
119 	u32 sgl_bidi_count = 0, transfer_length;
120 	int rc;
121 
122 	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
123 	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
124 
125 	/*
126 	 * Ensure that this tl_tpg reference from the incoming sc->device->id
127 	 * has already been configured via tcm_loop_make_naa_tpg().
128 	 */
129 	if (!tl_tpg->tl_hba) {
130 		set_host_byte(sc, DID_NO_CONNECT);
131 		goto out_done;
132 	}
133 	if (tl_tpg->tl_transport_status == TCM_TRANSPORT_OFFLINE) {
134 		set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
135 		goto out_done;
136 	}
137 	tl_nexus = tl_tpg->tl_nexus;
138 	if (!tl_nexus) {
139 		scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
140 				" does not exist\n");
141 		set_host_byte(sc, DID_ERROR);
142 		goto out_done;
143 	}
144 	if (scsi_bidi_cmnd(sc)) {
145 		struct scsi_data_buffer *sdb = scsi_in(sc);
146 
147 		sgl_bidi = sdb->table.sgl;
148 		sgl_bidi_count = sdb->table.nents;
149 		se_cmd->se_cmd_flags |= SCF_BIDI;
150 
151 	}
152 
153 	transfer_length = scsi_transfer_length(sc);
154 	if (!scsi_prot_sg_count(sc) &&
155 	    scsi_get_prot_op(sc) != SCSI_PROT_NORMAL) {
156 		se_cmd->prot_pto = true;
157 		/*
158 		 * loopback transport doesn't support
159 		 * WRITE_GENERATE, READ_STRIP protection
160 		 * information operations, go ahead unprotected.
161 		 */
162 		transfer_length = scsi_bufflen(sc);
163 	}
164 
165 	se_cmd->tag = tl_cmd->sc_cmd_tag;
166 	rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd,
167 			&tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun,
168 			transfer_length, TCM_SIMPLE_TAG,
169 			sc->sc_data_direction, 0,
170 			scsi_sglist(sc), scsi_sg_count(sc),
171 			sgl_bidi, sgl_bidi_count,
172 			scsi_prot_sglist(sc), scsi_prot_sg_count(sc));
173 	if (rc < 0) {
174 		set_host_byte(sc, DID_NO_CONNECT);
175 		goto out_done;
176 	}
177 	return;
178 
179 out_done:
180 	kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
181 	sc->scsi_done(sc);
182 	return;
183 }
184 
185 /*
186  * ->queuecommand can be and usually is called from interrupt context, so
187  * defer the actual submission to a workqueue.
188  */
189 static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
190 {
191 	struct tcm_loop_cmd *tl_cmd;
192 
193 	pr_debug("tcm_loop_queuecommand() %d:%d:%d:%llu got CDB: 0x%02x"
194 		" scsi_buf_len: %u\n", sc->device->host->host_no,
195 		sc->device->id, sc->device->channel, sc->device->lun,
196 		sc->cmnd[0], scsi_bufflen(sc));
197 
198 	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC);
199 	if (!tl_cmd) {
200 		pr_err("Unable to allocate struct tcm_loop_cmd\n");
201 		set_host_byte(sc, DID_ERROR);
202 		sc->scsi_done(sc);
203 		return 0;
204 	}
205 
206 	tl_cmd->sc = sc;
207 	tl_cmd->sc_cmd_tag = sc->request->tag;
208 	INIT_WORK(&tl_cmd->work, tcm_loop_submission_work);
209 	queue_work(tcm_loop_workqueue, &tl_cmd->work);
210 	return 0;
211 }
212 
213 /*
214  * Called from SCSI EH process context to issue an ABORT_TASK or LUN_RESET
215  * TMR against struct scsi_device
216  */
217 static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
218 			      u64 lun, int task, enum tcm_tmreq_table tmr)
219 {
220 	struct se_cmd *se_cmd = NULL;
221 	struct se_session *se_sess;
222 	struct se_portal_group *se_tpg;
223 	struct tcm_loop_nexus *tl_nexus;
224 	struct tcm_loop_cmd *tl_cmd = NULL;
225 	struct tcm_loop_tmr *tl_tmr = NULL;
226 	int ret = TMR_FUNCTION_FAILED, rc;
227 
228 	/*
229 	 * Locate the tl_nexus and se_sess pointers
230 	 */
231 	tl_nexus = tl_tpg->tl_nexus;
232 	if (!tl_nexus) {
233 		pr_err("Unable to perform TMR without"
234 				" active I_T Nexus\n");
235 		return ret;
236 	}
237 
238 	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
239 	if (!tl_cmd) {
240 		pr_err("Unable to allocate memory for tl_cmd\n");
241 		return ret;
242 	}
243 
244 	tl_tmr = kzalloc(sizeof(struct tcm_loop_tmr), GFP_KERNEL);
245 	if (!tl_tmr) {
246 		pr_err("Unable to allocate memory for tl_tmr\n");
247 		goto release;
248 	}
249 	init_waitqueue_head(&tl_tmr->tl_tmr_wait);
250 
251 	se_cmd = &tl_cmd->tl_se_cmd;
252 	se_tpg = &tl_tpg->tl_se_tpg;
253 	se_sess = tl_tpg->tl_nexus->se_sess;
254 	/*
255 	 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
256 	 */
257 	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 0,
258 				DMA_NONE, TCM_SIMPLE_TAG,
259 				&tl_cmd->tl_sense_buf[0]);
260 
261 	rc = core_tmr_alloc_req(se_cmd, tl_tmr, tmr, GFP_KERNEL);
262 	if (rc < 0)
263 		goto release;
264 
265 	if (tmr == TMR_ABORT_TASK)
266 		se_cmd->se_tmr_req->ref_task_tag = task;
267 
268 	/*
269 	 * Locate the underlying TCM struct se_lun
270 	 */
271 	if (transport_lookup_tmr_lun(se_cmd, lun) < 0) {
272 		ret = TMR_LUN_DOES_NOT_EXIST;
273 		goto release;
274 	}
275 	/*
276 	 * Queue the TMR to TCM Core and sleep waiting for
277 	 * tcm_loop_queue_tm_rsp() to wake us up.
278 	 */
279 	transport_generic_handle_tmr(se_cmd);
280 	wait_event(tl_tmr->tl_tmr_wait, atomic_read(&tl_tmr->tmr_complete));
281 	/*
282 	 * The TMR has completed, check the response status and
283 	 * then release allocations.
284 	 */
285 	ret = se_cmd->se_tmr_req->response;
286 release:
287 	if (se_cmd)
288 		transport_generic_free_cmd(se_cmd, 1);
289 	else
290 		kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
291 	kfree(tl_tmr);
292 	return ret;
293 }
294 
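/*
 * Called from SCSI EH process context to abort an individual struct
 * scsi_cmnd, by issuing an ABORT_TASK TMR through tcm_loop_issue_tmr().
 */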
295 static int tcm_loop_abort_task(struct scsi_cmnd *sc)
296 {
297 	struct tcm_loop_hba *tl_hba;
298 	struct tcm_loop_tpg *tl_tpg;
299 	int ret = FAILED;
300 
301 	/*
302 	 * Locate the struct tcm_loop_hba pointer
303 	 */
304 	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
305 	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
306 	ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
307 				 sc->request->tag, TMR_ABORT_TASK);
308 	return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
309 }
310 
311 /*
312  * Called from SCSI EH process context to issue a LUN_RESET TMR
313  * to struct scsi_device
314  */
315 static int tcm_loop_device_reset(struct scsi_cmnd *sc)
316 {
317 	struct tcm_loop_hba *tl_hba;
318 	struct tcm_loop_tpg *tl_tpg;
319 	int ret = FAILED;
320 
321 	/*
322 	 * Locate the struct tcm_loop_hba pointer
323 	 */
324 	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
325 	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
326 
327 	ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
328 				 0, TMR_LUN_RESET);
329 	return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
330 }
331 
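/*
 * Called from SCSI EH process context for a target reset; simply marks the
 * TPG addressed by sc->device->id as TCM_TRANSPORT_ONLINE again.
 */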
332 static int tcm_loop_target_reset(struct scsi_cmnd *sc)
333 {
334 	struct tcm_loop_hba *tl_hba;
335 	struct tcm_loop_tpg *tl_tpg;
336 
337 	/*
338 	 * Locate the struct tcm_loop_hba pointer
339 	 */
340 	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
341 	if (!tl_hba) {
342 		pr_err("Unable to perform target reset without"
343 				" active I_T Nexus\n");
344 		return FAILED;
345 	}
346 	/*
347 	 * Locate the tl_tpg pointer from TargetID in sc->device->id
348 	 */
349 	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
350 	if (tl_tpg) {
351 		tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
352 		return SUCCESS;
353 	}
354 	return FAILED;
355 }
356 
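/*
 * Flag each emulated struct scsi_device request_queue as BIDI capable so
 * that bidirectional SCSI commands can be looped back into target core.
 */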
357 static int tcm_loop_slave_alloc(struct scsi_device *sd)
358 {
359 	set_bit(QUEUE_FLAG_BIDI, &sd->request_queue->queue_flags);
360 	return 0;
361 }
362 
363 static struct scsi_host_template tcm_loop_driver_template = {
364 	.show_info		= tcm_loop_show_info,
365 	.proc_name		= "tcm_loopback",
366 	.name			= "TCM_Loopback",
367 	.queuecommand		= tcm_loop_queuecommand,
368 	.change_queue_depth	= scsi_change_queue_depth,
369 	.eh_abort_handler = tcm_loop_abort_task,
370 	.eh_device_reset_handler = tcm_loop_device_reset,
371 	.eh_target_reset_handler = tcm_loop_target_reset,
372 	.can_queue		= 1024,
373 	.this_id		= -1,
374 	.sg_tablesize		= 256,
375 	.cmd_per_lun		= 1024,
376 	.max_sectors		= 0xFFFF,
377 	.use_clustering		= DISABLE_CLUSTERING,
378 	.slave_alloc		= tcm_loop_slave_alloc,
379 	.module			= THIS_MODULE,
380 	.use_blk_tags		= 1,
381 	.track_queue_depth	= 1,
382 };
383 
384 static int tcm_loop_driver_probe(struct device *dev)
385 {
386 	struct tcm_loop_hba *tl_hba;
387 	struct Scsi_Host *sh;
388 	int error, host_prot;
389 
390 	tl_hba = to_tcm_loop_hba(dev);
391 
392 	sh = scsi_host_alloc(&tcm_loop_driver_template,
393 			sizeof(struct tcm_loop_hba));
394 	if (!sh) {
395 		pr_err("Unable to allocate struct scsi_host\n");
396 		return -ENODEV;
397 	}
398 	tl_hba->sh = sh;
399 
400 	/*
401 	 * Assign the struct tcm_loop_hba pointer to struct Scsi_Host->hostdata
402 	 */
403 	*((struct tcm_loop_hba **)sh->hostdata) = tl_hba;
404 	/*
405 	 * Setup single ID, Channel and LUN for now..
406 	 */
407 	sh->max_id = 2;
408 	sh->max_lun = 0;
409 	sh->max_channel = 0;
410 	sh->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;
411 
412 	host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
413 		    SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
414 		    SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;
415 
416 	scsi_host_set_prot(sh, host_prot);
417 	scsi_host_set_guard(sh, SHOST_DIX_GUARD_CRC);
418 
419 	error = scsi_add_host(sh, &tl_hba->dev);
420 	if (error) {
421 		pr_err("%s: scsi_add_host failed\n", __func__);
422 		scsi_host_put(sh);
423 		return -ENODEV;
424 	}
425 	return 0;
426 }
427 
428 static int tcm_loop_driver_remove(struct device *dev)
429 {
430 	struct tcm_loop_hba *tl_hba;
431 	struct Scsi_Host *sh;
432 
433 	tl_hba = to_tcm_loop_hba(dev);
434 	sh = tl_hba->sh;
435 
436 	scsi_remove_host(sh);
437 	scsi_host_put(sh);
438 	return 0;
439 }
440 
441 static void tcm_loop_release_adapter(struct device *dev)
442 {
443 	struct tcm_loop_hba *tl_hba = to_tcm_loop_hba(dev);
444 
445 	kfree(tl_hba);
446 }
447 
448 /*
449  * Called from tcm_loop_make_scsi_hba() below
450  */
451 static int tcm_loop_setup_hba_bus(struct tcm_loop_hba *tl_hba, int tcm_loop_host_id)
452 {
453 	int ret;
454 
455 	tl_hba->dev.bus = &tcm_loop_lld_bus;
456 	tl_hba->dev.parent = tcm_loop_primary;
457 	tl_hba->dev.release = &tcm_loop_release_adapter;
458 	dev_set_name(&tl_hba->dev, "tcm_loop_adapter_%d", tcm_loop_host_id);
459 
460 	ret = device_register(&tl_hba->dev);
461 	if (ret) {
462 		pr_err("device_register() failed for"
463 				" tl_hba->dev: %d\n", ret);
464 		return -ENODEV;
465 	}
466 
467 	return 0;
468 }
469 
470 /*
471  * Called from tcm_loop_fabric_init() below to load the emulated
472  * tcm_loop SCSI bus.
473  */
474 static int tcm_loop_alloc_core_bus(void)
475 {
476 	int ret;
477 
478 	tcm_loop_primary = root_device_register("tcm_loop_0");
479 	if (IS_ERR(tcm_loop_primary)) {
480 		pr_err("Unable to allocate tcm_loop_primary\n");
481 		return PTR_ERR(tcm_loop_primary);
482 	}
483 
484 	ret = bus_register(&tcm_loop_lld_bus);
485 	if (ret) {
486 		pr_err("bus_register() failed for tcm_loop_lld_bus\n");
487 		goto dev_unreg;
488 	}
489 
490 	ret = driver_register(&tcm_loop_driverfs);
491 	if (ret) {
492 		pr_err("driver_register() failed for"
493 				" tcm_loop_driverfs\n");
494 		goto bus_unreg;
495 	}
496 
497 	pr_debug("Initialized TCM Loop Core Bus\n");
498 	return ret;
499 
500 bus_unreg:
501 	bus_unregister(&tcm_loop_lld_bus);
502 dev_unreg:
503 	root_device_unregister(tcm_loop_primary);
504 	return ret;
505 }
506 
507 static void tcm_loop_release_core_bus(void)
508 {
509 	driver_unregister(&tcm_loop_driverfs);
510 	bus_unregister(&tcm_loop_lld_bus);
511 	root_device_unregister(tcm_loop_primary);
512 
513 	pr_debug("Releasing TCM Loop Core BUS\n");
514 }
515 
516 static char *tcm_loop_get_fabric_name(void)
517 {
518 	return "loopback";
519 }
520 
521 static inline struct tcm_loop_tpg *tl_tpg(struct se_portal_group *se_tpg)
522 {
523 	return container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
524 }
525 
526 static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg)
527 {
528 	/*
529 	 * Return the passed NAA identifier for the Target Port
530 	 */
531 	return &tl_tpg(se_tpg)->tl_hba->tl_wwn_address[0];
532 }
533 
534 static u16 tcm_loop_get_tag(struct se_portal_group *se_tpg)
535 {
536 	/*
537 	 * This Tag is used when forming SCSI Name identifier in EVPD=1 0x83
538 	 * to represent the SCSI Target Port.
539 	 */
540 	return tl_tpg(se_tpg)->tl_tpgt;
541 }
542 
543 /*
544  * Returning 1 here allows a target_core_mod struct se_node_acl to be generated
545  * dynamically for the incoming fabric-dependent SCSI Initiator Port
546  */
547 static int tcm_loop_check_demo_mode(struct se_portal_group *se_tpg)
548 {
549 	return 1;
550 }
551 
552 static int tcm_loop_check_demo_mode_cache(struct se_portal_group *se_tpg)
553 {
554 	return 0;
555 }
556 
557 /*
558  * Allow I_T Nexus full READ-WRITE access without explicit Initiator Node ACLs for
559  * local virtual Linux/SCSI LLD passthrough into VM hypervisor guest
560  */
561 static int tcm_loop_check_demo_mode_write_protect(struct se_portal_group *se_tpg)
562 {
563 	return 0;
564 }
565 
566 /*
567  * Because TCM_Loop does not use explicit ACLs and MappedLUNs, this will
568  * never be called for TCM_Loop by target_core_fabric_configfs.c code.
569  * It has been added here as a nop for target_fabric_tf_ops_check()
570  */
571 static int tcm_loop_check_prod_mode_write_protect(struct se_portal_group *se_tpg)
572 {
573 	return 0;
574 }
575 
576 static int tcm_loop_check_prot_fabric_only(struct se_portal_group *se_tpg)
577 {
578 	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
579 						   tl_se_tpg);
580 	return tl_tpg->tl_fabric_prot_type;
581 }
582 
583 static u32 tcm_loop_get_inst_index(struct se_portal_group *se_tpg)
584 {
585 	return 1;
586 }
587 
588 static u32 tcm_loop_sess_get_index(struct se_session *se_sess)
589 {
590 	return 1;
591 }
592 
593 static void tcm_loop_set_default_node_attributes(struct se_node_acl *se_acl)
594 {
595 	return;
596 }
597 
598 static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd)
599 {
600 	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
601 			struct tcm_loop_cmd, tl_se_cmd);
602 
603 	return tl_cmd->sc_cmd_state;
604 }
605 
606 static int tcm_loop_shutdown_session(struct se_session *se_sess)
607 {
608 	return 0;
609 }
610 
611 static void tcm_loop_close_session(struct se_session *se_sess)
612 {
613 	return;
614 }
615 
616 static int tcm_loop_write_pending(struct se_cmd *se_cmd)
617 {
618 	/*
619 	 * Since Linux/SCSI has already sent down a struct scsi_cmnd with
620 	 * sc->sc_data_direction of DMA_TO_DEVICE, its scatterlist memory has
621 	 * already been mapped onto struct se_cmd by target_submit_cmd_map_sgls()
622 	 * in tcm_loop_submission_work().
623 	 *
624 	 * We now tell TCM to add this WRITE CDB directly into the TCM storage
625 	 * object execution queue.
626 	 */
627 	target_execute_cmd(se_cmd);
628 	return 0;
629 }
630 
631 static int tcm_loop_write_pending_status(struct se_cmd *se_cmd)
632 {
633 	return 0;
634 }
635 
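/*
 * Fabric callback invoked once READ payload data has been placed into the
 * struct scsi_cmnd scatterlist; complete the command back to the SCSI
 * midlayer with GOOD status and any residual count.
 */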
636 static int tcm_loop_queue_data_in(struct se_cmd *se_cmd)
637 {
638 	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
639 				struct tcm_loop_cmd, tl_se_cmd);
640 	struct scsi_cmnd *sc = tl_cmd->sc;
641 
642 	pr_debug("tcm_loop_queue_data_in() called for scsi_cmnd: %p"
643 		     " cdb: 0x%02x\n", sc, sc->cmnd[0]);
644 
645 	sc->result = SAM_STAT_GOOD;
646 	set_host_byte(sc, DID_OK);
647 	if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
648 	    (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
649 		scsi_set_resid(sc, se_cmd->residual_count);
650 	sc->scsi_done(sc);
651 	return 0;
652 }
653 
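/*
 * Fabric callback invoked when SCSI status (and possibly sense data) is
 * ready; copy sense into sc->sense_buffer and complete the command.
 */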
654 static int tcm_loop_queue_status(struct se_cmd *se_cmd)
655 {
656 	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
657 				struct tcm_loop_cmd, tl_se_cmd);
658 	struct scsi_cmnd *sc = tl_cmd->sc;
659 
660 	pr_debug("tcm_loop_queue_status() called for scsi_cmnd: %p"
661 			" cdb: 0x%02x\n", sc, sc->cmnd[0]);
662 
663 	if (se_cmd->sense_buffer &&
664 	   ((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
665 	    (se_cmd->se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
666 
667 		memcpy(sc->sense_buffer, se_cmd->sense_buffer,
668 				SCSI_SENSE_BUFFERSIZE);
669 		sc->result = SAM_STAT_CHECK_CONDITION;
670 		set_driver_byte(sc, DRIVER_SENSE);
671 	} else
672 		sc->result = se_cmd->scsi_status;
673 
674 	set_host_byte(sc, DID_OK);
675 	if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
676 	    (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
677 		scsi_set_resid(sc, se_cmd->residual_count);
678 	sc->scsi_done(sc);
679 	return 0;
680 }
681 
682 static void tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd)
683 {
684 	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
685 	struct tcm_loop_tmr *tl_tmr = se_tmr->fabric_tmr_ptr;
686 	/*
687 	 * The SCSI EH thread will be sleeping on tl_tmr->tl_tmr_wait, go ahead
688 	 * and wake up the wait_queue_head_t in tcm_loop_issue_tmr()
689 	 */
690 	atomic_set(&tl_tmr->tmr_complete, 1);
691 	wake_up(&tl_tmr->tl_tmr_wait);
692 }
693 
694 static void tcm_loop_aborted_task(struct se_cmd *se_cmd)
695 {
696 	return;
697 }
698 
699 static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba)
700 {
701 	switch (tl_hba->tl_proto_id) {
702 	case SCSI_PROTOCOL_SAS:
703 		return "SAS";
704 	case SCSI_PROTOCOL_FCP:
705 		return "FCP";
706 	case SCSI_PROTOCOL_ISCSI:
707 		return "iSCSI";
708 	default:
709 		break;
710 	}
711 
712 	return "Unknown";
713 }
714 
715 /* Start items for tcm_loop_port_cit */
716 
717 static int tcm_loop_port_link(
718 	struct se_portal_group *se_tpg,
719 	struct se_lun *lun)
720 {
721 	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
722 				struct tcm_loop_tpg, tl_se_tpg);
723 	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
724 
725 	atomic_inc_mb(&tl_tpg->tl_tpg_port_count);
726 	/*
727 	 * Add Linux/SCSI struct scsi_device by HCTL
728 	 */
729 	scsi_add_device(tl_hba->sh, 0, tl_tpg->tl_tpgt, lun->unpacked_lun);
730 
731 	pr_debug("TCM_Loop_ConfigFS: Port Link Successful\n");
732 	return 0;
733 }
734 
735 static void tcm_loop_port_unlink(
736 	struct se_portal_group *se_tpg,
737 	struct se_lun *se_lun)
738 {
739 	struct scsi_device *sd;
740 	struct tcm_loop_hba *tl_hba;
741 	struct tcm_loop_tpg *tl_tpg;
742 
743 	tl_tpg = container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
744 	tl_hba = tl_tpg->tl_hba;
745 
746 	sd = scsi_device_lookup(tl_hba->sh, 0, tl_tpg->tl_tpgt,
747 				se_lun->unpacked_lun);
748 	if (!sd) {
749 		pr_err("Unable to locate struct scsi_device for %d:%d:"
750 			"%llu\n", 0, tl_tpg->tl_tpgt, se_lun->unpacked_lun);
751 		return;
752 	}
753 	/*
754 	 * Remove Linux/SCSI struct scsi_device by HCTL
755 	 */
756 	scsi_remove_device(sd);
757 	scsi_device_put(sd);
758 
759 	atomic_dec_mb(&tl_tpg->tl_tpg_port_count);
760 
761 	pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n");
762 }
763 
764 /* End items for tcm_loop_port_cit */
765 
766 static ssize_t tcm_loop_tpg_attrib_show_fabric_prot_type(
767 	struct se_portal_group *se_tpg,
768 	char *page)
769 {
770 	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
771 						   tl_se_tpg);
772 
773 	return sprintf(page, "%d\n", tl_tpg->tl_fabric_prot_type);
774 }
775 
776 static ssize_t tcm_loop_tpg_attrib_store_fabric_prot_type(
777 	struct se_portal_group *se_tpg,
778 	const char *page,
779 	size_t count)
780 {
781 	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
782 						   tl_se_tpg);
783 	unsigned long val;
784 	int ret = kstrtoul(page, 0, &val);
785 
786 	if (ret) {
787 		pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
788 		return ret;
789 	}
790 	if (val != 0 && val != 1 && val != 3) {
791 		pr_err("Invalid tcm_loop fabric_prot_type: %lu\n", val);
792 		return -EINVAL;
793 	}
794 	tl_tpg->tl_fabric_prot_type = val;
795 
796 	return count;
797 }
798 
799 TF_TPG_ATTRIB_ATTR(tcm_loop, fabric_prot_type, S_IRUGO | S_IWUSR);
800 
801 static struct configfs_attribute *tcm_loop_tpg_attrib_attrs[] = {
802 	&tcm_loop_tpg_attrib_fabric_prot_type.attr,
803 	NULL,
804 };
805 
806 /* Start items for tcm_loop_nexus_cit */
807 
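/*
 * Establish the emulated I_T Nexus: allocate a struct tcm_loop_nexus,
 * create the backing se_session with DIN/DOUT protection passthrough, and
 * register it against the demo-mode generated se_node_acl for 'name'.
 */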
808 static int tcm_loop_make_nexus(
809 	struct tcm_loop_tpg *tl_tpg,
810 	const char *name)
811 {
812 	struct se_portal_group *se_tpg;
813 	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
814 	struct tcm_loop_nexus *tl_nexus;
815 	int ret = -ENOMEM;
816 
817 	if (tl_tpg->tl_nexus) {
818 		pr_debug("tl_tpg->tl_nexus already exists\n");
819 		return -EEXIST;
820 	}
821 	se_tpg = &tl_tpg->tl_se_tpg;
822 
823 	tl_nexus = kzalloc(sizeof(struct tcm_loop_nexus), GFP_KERNEL);
824 	if (!tl_nexus) {
825 		pr_err("Unable to allocate struct tcm_loop_nexus\n");
826 		return -ENOMEM;
827 	}
828 	/*
829 	 * Initialize the struct se_session pointer
830 	 */
831 	tl_nexus->se_sess = transport_init_session(
832 				TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS);
833 	if (IS_ERR(tl_nexus->se_sess)) {
834 		ret = PTR_ERR(tl_nexus->se_sess);
835 		goto out;
836 	}
837 	/*
838 	 * Since we are running in 'demo mode' this call will generate a
839 	 * struct se_node_acl for the tcm_loop struct se_portal_group with the SCSI
840 	 * Initiator port name of the passed configfs group 'name'.
841 	 */
842 	tl_nexus->se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
843 				se_tpg, (unsigned char *)name);
844 	if (!tl_nexus->se_sess->se_node_acl) {
845 		transport_free_session(tl_nexus->se_sess);
846 		goto out;
847 	}
848 	/* Now, register the I_T Nexus as active. */
849 	transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl,
850 			tl_nexus->se_sess, tl_nexus);
851 	tl_tpg->tl_nexus = tl_nexus;
852 	pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
853 		" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
854 		name);
855 	return 0;
856 
857 out:
858 	kfree(tl_nexus);
859 	return ret;
860 }
861 
862 static int tcm_loop_drop_nexus(
863 	struct tcm_loop_tpg *tpg)
864 {
865 	struct se_session *se_sess;
866 	struct tcm_loop_nexus *tl_nexus;
867 
868 	tl_nexus = tpg->tl_nexus;
869 	if (!tl_nexus)
870 		return -ENODEV;
871 
872 	se_sess = tl_nexus->se_sess;
873 	if (!se_sess)
874 		return -ENODEV;
875 
876 	if (atomic_read(&tpg->tl_tpg_port_count)) {
877 		pr_err("Unable to remove TCM_Loop I_T Nexus with"
878 			" active TPG port count: %d\n",
879 			atomic_read(&tpg->tl_tpg_port_count));
880 		return -EPERM;
881 	}
882 
883 	pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated"
884 		" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tpg->tl_hba),
885 		tl_nexus->se_sess->se_node_acl->initiatorname);
886 	/*
887 	 * Release the SCSI I_T Nexus to the emulated Target Port
888 	 */
889 	transport_deregister_session(tl_nexus->se_sess);
890 	tpg->tl_nexus = NULL;
891 	kfree(tl_nexus);
892 	return 0;
893 }
894 
895 /* End items for tcm_loop_nexus_cit */
896 
897 static ssize_t tcm_loop_tpg_show_nexus(
898 	struct se_portal_group *se_tpg,
899 	char *page)
900 {
901 	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
902 			struct tcm_loop_tpg, tl_se_tpg);
903 	struct tcm_loop_nexus *tl_nexus;
904 	ssize_t ret;
905 
906 	tl_nexus = tl_tpg->tl_nexus;
907 	if (!tl_nexus)
908 		return -ENODEV;
909 
910 	ret = snprintf(page, PAGE_SIZE, "%s\n",
911 		tl_nexus->se_sess->se_node_acl->initiatorname);
912 
913 	return ret;
914 }
915 
916 static ssize_t tcm_loop_tpg_store_nexus(
917 	struct se_portal_group *se_tpg,
918 	const char *page,
919 	size_t count)
920 {
921 	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
922 			struct tcm_loop_tpg, tl_se_tpg);
923 	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
924 	unsigned char i_port[TL_WWN_ADDR_LEN], *ptr, *port_ptr;
925 	int ret;
926 	/*
927 	 * Shutdown the active I_T nexus if 'NULL' is passed..
928 	 */
929 	if (!strncmp(page, "NULL", 4)) {
930 		ret = tcm_loop_drop_nexus(tl_tpg);
931 		return (!ret) ? count : ret;
932 	}
933 	/*
934 	 * Otherwise make sure the passed virtual Initiator port WWN matches
935 	 * the fabric protocol_id set in tcm_loop_make_scsi_hba(), and call
936 	 * tcm_loop_make_nexus()
937 	 */
938 	if (strlen(page) >= TL_WWN_ADDR_LEN) {
939 		pr_err("Emulated NAA SAS Address: %s, exceeds"
940 				" max: %d\n", page, TL_WWN_ADDR_LEN);
941 		return -EINVAL;
942 	}
943 	snprintf(&i_port[0], TL_WWN_ADDR_LEN, "%s", page);
944 
945 	ptr = strstr(i_port, "naa.");
946 	if (ptr) {
947 		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_SAS) {
948 			pr_err("Passed SAS Initiator Port %s does not"
949 				" match target port protoid: %s\n", i_port,
950 				tcm_loop_dump_proto_id(tl_hba));
951 			return -EINVAL;
952 		}
953 		port_ptr = &i_port[0];
954 		goto check_newline;
955 	}
956 	ptr = strstr(i_port, "fc.");
957 	if (ptr) {
958 		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_FCP) {
959 			pr_err("Passed FCP Initiator Port %s does not"
960 				" match target port protoid: %s\n", i_port,
961 				tcm_loop_dump_proto_id(tl_hba));
962 			return -EINVAL;
963 		}
964 		port_ptr = &i_port[3]; /* Skip over "fc." */
965 		goto check_newline;
966 	}
967 	ptr = strstr(i_port, "iqn.");
968 	if (ptr) {
969 		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_ISCSI) {
970 			pr_err("Passed iSCSI Initiator Port %s does not"
971 				" match target port protoid: %s\n", i_port,
972 				tcm_loop_dump_proto_id(tl_hba));
973 			return -EINVAL;
974 		}
975 		port_ptr = &i_port[0];
976 		goto check_newline;
977 	}
978 	pr_err("Unable to locate prefix for emulated Initiator Port:"
979 			" %s\n", i_port);
980 	return -EINVAL;
981 	/*
982 	 * Clear any trailing newline for the NAA WWN
983 	 */
984 check_newline:
985 	if (i_port[strlen(i_port)-1] == '\n')
986 		i_port[strlen(i_port)-1] = '\0';
987 
988 	ret = tcm_loop_make_nexus(tl_tpg, port_ptr);
989 	if (ret < 0)
990 		return ret;
991 
992 	return count;
993 }
994 
995 TF_TPG_BASE_ATTR(tcm_loop, nexus, S_IRUGO | S_IWUSR);
996 
997 static ssize_t tcm_loop_tpg_show_transport_status(
998 	struct se_portal_group *se_tpg,
999 	char *page)
1000 {
1001 	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
1002 			struct tcm_loop_tpg, tl_se_tpg);
1003 	const char *status = NULL;
1004 	ssize_t ret = -EINVAL;
1005 
1006 	switch (tl_tpg->tl_transport_status) {
1007 	case TCM_TRANSPORT_ONLINE:
1008 		status = "online";
1009 		break;
1010 	case TCM_TRANSPORT_OFFLINE:
1011 		status = "offline";
1012 		break;
1013 	default:
1014 		break;
1015 	}
1016 
1017 	if (status)
1018 		ret = snprintf(page, PAGE_SIZE, "%s\n", status);
1019 
1020 	return ret;
1021 }
1022 
1023 static ssize_t tcm_loop_tpg_store_transport_status(
1024 	struct se_portal_group *se_tpg,
1025 	const char *page,
1026 	size_t count)
1027 {
1028 	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
1029 			struct tcm_loop_tpg, tl_se_tpg);
1030 
1031 	if (!strncmp(page, "online", 6)) {
1032 		tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
1033 		return count;
1034 	}
1035 	if (!strncmp(page, "offline", 7)) {
1036 		tl_tpg->tl_transport_status = TCM_TRANSPORT_OFFLINE;
1037 		if (tl_tpg->tl_nexus) {
1038 			struct se_session *tl_sess = tl_tpg->tl_nexus->se_sess;
1039 
1040 			core_allocate_nexus_loss_ua(tl_sess->se_node_acl);
1041 		}
1042 		return count;
1043 	}
1044 	return -EINVAL;
1045 }
1046 
1047 TF_TPG_BASE_ATTR(tcm_loop, transport_status, S_IRUGO | S_IWUSR);
1048 
1049 static struct configfs_attribute *tcm_loop_tpg_attrs[] = {
1050 	&tcm_loop_tpg_nexus.attr,
1051 	&tcm_loop_tpg_transport_status.attr,
1052 	NULL,
1053 };
1054 
1055 /* Start items for tcm_loop_naa_cit */
1056 
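/*
 * Parse the "tpgt_$TPGT" configfs directory name and register the matching
 * tl_hba->tl_hba_tpgs[] slot as an emulated TCM Target Endpoint via
 * core_tpg_register().
 */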
1057 static struct se_portal_group *tcm_loop_make_naa_tpg(
1058 	struct se_wwn *wwn,
1059 	struct config_group *group,
1060 	const char *name)
1061 {
1062 	struct tcm_loop_hba *tl_hba = container_of(wwn,
1063 			struct tcm_loop_hba, tl_hba_wwn);
1064 	struct tcm_loop_tpg *tl_tpg;
1065 	int ret;
1066 	unsigned long tpgt;
1067 
1068 	if (strstr(name, "tpgt_") != name) {
1069 		pr_err("Unable to locate \"tpgt_#\" directory"
1070 				" group\n");
1071 		return ERR_PTR(-EINVAL);
1072 	}
1073 	if (kstrtoul(name+5, 10, &tpgt))
1074 		return ERR_PTR(-EINVAL);
1075 
1076 	if (tpgt >= TL_TPGS_PER_HBA) {
1077 		pr_err("Passed tpgt: %lu exceeds TL_TPGS_PER_HBA:"
1078 				" %u\n", tpgt, TL_TPGS_PER_HBA);
1079 		return ERR_PTR(-EINVAL);
1080 	}
1081 	tl_tpg = &tl_hba->tl_hba_tpgs[tpgt];
1082 	tl_tpg->tl_hba = tl_hba;
1083 	tl_tpg->tl_tpgt = tpgt;
1084 	/*
1085 	 * Register the tl_tpg as an emulated TCM Target Endpoint
1086 	 */
1087 	ret = core_tpg_register(wwn, &tl_tpg->tl_se_tpg, tl_hba->tl_proto_id);
1088 	if (ret < 0)
1089 		return ERR_PTR(-ENOMEM);
1090 
1091 	pr_debug("TCM_Loop_ConfigFS: Allocated Emulated %s"
1092 		" Target Port %s,t,0x%04lx\n", tcm_loop_dump_proto_id(tl_hba),
1093 		config_item_name(&wwn->wwn_group.cg_item), tpgt);
1094 
1095 	return &tl_tpg->tl_se_tpg;
1096 }
1097 
1098 static void tcm_loop_drop_naa_tpg(
1099 	struct se_portal_group *se_tpg)
1100 {
1101 	struct se_wwn *wwn = se_tpg->se_tpg_wwn;
1102 	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
1103 				struct tcm_loop_tpg, tl_se_tpg);
1104 	struct tcm_loop_hba *tl_hba;
1105 	unsigned short tpgt;
1106 
1107 	tl_hba = tl_tpg->tl_hba;
1108 	tpgt = tl_tpg->tl_tpgt;
1109 	/*
1110 	 * Release the I_T Nexus for the Virtual target link if present
1111 	 */
1112 	tcm_loop_drop_nexus(tl_tpg);
1113 	/*
1114 	 * Deregister the tl_tpg as an emulated TCM Target Endpoint
1115 	 */
1116 	core_tpg_deregister(se_tpg);
1117 
1118 	tl_tpg->tl_hba = NULL;
1119 	tl_tpg->tl_tpgt = 0;
1120 
1121 	pr_debug("TCM_Loop_ConfigFS: Deallocated Emulated %s"
1122 		" Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba),
1123 		config_item_name(&wwn->wwn_group.cg_item), tpgt);
1124 }
1125 
1126 /* End items for tcm_loop_naa_cit */
1127 
1128 /* Start items for tcm_loop_cit */
1129 
1130 static struct se_wwn *tcm_loop_make_scsi_hba(
1131 	struct target_fabric_configfs *tf,
1132 	struct config_group *group,
1133 	const char *name)
1134 {
1135 	struct tcm_loop_hba *tl_hba;
1136 	struct Scsi_Host *sh;
1137 	char *ptr;
1138 	int ret, off = 0;
1139 
1140 	tl_hba = kzalloc(sizeof(struct tcm_loop_hba), GFP_KERNEL);
1141 	if (!tl_hba) {
1142 		pr_err("Unable to allocate struct tcm_loop_hba\n");
1143 		return ERR_PTR(-ENOMEM);
1144 	}
1145 	/*
1146 	 * Determine the emulated Protocol Identifier and Target Port Name
1147 	 * based on the incoming configfs directory name.
1148 	 */
1149 	ptr = strstr(name, "naa.");
1150 	if (ptr) {
1151 		tl_hba->tl_proto_id = SCSI_PROTOCOL_SAS;
1152 		goto check_len;
1153 	}
1154 	ptr = strstr(name, "fc.");
1155 	if (ptr) {
1156 		tl_hba->tl_proto_id = SCSI_PROTOCOL_FCP;
1157 		off = 3; /* Skip over "fc." */
1158 		goto check_len;
1159 	}
1160 	ptr = strstr(name, "iqn.");
1161 	if (!ptr) {
1162 		pr_err("Unable to locate prefix for emulated Target "
1163 				"Port: %s\n", name);
1164 		ret = -EINVAL;
1165 		goto out;
1166 	}
1167 	tl_hba->tl_proto_id = SCSI_PROTOCOL_ISCSI;
1168 
1169 check_len:
1170 	if (strlen(name) >= TL_WWN_ADDR_LEN) {
1171 		pr_err("Emulated NAA %s Address: %s, exceeds"
1172 			" max: %d\n", tcm_loop_dump_proto_id(tl_hba), name,
1173 			TL_WWN_ADDR_LEN);
1174 		ret = -EINVAL;
1175 		goto out;
1176 	}
1177 	snprintf(&tl_hba->tl_wwn_address[0], TL_WWN_ADDR_LEN, "%s", &name[off]);
1178 
1179 	/*
1180 	 * Call device_register(&tl_hba->dev) via tcm_loop_setup_hba_bus() to
1181 	 * register the emulated Linux/SCSI LLD; the struct Scsi_Host at
1182 	 * tl_hba->sh is allocated by the tcm_loop_driver_probe() callback
1183 	 */
1184 	ret = tcm_loop_setup_hba_bus(tl_hba, tcm_loop_hba_no_cnt);
1185 	if (ret)
1186 		goto out;
1187 
1188 	sh = tl_hba->sh;
1189 	tcm_loop_hba_no_cnt++;
1190 	pr_debug("TCM_Loop_ConfigFS: Allocated emulated Target"
1191 		" %s Address: %s at Linux/SCSI Host ID: %d\n",
1192 		tcm_loop_dump_proto_id(tl_hba), name, sh->host_no);
1193 
1194 	return &tl_hba->tl_hba_wwn;
1195 out:
1196 	kfree(tl_hba);
1197 	return ERR_PTR(ret);
1198 }
1199 
1200 static void tcm_loop_drop_scsi_hba(
1201 	struct se_wwn *wwn)
1202 {
1203 	struct tcm_loop_hba *tl_hba = container_of(wwn,
1204 				struct tcm_loop_hba, tl_hba_wwn);
1205 
1206 	pr_debug("TCM_Loop_ConfigFS: Deallocating emulated Target"
1207 		" %s Address: %s at Linux/SCSI Host ID: %d\n",
1208 		tcm_loop_dump_proto_id(tl_hba), tl_hba->tl_wwn_address,
1209 		tl_hba->sh->host_no);
1210 	/*
1211 	 * Call device_unregister() on the original tl_hba->dev.
1212 	 * The tcm_loop_release_adapter() callback above will
1213 	 * release *tl_hba;
1214 	 */
1215 	device_unregister(&tl_hba->dev);
1216 }
1217 
1218 /* WWN version attribute for tcm_loop_cit */
1219 static ssize_t tcm_loop_wwn_show_attr_version(
1220 	struct target_fabric_configfs *tf,
1221 	char *page)
1222 {
1223 	return sprintf(page, "TCM Loopback Fabric module %s\n", TCM_LOOP_VERSION);
1224 }
1225 
1226 TF_WWN_ATTR_RO(tcm_loop, version);
1227 
1228 static struct configfs_attribute *tcm_loop_wwn_attrs[] = {
1229 	&tcm_loop_wwn_version.attr,
1230 	NULL,
1231 };
1232 
1233 /* End items for tcm_loop_cit */
1234 
1235 static const struct target_core_fabric_ops loop_ops = {
1236 	.module				= THIS_MODULE,
1237 	.name				= "loopback",
1238 	.get_fabric_name		= tcm_loop_get_fabric_name,
1239 	.tpg_get_wwn			= tcm_loop_get_endpoint_wwn,
1240 	.tpg_get_tag			= tcm_loop_get_tag,
1241 	.tpg_check_demo_mode		= tcm_loop_check_demo_mode,
1242 	.tpg_check_demo_mode_cache	= tcm_loop_check_demo_mode_cache,
1243 	.tpg_check_demo_mode_write_protect =
1244 				tcm_loop_check_demo_mode_write_protect,
1245 	.tpg_check_prod_mode_write_protect =
1246 				tcm_loop_check_prod_mode_write_protect,
1247 	.tpg_check_prot_fabric_only	= tcm_loop_check_prot_fabric_only,
1248 	.tpg_get_inst_index		= tcm_loop_get_inst_index,
1249 	.check_stop_free		= tcm_loop_check_stop_free,
1250 	.release_cmd			= tcm_loop_release_cmd,
1251 	.shutdown_session		= tcm_loop_shutdown_session,
1252 	.close_session			= tcm_loop_close_session,
1253 	.sess_get_index			= tcm_loop_sess_get_index,
1254 	.write_pending			= tcm_loop_write_pending,
1255 	.write_pending_status		= tcm_loop_write_pending_status,
1256 	.set_default_node_attributes	= tcm_loop_set_default_node_attributes,
1257 	.get_cmd_state			= tcm_loop_get_cmd_state,
1258 	.queue_data_in			= tcm_loop_queue_data_in,
1259 	.queue_status			= tcm_loop_queue_status,
1260 	.queue_tm_rsp			= tcm_loop_queue_tm_rsp,
1261 	.aborted_task			= tcm_loop_aborted_task,
1262 	.fabric_make_wwn		= tcm_loop_make_scsi_hba,
1263 	.fabric_drop_wwn		= tcm_loop_drop_scsi_hba,
1264 	.fabric_make_tpg		= tcm_loop_make_naa_tpg,
1265 	.fabric_drop_tpg		= tcm_loop_drop_naa_tpg,
1266 	.fabric_post_link		= tcm_loop_port_link,
1267 	.fabric_pre_unlink		= tcm_loop_port_unlink,
1268 	.tfc_wwn_attrs			= tcm_loop_wwn_attrs,
1269 	.tfc_tpg_base_attrs		= tcm_loop_tpg_attrs,
1270 	.tfc_tpg_attrib_attrs		= tcm_loop_tpg_attrib_attrs,
1271 };
1272 
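/*
 * Module init: allocate the submission workqueue and command cache, bring
 * up the emulated tcm_loop bus/driver pair, then register the fabric
 * template with target core.  Errors unwind in reverse order, mirroring
 * tcm_loop_fabric_exit().
 */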
1273 static int __init tcm_loop_fabric_init(void)
1274 {
1275 	int ret = -ENOMEM;
1276 
1277 	tcm_loop_workqueue = alloc_workqueue("tcm_loop", 0, 0);
1278 	if (!tcm_loop_workqueue)
1279 		goto out;
1280 
1281 	tcm_loop_cmd_cache = kmem_cache_create("tcm_loop_cmd_cache",
1282 				sizeof(struct tcm_loop_cmd),
1283 				__alignof__(struct tcm_loop_cmd),
1284 				0, NULL);
1285 	if (!tcm_loop_cmd_cache) {
1286 		pr_debug("kmem_cache_create() for"
1287 			" tcm_loop_cmd_cache failed\n");
1288 		goto out_destroy_workqueue;
1289 	}
1290 
1291 	ret = tcm_loop_alloc_core_bus();
1292 	if (ret)
1293 		goto out_destroy_cache;
1294 
1295 	ret = target_register_template(&loop_ops);
1296 	if (ret)
1297 		goto out_release_core_bus;
1298 
1299 	return 0;
1300 
1301 out_release_core_bus:
1302 	tcm_loop_release_core_bus();
1303 out_destroy_cache:
1304 	kmem_cache_destroy(tcm_loop_cmd_cache);
1305 out_destroy_workqueue:
1306 	destroy_workqueue(tcm_loop_workqueue);
1307 out:
1308 	return ret;
1309 }
1310 
1311 static void __exit tcm_loop_fabric_exit(void)
1312 {
1313 	target_unregister_template(&loop_ops);
1314 	tcm_loop_release_core_bus();
1315 	kmem_cache_destroy(tcm_loop_cmd_cache);
1316 	destroy_workqueue(tcm_loop_workqueue);
1317 }
1318 
1319 MODULE_DESCRIPTION("TCM loopback virtual Linux/SCSI fabric module");
1320 MODULE_AUTHOR("Nicholas A. Bellinger <nab@risingtidesystems.com>");
1321 MODULE_LICENSE("GPL");
1322 module_init(tcm_loop_fabric_init);
1323 module_exit(tcm_loop_fabric_exit);
1324