/*******************************************************************************
 *
 * This file contains the Linux/SCSI LLD virtual SCSI initiator driver
 * for emulated SAS initiator ports
 *
 * © Copyright 2011-2013 Datera, Inc.
 *
 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
 *
 * Author: Nicholas A. Bellinger <nab@risingtidesystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 ****************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/configfs.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>

#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

#include "tcm_loop.h"

#define to_tcm_loop_hba(hba)	container_of(hba, struct tcm_loop_hba, dev)

static struct kmem_cache *tcm_loop_cmd_cache;

static int tcm_loop_hba_no_cnt;

static int tcm_loop_queue_status(struct se_cmd *se_cmd);

static unsigned int tcm_loop_nr_hw_queues = 1;
module_param_named(nr_hw_queues, tcm_loop_nr_hw_queues, uint, 0644);

static unsigned int tcm_loop_can_queue = 1024;
module_param_named(can_queue, tcm_loop_can_queue, uint, 0644);

static unsigned int tcm_loop_cmd_per_lun = 1024;
module_param_named(cmd_per_lun, tcm_loop_cmd_per_lun, uint, 0644);
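
/*
 * Example (illustrative only): the queue sizing knobs above can be set at
 * module load time, e.g.:
 *
 *   modprobe tcm_loop nr_hw_queues=2 can_queue=2048 cmd_per_lun=64
 *
 * The values shown are arbitrary; they simply demonstrate the parameter
 * names defined above.
 */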

/*
 * Called from struct target_core_fabric_ops->check_stop_free()
 */
static int tcm_loop_check_stop_free(struct se_cmd *se_cmd)
{
	return transport_generic_free_cmd(se_cmd, 0);
}

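/*
 * Called by the target core when the last reference to a command is dropped.
 * TMR commands were allocated from tcm_loop_cmd_cache and are freed here;
 * regular commands live in the scsi_cmnd private area, so completing the
 * command back to the SCSI midlayer is all that is required.
 */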
static void tcm_loop_release_cmd(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);
	struct scsi_cmnd *sc = tl_cmd->sc;

	if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
		kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
	else
		scsi_done(sc);
}

static int tcm_loop_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	seq_puts(m, "tcm_loop_proc_info()\n");
	return 0;
}

static int tcm_loop_driver_probe(struct device *);
static void tcm_loop_driver_remove(struct device *);

static const struct bus_type tcm_loop_lld_bus = {
	.name			= "tcm_loop_bus",
	.probe			= tcm_loop_driver_probe,
	.remove			= tcm_loop_driver_remove,
};

static struct device_driver tcm_loop_driverfs = {
	.name			= "tcm_loop",
	.bus			= &tcm_loop_lld_bus,
};
/*
 * Used with root_device_register() in tcm_loop_alloc_core_bus() below
 */
static struct device *tcm_loop_primary;

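/*
 * Map an incoming struct scsi_cmnd onto the embedded struct se_cmd and hand
 * it to the target core for execution against the configured TPG and I_T
 * nexus.  Errors detected before submission are completed immediately with
 * an appropriate host byte.
 */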
static void tcm_loop_target_queue_cmd(struct tcm_loop_cmd *tl_cmd)
{
	struct se_cmd *se_cmd = &tl_cmd->tl_se_cmd;
	struct scsi_cmnd *sc = tl_cmd->sc;
	struct tcm_loop_nexus *tl_nexus;
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;
	struct scatterlist *sgl_bidi = NULL;
	u32 sgl_bidi_count = 0, transfer_length;

	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];

	/*
	 * Ensure that this tl_tpg reference from the incoming sc->device->id
	 * has already been configured via tcm_loop_make_naa_tpg().
	 */
	if (!tl_tpg->tl_hba) {
		set_host_byte(sc, DID_NO_CONNECT);
		goto out_done;
	}
	if (tl_tpg->tl_transport_status == TCM_TRANSPORT_OFFLINE) {
		set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
		goto out_done;
	}
	tl_nexus = tl_tpg->tl_nexus;
	if (!tl_nexus) {
		scmd_printk(KERN_ERR, sc,
			    "TCM_Loop I_T Nexus does not exist\n");
		set_host_byte(sc, DID_ERROR);
		goto out_done;
	}

	transfer_length = scsi_transfer_length(sc);
	if (!scsi_prot_sg_count(sc) &&
	    scsi_get_prot_op(sc) != SCSI_PROT_NORMAL) {
		se_cmd->prot_pto = true;
		/*
		 * loopback transport doesn't support
		 * WRITE_GENERATE, READ_STRIP protection
		 * information operations, go ahead unprotected.
		 */
		transfer_length = scsi_bufflen(sc);
	}

	se_cmd->tag = tl_cmd->sc_cmd_tag;
	target_init_cmd(se_cmd, tl_nexus->se_sess, &tl_cmd->tl_sense_buf[0],
			tl_cmd->sc->device->lun, transfer_length,
			TCM_SIMPLE_TAG, sc->sc_data_direction, 0);

	if (target_submit_prep(se_cmd, sc->cmnd, scsi_sglist(sc),
			       scsi_sg_count(sc), sgl_bidi, sgl_bidi_count,
			       scsi_prot_sglist(sc), scsi_prot_sg_count(sc),
			       GFP_ATOMIC))
		return;

	target_submit(se_cmd);
	return;

out_done:
	scsi_done(sc);
}

/*
 * ->queuecommand can be and usually is called from interrupt context, so
 * defer the actual submission to a workqueue.
 */
static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
{
	struct tcm_loop_cmd *tl_cmd = scsi_cmd_priv(sc);

	pr_debug("%s() %d:%d:%d:%llu got CDB: 0x%02x scsi_buf_len: %u\n",
		 __func__, sc->device->host->host_no, sc->device->id,
		 sc->device->channel, sc->device->lun, sc->cmnd[0],
		 scsi_bufflen(sc));

	memset(tl_cmd, 0, sizeof(*tl_cmd));
	tl_cmd->sc = sc;
	tl_cmd->sc_cmd_tag = blk_mq_unique_tag(scsi_cmd_to_rq(sc));

	tcm_loop_target_queue_cmd(tl_cmd);
	return 0;
}

/*
 * Called from SCSI EH process context to issue a TMR (e.g. ABORT_TASK or
 * LUN_RESET) to the emulated SCSI target.
 */
static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
			      u64 lun, int task, enum tcm_tmreq_table tmr)
{
	struct se_cmd *se_cmd;
	struct se_session *se_sess;
	struct tcm_loop_nexus *tl_nexus;
	struct tcm_loop_cmd *tl_cmd;
	int ret = TMR_FUNCTION_FAILED, rc;

	/*
	 * Locate the tl_nexus and se_sess pointers
	 */
	tl_nexus = tl_tpg->tl_nexus;
	if (!tl_nexus) {
		pr_err("Unable to perform device reset without active I_T Nexus\n");
		return ret;
	}

	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
	if (!tl_cmd)
		return ret;

	init_completion(&tl_cmd->tmr_done);

	se_cmd = &tl_cmd->tl_se_cmd;
	se_sess = tl_tpg->tl_nexus->se_sess;

	rc = target_submit_tmr(se_cmd, se_sess, tl_cmd->tl_sense_buf, lun,
			       NULL, tmr, GFP_KERNEL, task,
			       TARGET_SCF_ACK_KREF);
	if (rc < 0)
		goto release;
	wait_for_completion(&tl_cmd->tmr_done);
	ret = se_cmd->se_tmr_req->response;
	target_put_sess_cmd(se_cmd);

out:
	return ret;

release:
	kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
	goto out;
}

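/*
 * Called from SCSI EH process context to abort a single outstanding command
 * by issuing an ABORT_TASK TMR keyed on the command's block layer tag.
 */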
static int tcm_loop_abort_task(struct scsi_cmnd *sc)
{
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;
	int ret;

	/*
	 * Locate the struct tcm_loop_hba pointer
	 */
	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
	ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
				 blk_mq_unique_tag(scsi_cmd_to_rq(sc)),
				 TMR_ABORT_TASK);
	return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
}

/*
 * Called from SCSI EH process context to issue a LUN_RESET TMR
 * to struct scsi_device
 */
static int tcm_loop_device_reset(struct scsi_cmnd *sc)
{
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;
	int ret;

	/*
	 * Locate the struct tcm_loop_hba pointer
	 */
	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];

	ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
				 0, TMR_LUN_RESET);
	return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
}

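/*
 * Called from SCSI EH process context as a last resort: simply force the
 * TPG transport status back online rather than issuing a TMR.
 */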
static int tcm_loop_target_reset(struct scsi_cmnd *sc)
{
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;

	/*
	 * Locate the struct tcm_loop_hba pointer
	 */
	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	if (!tl_hba) {
		pr_err("Unable to perform target reset without active I_T Nexus\n");
		return FAILED;
	}
	/*
	 * Locate the tl_tpg pointer from TargetID in sc->device->id
	 */
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
	if (tl_tpg) {
		tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
		return SUCCESS;
	}
	return FAILED;
}

static const struct scsi_host_template tcm_loop_driver_template = {
	.show_info		= tcm_loop_show_info,
	.proc_name		= "tcm_loopback",
	.name			= "TCM_Loopback",
	.queuecommand		= tcm_loop_queuecommand,
	.change_queue_depth	= scsi_change_queue_depth,
	.eh_abort_handler	= tcm_loop_abort_task,
	.eh_device_reset_handler = tcm_loop_device_reset,
	.eh_target_reset_handler = tcm_loop_target_reset,
	.this_id		= -1,
	.sg_tablesize		= 256,
	.max_sectors		= 0xFFFF,
	.dma_boundary		= PAGE_SIZE - 1,
	.module			= THIS_MODULE,
	.track_queue_depth	= 1,
	.cmd_size		= sizeof(struct tcm_loop_cmd),
};
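
/*
 * Note (illustrative): because .change_queue_depth and .track_queue_depth
 * are set, the per-device queue depth of an emulated LUN can be tuned at
 * runtime through the standard SCSI sysfs attribute, for example:
 *
 *   echo 64 > /sys/bus/scsi/devices/<h:c:t:l>/queue_depth
 */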

static int tcm_loop_driver_probe(struct device *dev)
{
	struct tcm_loop_hba *tl_hba;
	struct Scsi_Host *sh;
	int error, host_prot;

	tl_hba = to_tcm_loop_hba(dev);

	sh = scsi_host_alloc(&tcm_loop_driver_template,
			sizeof(struct tcm_loop_hba));
	if (!sh) {
		pr_err("Unable to allocate struct scsi_host\n");
		return -ENODEV;
	}
	tl_hba->sh = sh;

	/*
	 * Assign the struct tcm_loop_hba pointer to struct Scsi_Host->hostdata
	 */
	*((struct tcm_loop_hba **)sh->hostdata) = tl_hba;
	/*
	 * Setup single ID, Channel and LUN for now..
	 */
	sh->max_id = 2;
	sh->max_lun = 0;
	sh->max_channel = 0;
	sh->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;
	sh->nr_hw_queues = tcm_loop_nr_hw_queues;
	sh->can_queue = tcm_loop_can_queue;
	sh->cmd_per_lun = tcm_loop_cmd_per_lun;

	host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
		    SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
		    SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;

	scsi_host_set_prot(sh, host_prot);
	scsi_host_set_guard(sh, SHOST_DIX_GUARD_CRC);

	error = scsi_add_host(sh, &tl_hba->dev);
	if (error) {
		pr_err("%s: scsi_add_host failed\n", __func__);
		scsi_host_put(sh);
		return -ENODEV;
	}
	return 0;
}

static void tcm_loop_driver_remove(struct device *dev)
{
	struct tcm_loop_hba *tl_hba;
	struct Scsi_Host *sh;

	tl_hba = to_tcm_loop_hba(dev);
	sh = tl_hba->sh;

	scsi_remove_host(sh);
	scsi_host_put(sh);
}

static void tcm_loop_release_adapter(struct device *dev)
{
	struct tcm_loop_hba *tl_hba = to_tcm_loop_hba(dev);

	kfree(tl_hba);
}

/*
 * Called from tcm_loop_make_scsi_hba() below
 */
static int tcm_loop_setup_hba_bus(struct tcm_loop_hba *tl_hba, int tcm_loop_host_id)
{
	int ret;

	tl_hba->dev.bus = &tcm_loop_lld_bus;
	tl_hba->dev.parent = tcm_loop_primary;
	tl_hba->dev.release = &tcm_loop_release_adapter;
	dev_set_name(&tl_hba->dev, "tcm_loop_adapter_%d", tcm_loop_host_id);

	ret = device_register(&tl_hba->dev);
	if (ret) {
		pr_err("device_register() failed for tl_hba->dev: %d\n", ret);
		put_device(&tl_hba->dev);
		return -ENODEV;
	}

	return 0;
}

/*
 * Called from tcm_loop_fabric_init() below to load the emulated
 * tcm_loop SCSI bus.
 */
static int tcm_loop_alloc_core_bus(void)
{
	int ret;

	tcm_loop_primary = root_device_register("tcm_loop_0");
	if (IS_ERR(tcm_loop_primary)) {
		pr_err("Unable to allocate tcm_loop_primary\n");
		return PTR_ERR(tcm_loop_primary);
	}

	ret = bus_register(&tcm_loop_lld_bus);
	if (ret) {
		pr_err("bus_register() failed for tcm_loop_lld_bus\n");
		goto dev_unreg;
	}

	ret = driver_register(&tcm_loop_driverfs);
	if (ret) {
		pr_err("driver_register() failed for tcm_loop_driverfs\n");
		goto bus_unreg;
	}

	pr_debug("Initialized TCM Loop Core Bus\n");
	return ret;

bus_unreg:
	bus_unregister(&tcm_loop_lld_bus);
dev_unreg:
	root_device_unregister(tcm_loop_primary);
	return ret;
}

static void tcm_loop_release_core_bus(void)
{
	driver_unregister(&tcm_loop_driverfs);
	bus_unregister(&tcm_loop_lld_bus);
	root_device_unregister(tcm_loop_primary);

	pr_debug("Releasing TCM Loop Core BUS\n");
}

static inline struct tcm_loop_tpg *tl_tpg(struct se_portal_group *se_tpg)
{
	return container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
}

static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg)
{
	/*
	 * Return the passed NAA identifier for the Target Port
	 */
	return &tl_tpg(se_tpg)->tl_hba->tl_wwn_address[0];
}

static u16 tcm_loop_get_tag(struct se_portal_group *se_tpg)
{
	/*
	 * This Tag is used when forming SCSI Name identifier in EVPD=1 0x83
	 * to represent the SCSI Target Port.
	 */
	return tl_tpg(se_tpg)->tl_tpgt;
}

/*
 * Returning 1 here allows target_core_mod to generate a struct se_node_acl
 * based upon the incoming fabric-dependent SCSI Initiator Port
 */
static int tcm_loop_check_demo_mode(struct se_portal_group *se_tpg)
{
	return 1;
}

static int tcm_loop_check_prot_fabric_only(struct se_portal_group *se_tpg)
{
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
						   tl_se_tpg);
	return tl_tpg->tl_fabric_prot_type;
}

static u32 tcm_loop_sess_get_index(struct se_session *se_sess)
{
	return 1;
}

static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
			struct tcm_loop_cmd, tl_se_cmd);

	return tl_cmd->sc_cmd_state;
}

static int tcm_loop_write_pending(struct se_cmd *se_cmd)
{
	/*
	 * Since Linux/SCSI has already sent down a struct scsi_cmnd
	 * sc->sc_data_direction of DMA_TO_DEVICE with struct scatterlist array
	 * memory, and memory has already been mapped to struct se_cmd->t_mem_list
	 * format with transport_generic_map_mem_to_cmd().
	 *
	 * We now tell TCM to add this WRITE CDB directly into the TCM storage
	 * object execution queue.
	 */
	target_execute_cmd(se_cmd);
	return 0;
}

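/*
 * Common completion path for data-in and status: copy back any sense data,
 * set the SCSI result and residual, and let tcm_loop_release_cmd() complete
 * the struct scsi_cmnd once the target core drops its reference.
 */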
static int tcm_loop_queue_data_or_status(const char *func,
		struct se_cmd *se_cmd, u8 scsi_status)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);
	struct scsi_cmnd *sc = tl_cmd->sc;

	pr_debug("%s() called for scsi_cmnd: %p cdb: 0x%02x\n",
		 func, sc, sc->cmnd[0]);

	if (se_cmd->sense_buffer &&
	   ((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
	    (se_cmd->se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {

		memcpy(sc->sense_buffer, se_cmd->sense_buffer,
				SCSI_SENSE_BUFFERSIZE);
		sc->result = SAM_STAT_CHECK_CONDITION;
	} else
		sc->result = scsi_status;

	set_host_byte(sc, DID_OK);
	if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
	    (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
		scsi_set_resid(sc, se_cmd->residual_count);
	return 0;
}

static int tcm_loop_queue_data_in(struct se_cmd *se_cmd)
{
	return tcm_loop_queue_data_or_status(__func__, se_cmd, SAM_STAT_GOOD);
}

static int tcm_loop_queue_status(struct se_cmd *se_cmd)
{
	return tcm_loop_queue_data_or_status(__func__,
					     se_cmd, se_cmd->scsi_status);
}

static void tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);

	/* Wake up tcm_loop_issue_tmr(). */
	complete(&tl_cmd->tmr_done);
}

static void tcm_loop_aborted_task(struct se_cmd *se_cmd)
{
	return;
}

static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba)
{
	switch (tl_hba->tl_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return "SAS";
	case SCSI_PROTOCOL_FCP:
		return "FCP";
	case SCSI_PROTOCOL_ISCSI:
		return "iSCSI";
	default:
		break;
	}

	return "Unknown";
}

/* Start items for tcm_loop_port_cit */

static int tcm_loop_port_link(
	struct se_portal_group *se_tpg,
	struct se_lun *lun)
{
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
				struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;

	atomic_inc_mb(&tl_tpg->tl_tpg_port_count);
	/*
	 * Add Linux/SCSI struct scsi_device by HCTL
	 */
	scsi_add_device(tl_hba->sh, 0, tl_tpg->tl_tpgt, lun->unpacked_lun);

	pr_debug("TCM_Loop_ConfigFS: Port Link Successful\n");
	return 0;
}

static void tcm_loop_port_unlink(
	struct se_portal_group *se_tpg,
	struct se_lun *se_lun)
{
	struct scsi_device *sd;
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;

	tl_tpg = container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
	tl_hba = tl_tpg->tl_hba;

	sd = scsi_device_lookup(tl_hba->sh, 0, tl_tpg->tl_tpgt,
				se_lun->unpacked_lun);
	if (!sd) {
		pr_err("Unable to locate struct scsi_device for %d:%d:%llu\n",
		       0, tl_tpg->tl_tpgt, se_lun->unpacked_lun);
		return;
	}
	/*
	 * Remove Linux/SCSI struct scsi_device by HCTL
	 */
	scsi_remove_device(sd);
	scsi_device_put(sd);

	atomic_dec_mb(&tl_tpg->tl_tpg_port_count);

	pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n");
}

/* End items for tcm_loop_port_cit */

static ssize_t tcm_loop_tpg_attrib_fabric_prot_type_show(
		struct config_item *item, char *page)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
						   tl_se_tpg);

	return sprintf(page, "%d\n", tl_tpg->tl_fabric_prot_type);
}

static ssize_t tcm_loop_tpg_attrib_fabric_prot_type_store(
		struct config_item *item, const char *page, size_t count)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
						   tl_se_tpg);
	unsigned long val;
	int ret = kstrtoul(page, 0, &val);

	if (ret) {
		pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
		return ret;
	}
	if (val != 0 && val != 1 && val != 3) {
		pr_err("Invalid tcm_loop fabric_prot_type: %lu\n", val);
		return -EINVAL;
	}
	tl_tpg->tl_fabric_prot_type = val;

	return count;
}

CONFIGFS_ATTR(tcm_loop_tpg_attrib_, fabric_prot_type);

static struct configfs_attribute *tcm_loop_tpg_attrib_attrs[] = {
	&tcm_loop_tpg_attrib_attr_fabric_prot_type,
	NULL,
};

/* Start items for tcm_loop_nexus_cit */

static int tcm_loop_alloc_sess_cb(struct se_portal_group *se_tpg,
				  struct se_session *se_sess, void *p)
{
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
					struct tcm_loop_tpg, tl_se_tpg);

	tl_tpg->tl_nexus = p;
	return 0;
}

static int tcm_loop_make_nexus(
	struct tcm_loop_tpg *tl_tpg,
	const char *name)
{
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
	struct tcm_loop_nexus *tl_nexus;
	int ret;

	if (tl_tpg->tl_nexus) {
		pr_debug("tl_tpg->tl_nexus already exists\n");
		return -EEXIST;
	}

	tl_nexus = kzalloc(sizeof(*tl_nexus), GFP_KERNEL);
	if (!tl_nexus)
		return -ENOMEM;

	tl_nexus->se_sess = target_setup_session(&tl_tpg->tl_se_tpg, 0, 0,
					TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
					name, tl_nexus, tcm_loop_alloc_sess_cb);
	if (IS_ERR(tl_nexus->se_sess)) {
		ret = PTR_ERR(tl_nexus->se_sess);
		kfree(tl_nexus);
		return ret;
	}

	pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated %s Initiator Port: %s\n",
		 tcm_loop_dump_proto_id(tl_hba), name);
	return 0;
}

static int tcm_loop_drop_nexus(
	struct tcm_loop_tpg *tpg)
{
	struct se_session *se_sess;
	struct tcm_loop_nexus *tl_nexus;

	tl_nexus = tpg->tl_nexus;
	if (!tl_nexus)
		return -ENODEV;

	se_sess = tl_nexus->se_sess;
	if (!se_sess)
		return -ENODEV;

	if (atomic_read(&tpg->tl_tpg_port_count)) {
		pr_err("Unable to remove TCM_Loop I_T Nexus with active TPG port count: %d\n",
		       atomic_read(&tpg->tl_tpg_port_count));
		return -EPERM;
	}

	pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated %s Initiator Port: %s\n",
		 tcm_loop_dump_proto_id(tpg->tl_hba),
		 tl_nexus->se_sess->se_node_acl->initiatorname);
	/*
	 * Release the SCSI I_T Nexus to the emulated Target Port
	 */
	target_remove_session(se_sess);
	tpg->tl_nexus = NULL;
	kfree(tl_nexus);
	return 0;
}

/* End items for tcm_loop_nexus_cit */

static ssize_t tcm_loop_tpg_nexus_show(struct config_item *item, char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
			struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_nexus *tl_nexus;
	ssize_t ret;

	tl_nexus = tl_tpg->tl_nexus;
	if (!tl_nexus)
		return -ENODEV;

	ret = snprintf(page, PAGE_SIZE, "%s\n",
		tl_nexus->se_sess->se_node_acl->initiatorname);

	return ret;
}

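/*
 * Example configfs usage (illustrative, the WWNs are made up): establish the
 * I_T nexus by writing an initiator port name that matches the HBA protocol,
 * or tear it down by writing "NULL":
 *
 *   echo naa.60014052cc9d7a4b > \
 *     /sys/kernel/config/target/loopback/naa.60014051c3d0ab12/tpgt_1/nexus
 *   echo NULL > \
 *     /sys/kernel/config/target/loopback/naa.60014051c3d0ab12/tpgt_1/nexus
 */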
static ssize_t tcm_loop_tpg_nexus_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
			struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
	unsigned char i_port[TL_WWN_ADDR_LEN], *ptr, *port_ptr;
	int ret;
	/*
	 * Shutdown the active I_T nexus if 'NULL' is passed..
	 */
	if (!strncmp(page, "NULL", 4)) {
		ret = tcm_loop_drop_nexus(tl_tpg);
		return (!ret) ? count : ret;
	}
	/*
	 * Otherwise make sure the passed virtual Initiator port WWN matches
	 * the fabric protocol_id set in tcm_loop_make_scsi_hba(), and call
	 * tcm_loop_make_nexus()
	 */
	if (strlen(page) >= TL_WWN_ADDR_LEN) {
		pr_err("Emulated NAA SAS Address: %s, exceeds max: %d\n",
		       page, TL_WWN_ADDR_LEN);
		return -EINVAL;
	}
	snprintf(&i_port[0], TL_WWN_ADDR_LEN, "%s", page);

	ptr = strstr(i_port, "naa.");
	if (ptr) {
		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_SAS) {
			pr_err("Passed SAS Initiator Port %s does not match target port protoid: %s\n",
			       i_port, tcm_loop_dump_proto_id(tl_hba));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	ptr = strstr(i_port, "fc.");
	if (ptr) {
		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_FCP) {
			pr_err("Passed FCP Initiator Port %s does not match target port protoid: %s\n",
			       i_port, tcm_loop_dump_proto_id(tl_hba));
			return -EINVAL;
		}
		port_ptr = &i_port[3]; /* Skip over "fc." */
		goto check_newline;
	}
	ptr = strstr(i_port, "iqn.");
	if (ptr) {
		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_ISCSI) {
			pr_err("Passed iSCSI Initiator Port %s does not match target port protoid: %s\n",
			       i_port, tcm_loop_dump_proto_id(tl_hba));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	pr_err("Unable to locate prefix for emulated Initiator Port: %s\n",
	       i_port);
	return -EINVAL;
	/*
	 * Clear any trailing newline for the NAA WWN
	 */
check_newline:
	if (i_port[strlen(i_port)-1] == '\n')
		i_port[strlen(i_port)-1] = '\0';

	ret = tcm_loop_make_nexus(tl_tpg, port_ptr);
	if (ret < 0)
		return ret;

	return count;
}

static ssize_t tcm_loop_tpg_transport_status_show(struct config_item *item,
		char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
			struct tcm_loop_tpg, tl_se_tpg);
	const char *status = NULL;
	ssize_t ret = -EINVAL;

	switch (tl_tpg->tl_transport_status) {
	case TCM_TRANSPORT_ONLINE:
		status = "online";
		break;
	case TCM_TRANSPORT_OFFLINE:
		status = "offline";
		break;
	default:
		break;
	}

	if (status)
		ret = snprintf(page, PAGE_SIZE, "%s\n", status);

	return ret;
}

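/*
 * Example configfs usage (illustrative path): simulate a transport outage by
 * writing "offline", then restore the link with "online":
 *
 *   echo offline > \
 *     /sys/kernel/config/target/loopback/naa.60014051c3d0ab12/tpgt_1/transport_status
 */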
static ssize_t tcm_loop_tpg_transport_status_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
			struct tcm_loop_tpg, tl_se_tpg);

	if (!strncmp(page, "online", 6)) {
		tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
		return count;
	}
	if (!strncmp(page, "offline", 7)) {
		tl_tpg->tl_transport_status = TCM_TRANSPORT_OFFLINE;
		if (tl_tpg->tl_nexus) {
			struct se_session *tl_sess = tl_tpg->tl_nexus->se_sess;

			core_allocate_nexus_loss_ua(tl_sess->se_node_acl);
		}
		return count;
	}
	return -EINVAL;
}

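/*
 * The address attribute reports the Linux/SCSI "host:channel:target" triple
 * of the emulated TPG, which can be matched against tools such as lsscsi to
 * locate the corresponding SCSI devices.
 */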
static ssize_t tcm_loop_tpg_address_show(struct config_item *item,
					 char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
			struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;

	return snprintf(page, PAGE_SIZE, "%d:0:%d\n",
			tl_hba->sh->host_no, tl_tpg->tl_tpgt);
}

CONFIGFS_ATTR(tcm_loop_tpg_, nexus);
CONFIGFS_ATTR(tcm_loop_tpg_, transport_status);
CONFIGFS_ATTR_RO(tcm_loop_tpg_, address);

static struct configfs_attribute *tcm_loop_tpg_attrs[] = {
	&tcm_loop_tpg_attr_nexus,
	&tcm_loop_tpg_attr_transport_status,
	&tcm_loop_tpg_attr_address,
	NULL,
};

/* Start items for tcm_loop_naa_cit */

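/*
 * Example configfs usage (illustrative WWN): a TPG is created by making a
 * "tpgt_<N>" directory underneath an existing loopback WWN group:
 *
 *   mkdir /sys/kernel/config/target/loopback/naa.60014051c3d0ab12/tpgt_1
 */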
static struct se_portal_group *tcm_loop_make_naa_tpg(struct se_wwn *wwn,
						     const char *name)
{
	struct tcm_loop_hba *tl_hba = container_of(wwn,
			struct tcm_loop_hba, tl_hba_wwn);
	struct tcm_loop_tpg *tl_tpg;
	int ret;
	unsigned long tpgt;

	if (strstr(name, "tpgt_") != name) {
		pr_err("Unable to locate \"tpgt_#\" directory group\n");
		return ERR_PTR(-EINVAL);
	}
	if (kstrtoul(name+5, 10, &tpgt))
		return ERR_PTR(-EINVAL);

	if (tpgt >= TL_TPGS_PER_HBA) {
		pr_err("Passed tpgt: %lu exceeds TL_TPGS_PER_HBA: %u\n",
		       tpgt, TL_TPGS_PER_HBA);
		return ERR_PTR(-EINVAL);
	}
	tl_tpg = &tl_hba->tl_hba_tpgs[tpgt];
	tl_tpg->tl_hba = tl_hba;
	tl_tpg->tl_tpgt = tpgt;
	/*
	 * Register the tl_tpg as an emulated TCM Target Endpoint
	 */
	ret = core_tpg_register(wwn, &tl_tpg->tl_se_tpg, tl_hba->tl_proto_id);
	if (ret < 0)
		return ERR_PTR(-ENOMEM);

	pr_debug("TCM_Loop_ConfigFS: Allocated Emulated %s Target Port %s,t,0x%04lx\n",
		 tcm_loop_dump_proto_id(tl_hba),
		 config_item_name(&wwn->wwn_group.cg_item), tpgt);
	return &tl_tpg->tl_se_tpg;
}

static void tcm_loop_drop_naa_tpg(
	struct se_portal_group *se_tpg)
{
	struct se_wwn *wwn = se_tpg->se_tpg_wwn;
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
				struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_hba *tl_hba;
	unsigned short tpgt;

	tl_hba = tl_tpg->tl_hba;
	tpgt = tl_tpg->tl_tpgt;
	/*
	 * Release the I_T Nexus for the Virtual target link if present
	 */
	tcm_loop_drop_nexus(tl_tpg);
	/*
	 * Deregister the tl_tpg as an emulated TCM Target Endpoint
	 */
	core_tpg_deregister(se_tpg);

	tl_tpg->tl_hba = NULL;
	tl_tpg->tl_tpgt = 0;

	pr_debug("TCM_Loop_ConfigFS: Deallocated Emulated %s Target Port %s,t,0x%04x\n",
		 tcm_loop_dump_proto_id(tl_hba),
		 config_item_name(&wwn->wwn_group.cg_item), tpgt);
}

/* End items for tcm_loop_naa_cit */

/* Start items for tcm_loop_cit */

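/*
 * Example configfs usage (illustrative WWNs): an emulated SAS, FCP, or iSCSI
 * HBA is created by making a WWN directory under the loopback fabric, with
 * the name prefix selecting the protocol:
 *
 *   mkdir /sys/kernel/config/target/loopback/naa.60014051c3d0ab12   (SAS)
 *   mkdir /sys/kernel/config/target/loopback/fc.20000000c9a1b2c3    (FCP)
 *   mkdir /sys/kernel/config/target/loopback/iqn.2003-01.org.example:init  (iSCSI)
 */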
static struct se_wwn *tcm_loop_make_scsi_hba(
	struct target_fabric_configfs *tf,
	struct config_group *group,
	const char *name)
{
	struct tcm_loop_hba *tl_hba;
	struct Scsi_Host *sh;
	char *ptr;
	int ret, off = 0;

	tl_hba = kzalloc(sizeof(*tl_hba), GFP_KERNEL);
	if (!tl_hba)
		return ERR_PTR(-ENOMEM);

	/*
	 * Determine the emulated Protocol Identifier and Target Port Name
	 * based on the incoming configfs directory name.
	 */
	ptr = strstr(name, "naa.");
	if (ptr) {
		tl_hba->tl_proto_id = SCSI_PROTOCOL_SAS;
		goto check_len;
	}
	ptr = strstr(name, "fc.");
	if (ptr) {
		tl_hba->tl_proto_id = SCSI_PROTOCOL_FCP;
		off = 3; /* Skip over "fc." */
		goto check_len;
	}
	ptr = strstr(name, "iqn.");
	if (!ptr) {
		pr_err("Unable to locate prefix for emulated Target Port: %s\n",
		       name);
		ret = -EINVAL;
		goto out;
	}
	tl_hba->tl_proto_id = SCSI_PROTOCOL_ISCSI;

check_len:
	if (strlen(name) >= TL_WWN_ADDR_LEN) {
		pr_err("Emulated NAA %s Address: %s, exceeds max: %d\n",
		       tcm_loop_dump_proto_id(tl_hba), name, TL_WWN_ADDR_LEN);
		ret = -EINVAL;
		goto out;
	}
	snprintf(&tl_hba->tl_wwn_address[0], TL_WWN_ADDR_LEN, "%s", &name[off]);

	/*
	 * Register tl_hba->dev on the tcm_loop bus via tcm_loop_setup_hba_bus();
	 * the resulting device_register() probe callback, tcm_loop_driver_probe(),
	 * allocates and adds the emulated struct Scsi_Host at tl_hba->sh.
	 */
	ret = tcm_loop_setup_hba_bus(tl_hba, tcm_loop_hba_no_cnt);
	if (ret)
		return ERR_PTR(ret);

	sh = tl_hba->sh;
	tcm_loop_hba_no_cnt++;
	pr_debug("TCM_Loop_ConfigFS: Allocated emulated Target %s Address: %s at Linux/SCSI Host ID: %d\n",
		 tcm_loop_dump_proto_id(tl_hba), name, sh->host_no);
	return &tl_hba->tl_hba_wwn;
out:
	kfree(tl_hba);
	return ERR_PTR(ret);
}

static void tcm_loop_drop_scsi_hba(
	struct se_wwn *wwn)
{
	struct tcm_loop_hba *tl_hba = container_of(wwn,
				struct tcm_loop_hba, tl_hba_wwn);

	pr_debug("TCM_Loop_ConfigFS: Deallocating emulated Target %s Address: %s at Linux/SCSI Host ID: %d\n",
		 tcm_loop_dump_proto_id(tl_hba), tl_hba->tl_wwn_address,
		 tl_hba->sh->host_no);
	/*
	 * Call device_unregister() on the original tl_hba->dev.
	 * tcm_loop_release_adapter() above will release *tl_hba.
	 */
	device_unregister(&tl_hba->dev);
}

/* Start items for tcm_loop_cit */
static ssize_t tcm_loop_wwn_version_show(struct config_item *item, char *page)
{
	return sprintf(page, "TCM Loopback Fabric module %s\n", TCM_LOOP_VERSION);
}

CONFIGFS_ATTR_RO(tcm_loop_wwn_, version);

static struct configfs_attribute *tcm_loop_wwn_attrs[] = {
	&tcm_loop_wwn_attr_version,
	NULL,
};

/* End items for tcm_loop_cit */

static const struct target_core_fabric_ops loop_ops = {
	.module				= THIS_MODULE,
	.fabric_name			= "loopback",
	.tpg_get_wwn			= tcm_loop_get_endpoint_wwn,
	.tpg_get_tag			= tcm_loop_get_tag,
	.tpg_check_demo_mode		= tcm_loop_check_demo_mode,
	.tpg_check_prot_fabric_only	= tcm_loop_check_prot_fabric_only,
	.check_stop_free		= tcm_loop_check_stop_free,
	.release_cmd			= tcm_loop_release_cmd,
	.sess_get_index			= tcm_loop_sess_get_index,
	.write_pending			= tcm_loop_write_pending,
	.get_cmd_state			= tcm_loop_get_cmd_state,
	.queue_data_in			= tcm_loop_queue_data_in,
	.queue_status			= tcm_loop_queue_status,
	.queue_tm_rsp			= tcm_loop_queue_tm_rsp,
	.aborted_task			= tcm_loop_aborted_task,
	.fabric_make_wwn		= tcm_loop_make_scsi_hba,
	.fabric_drop_wwn		= tcm_loop_drop_scsi_hba,
	.fabric_make_tpg		= tcm_loop_make_naa_tpg,
	.fabric_drop_tpg		= tcm_loop_drop_naa_tpg,
	.fabric_post_link		= tcm_loop_port_link,
	.fabric_pre_unlink		= tcm_loop_port_unlink,
	.tfc_wwn_attrs			= tcm_loop_wwn_attrs,
	.tfc_tpg_base_attrs		= tcm_loop_tpg_attrs,
	.tfc_tpg_attrib_attrs		= tcm_loop_tpg_attrib_attrs,
	.default_submit_type		= TARGET_QUEUE_SUBMIT,
	.direct_submit_supp		= 0,
};

static int __init tcm_loop_fabric_init(void)
{
	int ret = -ENOMEM;

	tcm_loop_cmd_cache = kmem_cache_create("tcm_loop_cmd_cache",
				sizeof(struct tcm_loop_cmd),
				__alignof__(struct tcm_loop_cmd),
				0, NULL);
	if (!tcm_loop_cmd_cache) {
		pr_debug("kmem_cache_create() for tcm_loop_cmd_cache failed\n");
		goto out;
	}

	ret = tcm_loop_alloc_core_bus();
	if (ret)
		goto out_destroy_cache;

	ret = target_register_template(&loop_ops);
	if (ret)
		goto out_release_core_bus;

	return 0;

out_release_core_bus:
	tcm_loop_release_core_bus();
out_destroy_cache:
	kmem_cache_destroy(tcm_loop_cmd_cache);
out:
	return ret;
}

static void __exit tcm_loop_fabric_exit(void)
{
	target_unregister_template(&loop_ops);
	tcm_loop_release_core_bus();
	kmem_cache_destroy(tcm_loop_cmd_cache);
}

MODULE_DESCRIPTION("TCM loopback virtual Linux/SCSI fabric module");
MODULE_AUTHOR("Nicholas A. Bellinger <nab@risingtidesystems.com>");
MODULE_LICENSE("GPL");
module_init(tcm_loop_fabric_init);
module_exit(tcm_loop_fabric_exit);