1 /*******************************************************************************
2 *
3 * This file contains the Linux/SCSI LLD virtual SCSI initiator driver
4 * for emulated SAS initiator ports
5 *
6 * © Copyright 2011-2013 Datera, Inc.
7 *
8 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
9 *
10 * Author: Nicholas A. Bellinger <nab@risingtidesystems.com>
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 ****************************************************************************/
22
23 #include <linux/module.h>
24 #include <linux/moduleparam.h>
25 #include <linux/init.h>
26 #include <linux/slab.h>
27 #include <linux/types.h>
28 #include <linux/configfs.h>
29 #include <linux/blk-mq.h>
30 #include <scsi/scsi.h>
31 #include <scsi/scsi_tcq.h>
32 #include <scsi/scsi_host.h>
33 #include <scsi/scsi_device.h>
34 #include <scsi/scsi_cmnd.h>
35
36 #include <target/target_core_base.h>
37 #include <target/target_core_fabric.h>
38
39 #include "tcm_loop.h"
40
41 #define to_tcm_loop_hba(hba) container_of(hba, struct tcm_loop_hba, dev)
42
/* Slab cache for TMR-issued struct tcm_loop_cmd allocations. */
static struct kmem_cache *tcm_loop_cmd_cache;

/* Running count of emulated HBAs — presumably used to name new adapters
 * elsewhere in this file; TODO confirm against tcm_loop_make_scsi_hba(). */
static int tcm_loop_hba_no_cnt;

static int tcm_loop_queue_status(struct se_cmd *se_cmd);

/* Number of blk-mq hardware queues each emulated Scsi_Host exposes. */
static unsigned int tcm_loop_nr_hw_queues = 1;
module_param_named(nr_hw_queues, tcm_loop_nr_hw_queues, uint, 0644);

/* Host-wide queue depth applied in tcm_loop_driver_probe(). */
static unsigned int tcm_loop_can_queue = 1024;
module_param_named(can_queue, tcm_loop_can_queue, uint, 0644);

/* Default per-LUN queue depth applied in tcm_loop_driver_probe(). */
static unsigned int tcm_loop_cmd_per_lun = 1024;
module_param_named(cmd_per_lun, tcm_loop_cmd_per_lun, uint, 0644);
57
58 /*
59 * Called from struct target_core_fabric_ops->check_stop_free()
60 */
/*
 * Fabric ->check_stop_free() callback: release the command back to the
 * target core as soon as it has finished with it.
 */
static int tcm_loop_check_stop_free(struct se_cmd *se_cmd)
{
	return transport_generic_free_cmd(se_cmd, 0);
}
65
tcm_loop_release_cmd(struct se_cmd * se_cmd)66 static void tcm_loop_release_cmd(struct se_cmd *se_cmd)
67 {
68 struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
69 struct tcm_loop_cmd, tl_se_cmd);
70 struct scsi_cmnd *sc = tl_cmd->sc;
71
72 if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
73 kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
74 else
75 scsi_done(sc);
76 }
77
/* /proc/scsi show_info handler for the emulated host; emits a fixed tag. */
static int tcm_loop_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	seq_puts(m, "tcm_loop_proc_info()\n");
	return 0;
}
83
static int tcm_loop_driver_probe(struct device *);
static void tcm_loop_driver_remove(struct device *);

/* Virtual LLD bus that the emulated tcm_loop adapter devices hang off of. */
static const struct bus_type tcm_loop_lld_bus = {
	.name = "tcm_loop_bus",
	.probe = tcm_loop_driver_probe,
	.remove = tcm_loop_driver_remove,
};

/* Driver bound to every adapter device registered on tcm_loop_lld_bus. */
static struct device_driver tcm_loop_driverfs = {
	.name = "tcm_loop",
	.bus = &tcm_loop_lld_bus,
};
/*
 * Used with root_device_register() in tcm_loop_alloc_core_bus() below
 */
static struct device *tcm_loop_primary;
101
/*
 * Map an incoming struct scsi_cmnd onto its embedded struct se_cmd and
 * submit it to the target core via the I_T nexus of the addressed TPG.
 * On any setup failure the command is completed back to the midlayer with
 * an appropriate host byte instead.
 */
static void tcm_loop_target_queue_cmd(struct tcm_loop_cmd *tl_cmd)
{
	struct se_cmd *se_cmd = &tl_cmd->tl_se_cmd;
	struct scsi_cmnd *sc = tl_cmd->sc;
	struct tcm_loop_nexus *tl_nexus;
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;
	struct scatterlist *sgl_bidi = NULL;
	u32 sgl_bidi_count = 0, transfer_length;

	/* hostdata holds a single pointer back to the owning tcm_loop_hba */
	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];

	/*
	 * Ensure that this tl_tpg reference from the incoming sc->device->id
	 * has already been configured via tcm_loop_make_naa_tpg().
	 */
	if (!tl_tpg->tl_hba) {
		set_host_byte(sc, DID_NO_CONNECT);
		goto out_done;
	}
	if (tl_tpg->tl_transport_status == TCM_TRANSPORT_OFFLINE) {
		set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
		goto out_done;
	}
	tl_nexus = tl_tpg->tl_nexus;
	if (!tl_nexus) {
		scmd_printk(KERN_ERR, sc,
			    "TCM_Loop I_T Nexus does not exist\n");
		set_host_byte(sc, DID_ERROR);
		goto out_done;
	}

	transfer_length = scsi_transfer_length(sc);
	if (!scsi_prot_sg_count(sc) &&
	    scsi_get_prot_op(sc) != SCSI_PROT_NORMAL) {
		se_cmd->prot_pto = true;
		/*
		 * loopback transport doesn't support
		 * WRITE_GENERATE, READ_STRIP protection
		 * information operations, go ahead unprotected.
		 */
		transfer_length = scsi_bufflen(sc);
	}

	se_cmd->tag = tl_cmd->sc_cmd_tag;
	target_init_cmd(se_cmd, tl_nexus->se_sess, &tl_cmd->tl_sense_buf[0],
			tl_cmd->sc->device->lun, transfer_length,
			TCM_SIMPLE_TAG, sc->sc_data_direction, 0);

	/*
	 * NOTE(review): a non-zero return from target_submit_prep() is
	 * treated as "the core now owns command completion" — no scsi_done()
	 * here; confirm against the target_submit_prep() contract.
	 */
	if (target_submit_prep(se_cmd, sc->cmnd, scsi_sglist(sc),
			       scsi_sg_count(sc), sgl_bidi, sgl_bidi_count,
			       scsi_prot_sglist(sc), scsi_prot_sg_count(sc),
			       GFP_ATOMIC))
		return;

	target_submit(se_cmd);
	return;

out_done:
	scsi_done(sc);
}
164
165 /*
166 * ->queuecommand can be and usually is called from interrupt context, so
167 * defer the actual submission to a workqueue.
168 */
static enum scsi_qc_status tcm_loop_queuecommand(struct Scsi_Host *sh,
		struct scsi_cmnd *sc)
{
	struct tcm_loop_cmd *tl_cmd = scsi_cmd_priv(sc);

	pr_debug("%s() %d:%d:%d:%llu got CDB: 0x%02x scsi_buf_len: %u\n",
		 __func__, sc->device->host->host_no, sc->device->id,
		 sc->device->channel, sc->device->lun, sc->cmnd[0],
		 scsi_bufflen(sc));

	/*
	 * The per-command private area is recycled by blk-mq, so clear any
	 * state left over from a previous command before reuse.
	 */
	memset(tl_cmd, 0, sizeof(*tl_cmd));
	tl_cmd->sc = sc;
	/* The unique blk-mq tag doubles as the target core command tag. */
	tl_cmd->sc_cmd_tag = blk_mq_unique_tag(scsi_cmd_to_rq(sc));

	tcm_loop_target_queue_cmd(tl_cmd);
	return 0;
}
186
187 /*
188 * Called from SCSI EH process context to issue a LUN_RESET TMR
189 * to struct scsi_device
190 */
/*
 * Build and submit a task management request (ABORT_TASK / LUN_RESET) on
 * the TPG's I_T nexus and wait for its completion. Returns the TMR
 * response code, or TMR_FUNCTION_FAILED when the request could not be
 * issued at all.
 */
static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
			      u64 lun, int task, enum tcm_tmreq_table tmr)
{
	struct tcm_loop_nexus *tl_nexus = tl_tpg->tl_nexus;
	struct tcm_loop_cmd *tl_cmd;
	struct se_cmd *se_cmd;
	int ret = TMR_FUNCTION_FAILED;

	if (!tl_nexus) {
		pr_err("Unable to perform device reset without active I_T Nexus\n");
		return ret;
	}

	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
	if (!tl_cmd)
		return ret;

	init_completion(&tl_cmd->tmr_done);
	se_cmd = &tl_cmd->tl_se_cmd;

	if (target_submit_tmr(se_cmd, tl_nexus->se_sess, tl_cmd->tl_sense_buf,
			      lun, NULL, tmr, GFP_KERNEL, task,
			      TARGET_SCF_ACK_KREF) < 0) {
		/* Submission failed: the core never took ownership, free here. */
		kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
		return ret;
	}

	/* tcm_loop_queue_tm_rsp() completes tmr_done when the TMR finishes. */
	wait_for_completion(&tl_cmd->tmr_done);
	ret = se_cmd->se_tmr_req->response;
	target_put_sess_cmd(se_cmd);
	return ret;
}
234
tcm_loop_abort_task(struct scsi_cmnd * sc)235 static int tcm_loop_abort_task(struct scsi_cmnd *sc)
236 {
237 struct tcm_loop_hba *tl_hba;
238 struct tcm_loop_tpg *tl_tpg;
239 int ret;
240
241 /*
242 * Locate the tcm_loop_hba_t pointer
243 */
244 tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
245 tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
246 ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
247 blk_mq_unique_tag(scsi_cmd_to_rq(sc)),
248 TMR_ABORT_TASK);
249 return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
250 }
251
252 /*
253 * Called from SCSI EH process context to issue a LUN_RESET TMR
254 * to struct scsi_device
255 */
tcm_loop_device_reset(struct scsi_cmnd * sc)256 static int tcm_loop_device_reset(struct scsi_cmnd *sc)
257 {
258 struct tcm_loop_hba *tl_hba;
259 struct tcm_loop_tpg *tl_tpg;
260 int ret;
261
262 /*
263 * Locate the tcm_loop_hba_t pointer
264 */
265 tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
266 tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
267
268 ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
269 0, TMR_LUN_RESET);
270 return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
271 }
272
tcm_loop_flush_work_iter(struct request * rq,void * data)273 static bool tcm_loop_flush_work_iter(struct request *rq, void *data)
274 {
275 struct scsi_cmnd *sc = blk_mq_rq_to_pdu(rq);
276 struct tcm_loop_cmd *tl_cmd = scsi_cmd_priv(sc);
277 struct se_cmd *se_cmd = &tl_cmd->tl_se_cmd;
278
279 flush_work(&se_cmd->work);
280 return true;
281 }
282
/*
 * SCSI EH target reset handler: drain target-core owned commands with a
 * LUN_RESET TMR, flush any still-pending completion work, then force the
 * TPG back online. Returns SUCCESS/FAILED per the SCSI EH contract.
 */
static int tcm_loop_target_reset(struct scsi_cmnd *sc)
{
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;
	struct Scsi_Host *sh = sc->device->host;
	int ret;

	/*
	 * Locate the tcm_loop_hba_t pointer
	 */
	tl_hba = *(struct tcm_loop_hba **)shost_priv(sh);
	if (!tl_hba) {
		pr_err("Unable to perform device reset without active I_T Nexus\n");
		return FAILED;
	}
	/*
	 * Locate the tl_tpg pointer from TargetID in sc->device->id
	 */
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
	if (!tl_tpg)
		return FAILED;

	/*
	 * Issue a LUN_RESET to drain all commands that the target core
	 * knows about. This handles commands not yet marked CMD_T_COMPLETE.
	 */
	ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun, 0, TMR_LUN_RESET);
	if (ret != TMR_FUNCTION_COMPLETE)
		return FAILED;

	/*
	 * Flush any deferred target core completion work that may still be
	 * queued. Commands that already had CMD_T_COMPLETE set before the TMR
	 * are skipped by the TMR drain, but their async completion work
	 * (transport_lun_remove_cmd → percpu_ref_put, release_cmd → scsi_done)
	 * may still be pending in target_completion_wq.
	 *
	 * The SCSI EH will reuse in-flight scsi_cmnd structures for recovery
	 * commands (e.g. TUR) immediately after this handler returns SUCCESS —
	 * if deferred work is still pending, the memset in queuecommand would
	 * zero the se_cmd while the work accesses it, leaking the LUN
	 * percpu_ref and hanging configfs unlink forever.
	 *
	 * Use blk_mq_tagset_busy_iter() to find all started requests and
	 * flush_work() on each — the same pattern used by mpi3mr, scsi_debug,
	 * and other SCSI drivers to drain outstanding commands during reset.
	 */
	blk_mq_tagset_busy_iter(&sh->tag_set, tcm_loop_flush_work_iter, NULL);

	/* Reset succeeded: bring the transport back online unconditionally. */
	tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
	return SUCCESS;
}
335
/*
 * SCSI midlayer host template for the emulated loopback LLD. Queue sizing
 * fields (can_queue, cmd_per_lun, nr_hw_queues) are filled per-host from
 * the module parameters in tcm_loop_driver_probe().
 */
static const struct scsi_host_template tcm_loop_driver_template = {
	.show_info = tcm_loop_show_info,
	.proc_name = "tcm_loopback",
	.name = "TCM_Loopback",
	.queuecommand = tcm_loop_queuecommand,
	.change_queue_depth = scsi_change_queue_depth,
	.eh_abort_handler = tcm_loop_abort_task,
	.eh_device_reset_handler = tcm_loop_device_reset,
	.eh_target_reset_handler = tcm_loop_target_reset,
	.this_id = -1,
	.sg_tablesize = 256,
	.max_sectors = 0xFFFF,
	.dma_boundary = PAGE_SIZE - 1,
	.module = THIS_MODULE,
	.track_queue_depth = 1,
	/* Per-command private area used as struct tcm_loop_cmd. */
	.cmd_size = sizeof(struct tcm_loop_cmd),
};
353
/*
 * Bus ->probe(): allocate and register the emulated struct Scsi_Host for
 * an adapter device created via configfs.
 */
static int tcm_loop_driver_probe(struct device *dev)
{
	struct tcm_loop_hba *tl_hba;
	struct Scsi_Host *sh;
	int error, host_prot;

	tl_hba = to_tcm_loop_hba(dev);

	sh = scsi_host_alloc(&tcm_loop_driver_template,
			sizeof(struct tcm_loop_hba));
	if (!sh) {
		pr_err("Unable to allocate struct scsi_host\n");
		return -ENODEV;
	}
	tl_hba->sh = sh;

	/*
	 * Assign the struct tcm_loop_hba pointer to struct Scsi_Host->hostdata
	 */
	*((struct tcm_loop_hba **)sh->hostdata) = tl_hba;
	/*
	 * Setup single ID, Channel and LUN for now..
	 */
	sh->max_id = 2;
	sh->max_lun = 0;
	sh->max_channel = 0;
	sh->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;
	/* Queue geometry comes from the writable module parameters. */
	sh->nr_hw_queues = tcm_loop_nr_hw_queues;
	sh->can_queue = tcm_loop_can_queue;
	sh->cmd_per_lun = tcm_loop_cmd_per_lun;

	/* Advertise every DIF/DIX protection type with CRC guard tags. */
	host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
		SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
		SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;

	scsi_host_set_prot(sh, host_prot);
	scsi_host_set_guard(sh, SHOST_DIX_GUARD_CRC);

	error = scsi_add_host(sh, &tl_hba->dev);
	if (error) {
		pr_err("%s: scsi_add_host failed\n", __func__);
		scsi_host_put(sh);
		return -ENODEV;
	}
	return 0;
}
400
tcm_loop_driver_remove(struct device * dev)401 static void tcm_loop_driver_remove(struct device *dev)
402 {
403 struct tcm_loop_hba *tl_hba;
404 struct Scsi_Host *sh;
405
406 tl_hba = to_tcm_loop_hba(dev);
407 sh = tl_hba->sh;
408
409 scsi_remove_host(sh);
410 scsi_host_put(sh);
411 }
412
/* Device ->release(): frees the HBA once the last reference is dropped. */
static void tcm_loop_release_adapter(struct device *dev)
{
	kfree(to_tcm_loop_hba(dev));
}
419
420 /*
421 * Called from tcm_loop_make_scsi_hba() in tcm_loop_configfs.c
422 */
tcm_loop_setup_hba_bus(struct tcm_loop_hba * tl_hba,int tcm_loop_host_id)423 static int tcm_loop_setup_hba_bus(struct tcm_loop_hba *tl_hba, int tcm_loop_host_id)
424 {
425 int ret;
426
427 tl_hba->dev.bus = &tcm_loop_lld_bus;
428 tl_hba->dev.parent = tcm_loop_primary;
429 tl_hba->dev.release = &tcm_loop_release_adapter;
430 dev_set_name(&tl_hba->dev, "tcm_loop_adapter_%d", tcm_loop_host_id);
431
432 ret = device_register(&tl_hba->dev);
433 if (ret) {
434 pr_err("device_register() failed for tl_hba->dev: %d\n", ret);
435 put_device(&tl_hba->dev);
436 return -ENODEV;
437 }
438
439 return 0;
440 }
441
442 /*
443 * Called from tcm_loop_fabric_init() in tcl_loop_fabric.c to load the emulated
444 * tcm_loop SCSI bus.
445 */
/*
 * Register the root device, virtual bus, and driver that back the
 * emulated tcm_loop SCSI topology. Unwinds in reverse order on failure.
 */
static int tcm_loop_alloc_core_bus(void)
{
	int ret;

	tcm_loop_primary = root_device_register("tcm_loop_0");
	if (IS_ERR(tcm_loop_primary)) {
		pr_err("Unable to allocate tcm_loop_primary\n");
		return PTR_ERR(tcm_loop_primary);
	}

	ret = bus_register(&tcm_loop_lld_bus);
	if (ret) {
		pr_err("bus_register() failed for tcm_loop_lld_bus\n");
		goto dev_unreg;
	}

	ret = driver_register(&tcm_loop_driverfs);
	if (ret) {
		pr_err("driver_register() failed for tcm_loop_driverfs\n");
		goto bus_unreg;
	}

	pr_debug("Initialized TCM Loop Core Bus\n");
	return ret;

bus_unreg:
	bus_unregister(&tcm_loop_lld_bus);
dev_unreg:
	root_device_unregister(tcm_loop_primary);
	return ret;
}
477
/* Mirror of tcm_loop_alloc_core_bus(): unregister driver, bus, root device. */
static void tcm_loop_release_core_bus(void)
{
	driver_unregister(&tcm_loop_driverfs);
	bus_unregister(&tcm_loop_lld_bus);
	root_device_unregister(tcm_loop_primary);

	pr_debug("Releasing TCM Loop Core BUS\n");
}
486
/* Convert a generic se_portal_group back to its tcm_loop container. */
static inline struct tcm_loop_tpg *tl_tpg(struct se_portal_group *se_tpg)
{
	return container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
}
491
tcm_loop_get_endpoint_wwn(struct se_portal_group * se_tpg)492 static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg)
493 {
494 /*
495 * Return the passed NAA identifier for the Target Port
496 */
497 return &tl_tpg(se_tpg)->tl_hba->tl_wwn_address[0];
498 }
499
tcm_loop_get_tag(struct se_portal_group * se_tpg)500 static u16 tcm_loop_get_tag(struct se_portal_group *se_tpg)
501 {
502 /*
503 * This Tag is used when forming SCSI Name identifier in EVPD=1 0x83
504 * to represent the SCSI Target Port.
505 */
506 return tl_tpg(se_tpg)->tl_tpgt;
507 }
508
509 /*
510 * Returning (1) here allows for target_core_mod struct se_node_acl to be generated
511 * based upon the incoming fabric dependent SCSI Initiator Port
512 */
static int tcm_loop_check_demo_mode(struct se_portal_group *se_tpg)
{
	return 1;	/* always allow demo-mode node ACL generation */
}
517
tcm_loop_check_prot_fabric_only(struct se_portal_group * se_tpg)518 static int tcm_loop_check_prot_fabric_only(struct se_portal_group *se_tpg)
519 {
520 struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
521 tl_se_tpg);
522 return tl_tpg->tl_fabric_prot_type;
523 }
524
/* Fabric session index: tcm_loop always reports a constant 1. */
static u32 tcm_loop_sess_get_index(struct se_session *se_sess)
{
	return 1;
}
529
tcm_loop_get_cmd_state(struct se_cmd * se_cmd)530 static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd)
531 {
532 struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
533 struct tcm_loop_cmd, tl_se_cmd);
534
535 return tl_cmd->sc_cmd_state;
536 }
537
static int tcm_loop_write_pending(struct se_cmd *se_cmd)
{
	/*
	 * Since Linux/SCSI has already sent down a struct scsi_cmnd
	 * sc->sc_data_direction of DMA_TO_DEVICE with struct scatterlist array
	 * memory, and memory has already been mapped to struct se_cmd->t_mem_list
	 * format with transport_generic_map_mem_to_cmd().
	 *
	 * We now tell TCM to add this WRITE CDB directly into the TCM storage
	 * object execution queue.
	 */
	target_execute_cmd(se_cmd);
	return 0;
}
552
/*
 * Common completion path for data-in and status: translate the se_cmd
 * result (including any sense data and residual) onto the originating
 * scsi_cmnd. 'func' is the caller's name, used only for debug logging.
 */
static int tcm_loop_queue_data_or_status(const char *func,
		struct se_cmd *se_cmd, u8 scsi_status)
{
	struct tcm_loop_cmd *tl_cmd =
		container_of(se_cmd, struct tcm_loop_cmd, tl_se_cmd);
	struct scsi_cmnd *sc = tl_cmd->sc;

	pr_debug("%s() called for scsi_cmnd: %p cdb: 0x%02x\n",
		 func, sc, sc->cmnd[0]);

	if (se_cmd->sense_buffer &&
	    (se_cmd->se_cmd_flags &
	     (SCF_TRANSPORT_TASK_SENSE | SCF_EMULATED_TASK_SENSE))) {
		/* Propagate sense data and force CHECK CONDITION status. */
		memcpy(sc->sense_buffer, se_cmd->sense_buffer,
		       SCSI_SENSE_BUFFERSIZE);
		sc->result = SAM_STAT_CHECK_CONDITION;
	} else {
		sc->result = scsi_status;
	}

	set_host_byte(sc, DID_OK);
	if (se_cmd->se_cmd_flags & (SCF_OVERFLOW_BIT | SCF_UNDERFLOW_BIT))
		scsi_set_resid(sc, se_cmd->residual_count);
	return 0;
}
579
/* Data-in completion: report GOOD status back through the common path. */
static int tcm_loop_queue_data_in(struct se_cmd *se_cmd)
{
	return tcm_loop_queue_data_or_status(__func__, se_cmd, SAM_STAT_GOOD);
}
584
/* Status completion: report the se_cmd's SCSI status via the common path. */
static int tcm_loop_queue_status(struct se_cmd *se_cmd)
{
	return tcm_loop_queue_data_or_status(__func__,
					     se_cmd, se_cmd->scsi_status);
}
590
tcm_loop_queue_tm_rsp(struct se_cmd * se_cmd)591 static void tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd)
592 {
593 struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
594 struct tcm_loop_cmd, tl_se_cmd);
595
596 /* Wake up tcm_loop_issue_tmr(). */
597 complete(&tl_cmd->tmr_done);
598 }
599
/* No loopback-specific handling is needed when a command is aborted. */
static void tcm_loop_aborted_task(struct se_cmd *se_cmd)
{
}
604
tcm_loop_dump_proto_id(struct tcm_loop_hba * tl_hba)605 static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba)
606 {
607 switch (tl_hba->tl_proto_id) {
608 case SCSI_PROTOCOL_SAS:
609 return "SAS";
610 case SCSI_PROTOCOL_FCP:
611 return "FCP";
612 case SCSI_PROTOCOL_ISCSI:
613 return "iSCSI";
614 default:
615 break;
616 }
617
618 return "Unknown";
619 }
620
621 /* Start items for tcm_loop_port_cit */
622
tcm_loop_port_link(struct se_portal_group * se_tpg,struct se_lun * lun)623 static int tcm_loop_port_link(
624 struct se_portal_group *se_tpg,
625 struct se_lun *lun)
626 {
627 struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
628 struct tcm_loop_tpg, tl_se_tpg);
629 struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
630
631 atomic_inc_mb(&tl_tpg->tl_tpg_port_count);
632 /*
633 * Add Linux/SCSI struct scsi_device by HCTL
634 */
635 scsi_add_device(tl_hba->sh, 0, tl_tpg->tl_tpgt, lun->unpacked_lun);
636
637 pr_debug("TCM_Loop_ConfigFS: Port Link Successful\n");
638 return 0;
639 }
640
tcm_loop_port_unlink(struct se_portal_group * se_tpg,struct se_lun * se_lun)641 static void tcm_loop_port_unlink(
642 struct se_portal_group *se_tpg,
643 struct se_lun *se_lun)
644 {
645 struct scsi_device *sd;
646 struct tcm_loop_hba *tl_hba;
647 struct tcm_loop_tpg *tl_tpg;
648
649 tl_tpg = container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
650 tl_hba = tl_tpg->tl_hba;
651
652 sd = scsi_device_lookup(tl_hba->sh, 0, tl_tpg->tl_tpgt,
653 se_lun->unpacked_lun);
654 if (!sd) {
655 pr_err("Unable to locate struct scsi_device for %d:%d:%llu\n",
656 0, tl_tpg->tl_tpgt, se_lun->unpacked_lun);
657 return;
658 }
659 /*
660 * Remove Linux/SCSI struct scsi_device by HCTL
661 */
662 scsi_remove_device(sd);
663 scsi_device_put(sd);
664
665 atomic_dec_mb(&tl_tpg->tl_tpg_port_count);
666
667 pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n");
668 }
669
670 /* End items for tcm_loop_port_cit */
671
tcm_loop_tpg_attrib_fabric_prot_type_show(struct config_item * item,char * page)672 static ssize_t tcm_loop_tpg_attrib_fabric_prot_type_show(
673 struct config_item *item, char *page)
674 {
675 struct se_portal_group *se_tpg = attrib_to_tpg(item);
676 struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
677 tl_se_tpg);
678
679 return sprintf(page, "%d\n", tl_tpg->tl_fabric_prot_type);
680 }
681
tcm_loop_tpg_attrib_fabric_prot_type_store(struct config_item * item,const char * page,size_t count)682 static ssize_t tcm_loop_tpg_attrib_fabric_prot_type_store(
683 struct config_item *item, const char *page, size_t count)
684 {
685 struct se_portal_group *se_tpg = attrib_to_tpg(item);
686 struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
687 tl_se_tpg);
688 unsigned long val;
689 int ret = kstrtoul(page, 0, &val);
690
691 if (ret) {
692 pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
693 return ret;
694 }
695 if (val != 0 && val != 1 && val != 3) {
696 pr_err("Invalid qla2xxx fabric_prot_type: %lu\n", val);
697 return -EINVAL;
698 }
699 tl_tpg->tl_fabric_prot_type = val;
700
701 return count;
702 }
703
/* Generates tcm_loop_tpg_attrib_attr_fabric_prot_type from the pair above. */
CONFIGFS_ATTR(tcm_loop_tpg_attrib_, fabric_prot_type);

/* Attributes exposed under each TPG's attrib/ configfs group. */
static struct configfs_attribute *tcm_loop_tpg_attrib_attrs[] = {
	&tcm_loop_tpg_attrib_attr_fabric_prot_type,
	NULL,
};
710
711 /* Start items for tcm_loop_nexus_cit */
712
tcm_loop_alloc_sess_cb(struct se_portal_group * se_tpg,struct se_session * se_sess,void * p)713 static int tcm_loop_alloc_sess_cb(struct se_portal_group *se_tpg,
714 struct se_session *se_sess, void *p)
715 {
716 struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
717 struct tcm_loop_tpg, tl_se_tpg);
718
719 tl_tpg->tl_nexus = p;
720 return 0;
721 }
722
tcm_loop_make_nexus(struct tcm_loop_tpg * tl_tpg,const char * name)723 static int tcm_loop_make_nexus(
724 struct tcm_loop_tpg *tl_tpg,
725 const char *name)
726 {
727 struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
728 struct tcm_loop_nexus *tl_nexus;
729 int ret;
730
731 if (tl_tpg->tl_nexus) {
732 pr_debug("tl_tpg->tl_nexus already exists\n");
733 return -EEXIST;
734 }
735
736 tl_nexus = kzalloc_obj(*tl_nexus);
737 if (!tl_nexus)
738 return -ENOMEM;
739
740 tl_nexus->se_sess = target_setup_session(&tl_tpg->tl_se_tpg, 0, 0,
741 TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
742 name, tl_nexus, tcm_loop_alloc_sess_cb);
743 if (IS_ERR(tl_nexus->se_sess)) {
744 ret = PTR_ERR(tl_nexus->se_sess);
745 kfree(tl_nexus);
746 return ret;
747 }
748
749 pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated %s Initiator Port: %s\n",
750 tcm_loop_dump_proto_id(tl_hba), name);
751 return 0;
752 }
753
tcm_loop_drop_nexus(struct tcm_loop_tpg * tpg)754 static int tcm_loop_drop_nexus(
755 struct tcm_loop_tpg *tpg)
756 {
757 struct se_session *se_sess;
758 struct tcm_loop_nexus *tl_nexus;
759
760 tl_nexus = tpg->tl_nexus;
761 if (!tl_nexus)
762 return -ENODEV;
763
764 se_sess = tl_nexus->se_sess;
765 if (!se_sess)
766 return -ENODEV;
767
768 if (atomic_read(&tpg->tl_tpg_port_count)) {
769 pr_err("Unable to remove TCM_Loop I_T Nexus with active TPG port count: %d\n",
770 atomic_read(&tpg->tl_tpg_port_count));
771 return -EPERM;
772 }
773
774 pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated %s Initiator Port: %s\n",
775 tcm_loop_dump_proto_id(tpg->tl_hba),
776 tl_nexus->se_sess->se_node_acl->initiatorname);
777 /*
778 * Release the SCSI I_T Nexus to the emulated Target Port
779 */
780 target_remove_session(se_sess);
781 tpg->tl_nexus = NULL;
782 kfree(tl_nexus);
783 return 0;
784 }
785
786 /* End items for tcm_loop_nexus_cit */
787
tcm_loop_tpg_nexus_show(struct config_item * item,char * page)788 static ssize_t tcm_loop_tpg_nexus_show(struct config_item *item, char *page)
789 {
790 struct se_portal_group *se_tpg = to_tpg(item);
791 struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
792 struct tcm_loop_tpg, tl_se_tpg);
793 struct tcm_loop_nexus *tl_nexus;
794 ssize_t ret;
795
796 tl_nexus = tl_tpg->tl_nexus;
797 if (!tl_nexus)
798 return -ENODEV;
799
800 ret = snprintf(page, PAGE_SIZE, "%s\n",
801 tl_nexus->se_sess->se_node_acl->initiatorname);
802
803 return ret;
804 }
805
/*
 * Create or tear down the TPG's I_T nexus from a configfs write. Writing
 * the literal "NULL" drops the active nexus; otherwise the buffer must be
 * an initiator WWN whose prefix ("naa.", "fc." or "iqn.") matches the
 * HBA's protocol id set in tcm_loop_make_scsi_hba().
 */
static ssize_t tcm_loop_tpg_nexus_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
			struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
	unsigned char i_port[TL_WWN_ADDR_LEN], *ptr, *port_ptr;
	int ret;
	/*
	 * Shutdown the active I_T nexus if 'NULL' is passed..
	 */
	if (!strncmp(page, "NULL", 4)) {
		ret = tcm_loop_drop_nexus(tl_tpg);
		return (!ret) ? count : ret;
	}
	/*
	 * Otherwise make sure the passed virtual Initiator port WWN matches
	 * the fabric protocol_id set in tcm_loop_make_scsi_hba(), and call
	 * tcm_loop_make_nexus()
	 */
	if (strlen(page) >= TL_WWN_ADDR_LEN) {
		pr_err("Emulated NAA Sas Address: %s, exceeds max: %d\n",
		       page, TL_WWN_ADDR_LEN);
		return -EINVAL;
	}
	/* Take a bounded, NUL-terminated copy we can safely modify below. */
	snprintf(&i_port[0], TL_WWN_ADDR_LEN, "%s", page);

	ptr = strstr(i_port, "naa.");
	if (ptr) {
		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_SAS) {
			pr_err("Passed SAS Initiator Port %s does not match target port protoid: %s\n",
			       i_port, tcm_loop_dump_proto_id(tl_hba));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	ptr = strstr(i_port, "fc.");
	if (ptr) {
		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_FCP) {
			pr_err("Passed FCP Initiator Port %s does not match target port protoid: %s\n",
			       i_port, tcm_loop_dump_proto_id(tl_hba));
			return -EINVAL;
		}
		port_ptr = &i_port[3]; /* Skip over "fc." */
		goto check_newline;
	}
	ptr = strstr(i_port, "iqn.");
	if (ptr) {
		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_ISCSI) {
			pr_err("Passed iSCSI Initiator Port %s does not match target port protoid: %s\n",
			       i_port, tcm_loop_dump_proto_id(tl_hba));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	pr_err("Unable to locate prefix for emulated Initiator Port: %s\n",
	       i_port);
	return -EINVAL;
	/*
	 * Clear any trailing newline for the NAA WWN
	 */
check_newline:
	if (i_port[strlen(i_port)-1] == '\n')
		i_port[strlen(i_port)-1] = '\0';

	ret = tcm_loop_make_nexus(tl_tpg, port_ptr);
	if (ret < 0)
		return ret;

	return count;
}
880
tcm_loop_tpg_transport_status_show(struct config_item * item,char * page)881 static ssize_t tcm_loop_tpg_transport_status_show(struct config_item *item,
882 char *page)
883 {
884 struct se_portal_group *se_tpg = to_tpg(item);
885 struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
886 struct tcm_loop_tpg, tl_se_tpg);
887 const char *status = NULL;
888 ssize_t ret = -EINVAL;
889
890 switch (tl_tpg->tl_transport_status) {
891 case TCM_TRANSPORT_ONLINE:
892 status = "online";
893 break;
894 case TCM_TRANSPORT_OFFLINE:
895 status = "offline";
896 break;
897 default:
898 break;
899 }
900
901 if (status)
902 ret = snprintf(page, PAGE_SIZE, "%s\n", status);
903
904 return ret;
905 }
906
tcm_loop_tpg_transport_status_store(struct config_item * item,const char * page,size_t count)907 static ssize_t tcm_loop_tpg_transport_status_store(struct config_item *item,
908 const char *page, size_t count)
909 {
910 struct se_portal_group *se_tpg = to_tpg(item);
911 struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
912 struct tcm_loop_tpg, tl_se_tpg);
913
914 if (!strncmp(page, "online", 6)) {
915 tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
916 return count;
917 }
918 if (!strncmp(page, "offline", 7)) {
919 tl_tpg->tl_transport_status = TCM_TRANSPORT_OFFLINE;
920 if (tl_tpg->tl_nexus) {
921 struct se_session *tl_sess = tl_tpg->tl_nexus->se_sess;
922
923 core_allocate_nexus_loss_ua(tl_sess->se_node_acl);
924 }
925 return count;
926 }
927 return -EINVAL;
928 }
929
tcm_loop_tpg_address_show(struct config_item * item,char * page)930 static ssize_t tcm_loop_tpg_address_show(struct config_item *item,
931 char *page)
932 {
933 struct se_portal_group *se_tpg = to_tpg(item);
934 struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
935 struct tcm_loop_tpg, tl_se_tpg);
936 struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
937
938 if (!tl_hba->sh)
939 return -ENODEV;
940
941 return snprintf(page, PAGE_SIZE, "%d:0:%d\n",
942 tl_hba->sh->host_no, tl_tpg->tl_tpgt);
943 }
944
/* Generate the configfs attribute objects from the show/store pairs above. */
CONFIGFS_ATTR(tcm_loop_tpg_, nexus);
CONFIGFS_ATTR(tcm_loop_tpg_, transport_status);
CONFIGFS_ATTR_RO(tcm_loop_tpg_, address);

/* Attributes exposed directly under each tpgt_N/ configfs group. */
static struct configfs_attribute *tcm_loop_tpg_attrs[] = {
	&tcm_loop_tpg_attr_nexus,
	&tcm_loop_tpg_attr_transport_status,
	&tcm_loop_tpg_attr_address,
	NULL,
};
955
956 /* Start items for tcm_loop_naa_cit */
957
tcm_loop_make_naa_tpg(struct se_wwn * wwn,const char * name)958 static struct se_portal_group *tcm_loop_make_naa_tpg(struct se_wwn *wwn,
959 const char *name)
960 {
961 struct tcm_loop_hba *tl_hba = container_of(wwn,
962 struct tcm_loop_hba, tl_hba_wwn);
963 struct tcm_loop_tpg *tl_tpg;
964 int ret;
965 unsigned long tpgt;
966
967 if (strstr(name, "tpgt_") != name) {
968 pr_err("Unable to locate \"tpgt_#\" directory group\n");
969 return ERR_PTR(-EINVAL);
970 }
971 if (kstrtoul(name+5, 10, &tpgt))
972 return ERR_PTR(-EINVAL);
973
974 if (tpgt >= TL_TPGS_PER_HBA) {
975 pr_err("Passed tpgt: %lu exceeds TL_TPGS_PER_HBA: %u\n",
976 tpgt, TL_TPGS_PER_HBA);
977 return ERR_PTR(-EINVAL);
978 }
979 tl_tpg = &tl_hba->tl_hba_tpgs[tpgt];
980 tl_tpg->tl_hba = tl_hba;
981 tl_tpg->tl_tpgt = tpgt;
982 /*
983 * Register the tl_tpg as a emulated TCM Target Endpoint
984 */
985 ret = core_tpg_register(wwn, &tl_tpg->tl_se_tpg, tl_hba->tl_proto_id);
986 if (ret < 0)
987 return ERR_PTR(-ENOMEM);
988
989 pr_debug("TCM_Loop_ConfigFS: Allocated Emulated %s Target Port %s,t,0x%04lx\n",
990 tcm_loop_dump_proto_id(tl_hba),
991 config_item_name(&wwn->wwn_group.cg_item), tpgt);
992 return &tl_tpg->tl_se_tpg;
993 }
994
tcm_loop_drop_naa_tpg(struct se_portal_group * se_tpg)995 static void tcm_loop_drop_naa_tpg(
996 struct se_portal_group *se_tpg)
997 {
998 struct se_wwn *wwn = se_tpg->se_tpg_wwn;
999 struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
1000 struct tcm_loop_tpg, tl_se_tpg);
1001 struct tcm_loop_hba *tl_hba;
1002 unsigned short tpgt;
1003
1004 tl_hba = tl_tpg->tl_hba;
1005 tpgt = tl_tpg->tl_tpgt;
1006 /*
1007 * Release the I_T Nexus for the Virtual target link if present
1008 */
1009 tcm_loop_drop_nexus(tl_tpg);
1010 /*
1011 * Deregister the tl_tpg as a emulated TCM Target Endpoint
1012 */
1013 core_tpg_deregister(se_tpg);
1014
1015 tl_tpg->tl_hba = NULL;
1016 tl_tpg->tl_tpgt = 0;
1017
1018 pr_debug("TCM_Loop_ConfigFS: Deallocated Emulated %s Target Port %s,t,0x%04x\n",
1019 tcm_loop_dump_proto_id(tl_hba),
1020 config_item_name(&wwn->wwn_group.cg_item), tpgt);
1021 }
1022
1023 /* End items for tcm_loop_naa_cit */
1024
1025 /* Start items for tcm_loop_cit */
1026
/*
 * configfs callback to create a new emulated Target Port (se_wwn) under
 * the loopback fabric directory.  The emulated SCSI protocol is chosen
 * from the directory name prefix: "naa." -> SAS, "fc." -> FCP,
 * "iqn." -> iSCSI.
 *
 * Returns the new se_wwn on success or an ERR_PTR() value on failure.
 */
static struct se_wwn *tcm_loop_make_scsi_hba(
	struct target_fabric_configfs *tf,
	struct config_group *group,
	const char *name)
{
	struct tcm_loop_hba *tl_hba;
	struct Scsi_Host *sh;
	char *ptr;
	int ret, off = 0;

	tl_hba = kzalloc_obj(*tl_hba);
	if (!tl_hba)
		return ERR_PTR(-ENOMEM);

	/*
	 * Determine the emulated Protocol Identifier and Target Port Name
	 * based on the incoming configfs directory name.
	 */
	ptr = strstr(name, "naa.");
	if (ptr) {
		tl_hba->tl_proto_id = SCSI_PROTOCOL_SAS;
		goto check_len;
	}
	ptr = strstr(name, "fc.");
	if (ptr) {
		tl_hba->tl_proto_id = SCSI_PROTOCOL_FCP;
		off = 3; /* Skip over "fc." */
		goto check_len;
	}
	ptr = strstr(name, "iqn.");
	if (!ptr) {
		pr_err("Unable to locate prefix for emulated Target Port: %s\n",
			name);
		ret = -EINVAL;
		goto out;
	}
	tl_hba->tl_proto_id = SCSI_PROTOCOL_ISCSI;

check_len:
	if (strlen(name) >= TL_WWN_ADDR_LEN) {
		pr_err("Emulated NAA %s Address: %s, exceeds max: %d\n",
			name, tcm_loop_dump_proto_id(tl_hba), TL_WWN_ADDR_LEN);
		ret = -EINVAL;
		goto out;
	}
	/* Copy the WWN (prefix-stripped for "fc."); always NUL-terminated */
	snprintf(&tl_hba->tl_wwn_address[0], TL_WWN_ADDR_LEN, "%s", &name[off]);

	/*
	 * Call device_register(tl_hba->dev) to register the emulated
	 * Linux/SCSI LLD of type struct Scsi_Host at tl_hba->sh after
	 * device_register() callbacks in tcm_loop_driver_probe()
	 */
	ret = tcm_loop_setup_hba_bus(tl_hba, tcm_loop_hba_no_cnt);
	if (ret)
		/*
		 * No kfree() on this path: once device registration has been
		 * attempted, tl_hba is owned by the driver core and is freed
		 * via the device release callback (see tcm_loop_drop_scsi_hba
		 * comment).  NOTE(review): confirm tcm_loop_setup_hba_bus()
		 * drops its device reference on failure.
		 */
		return ERR_PTR(ret);

	sh = tl_hba->sh;
	tcm_loop_hba_no_cnt++;
	pr_debug("TCM_Loop_ConfigFS: Allocated emulated Target %s Address: %s at Linux/SCSI Host ID: %d\n",
		 tcm_loop_dump_proto_id(tl_hba), name, sh->host_no);
	return &tl_hba->tl_hba_wwn;
out:
	/* Allocation-only failure path: tl_hba is still fully owned here */
	kfree(tl_hba);
	return ERR_PTR(ret);
}
1092
tcm_loop_drop_scsi_hba(struct se_wwn * wwn)1093 static void tcm_loop_drop_scsi_hba(
1094 struct se_wwn *wwn)
1095 {
1096 struct tcm_loop_hba *tl_hba = container_of(wwn,
1097 struct tcm_loop_hba, tl_hba_wwn);
1098
1099 pr_debug("TCM_Loop_ConfigFS: Deallocating emulated Target %s Address: %s at Linux/SCSI Host ID: %d\n",
1100 tcm_loop_dump_proto_id(tl_hba), tl_hba->tl_wwn_address,
1101 tl_hba->sh->host_no);
1102 /*
1103 * Call device_unregister() on the original tl_hba->dev.
1104 * tcm_loop_fabric_scsi.c:tcm_loop_release_adapter() will
1105 * release *tl_hba;
1106 */
1107 device_unregister(&tl_hba->dev);
1108 }
1109
1110 /* Start items for tcm_loop_cit */
tcm_loop_wwn_version_show(struct config_item * item,char * page)1111 static ssize_t tcm_loop_wwn_version_show(struct config_item *item, char *page)
1112 {
1113 return sprintf(page, "TCM Loopback Fabric module %s\n", TCM_LOOP_VERSION);
1114 }
1115
/* Generate the read-only configfs show handler for the version attribute */
CONFIGFS_ATTR_RO(tcm_loop_wwn_, version);

/* Attributes exposed at the top (wwn) level of the loopback configfs tree */
static struct configfs_attribute *tcm_loop_wwn_attrs[] = {
	&tcm_loop_wwn_attr_version,
	NULL,
};
1122
1123 /* End items for tcm_loop_cit */
1124
/*
 * Fabric template registered with the target core; wires the loopback
 * callbacks into the generic target fabric interface.
 */
static const struct target_core_fabric_ops loop_ops = {
	.module				= THIS_MODULE,
	.fabric_name			= "loopback",
	/* TPG identity and mode callbacks */
	.tpg_get_wwn			= tcm_loop_get_endpoint_wwn,
	.tpg_get_tag			= tcm_loop_get_tag,
	.tpg_check_demo_mode		= tcm_loop_check_demo_mode,
	.tpg_check_prot_fabric_only	= tcm_loop_check_prot_fabric_only,
	/* Per-command lifecycle and completion callbacks */
	.check_stop_free		= tcm_loop_check_stop_free,
	.release_cmd			= tcm_loop_release_cmd,
	.sess_get_index			= tcm_loop_sess_get_index,
	.write_pending			= tcm_loop_write_pending,
	.get_cmd_state			= tcm_loop_get_cmd_state,
	.queue_data_in			= tcm_loop_queue_data_in,
	.queue_status			= tcm_loop_queue_status,
	.queue_tm_rsp			= tcm_loop_queue_tm_rsp,
	.aborted_task			= tcm_loop_aborted_task,
	/* configfs create/remove callbacks for wwn, tpg, and lun links */
	.fabric_make_wwn		= tcm_loop_make_scsi_hba,
	.fabric_drop_wwn		= tcm_loop_drop_scsi_hba,
	.fabric_make_tpg		= tcm_loop_make_naa_tpg,
	.fabric_drop_tpg		= tcm_loop_drop_naa_tpg,
	.fabric_post_link		= tcm_loop_port_link,
	.fabric_pre_unlink		= tcm_loop_port_unlink,
	/* configfs attribute tables */
	.tfc_wwn_attrs			= tcm_loop_wwn_attrs,
	.tfc_tpg_base_attrs		= tcm_loop_tpg_attrs,
	.tfc_tpg_attrib_attrs		= tcm_loop_tpg_attrib_attrs,
	.default_submit_type		= TARGET_QUEUE_SUBMIT,
	.direct_submit_supp		= 0,
};
1153
tcm_loop_fabric_init(void)1154 static int __init tcm_loop_fabric_init(void)
1155 {
1156 int ret = -ENOMEM;
1157
1158 tcm_loop_cmd_cache = kmem_cache_create("tcm_loop_cmd_cache",
1159 sizeof(struct tcm_loop_cmd),
1160 __alignof__(struct tcm_loop_cmd),
1161 0, NULL);
1162 if (!tcm_loop_cmd_cache) {
1163 pr_debug("kmem_cache_create() for tcm_loop_cmd_cache failed\n");
1164 goto out;
1165 }
1166
1167 ret = tcm_loop_alloc_core_bus();
1168 if (ret)
1169 goto out_destroy_cache;
1170
1171 ret = target_register_template(&loop_ops);
1172 if (ret)
1173 goto out_release_core_bus;
1174
1175 return 0;
1176
1177 out_release_core_bus:
1178 tcm_loop_release_core_bus();
1179 out_destroy_cache:
1180 kmem_cache_destroy(tcm_loop_cmd_cache);
1181 out:
1182 return ret;
1183 }
1184
/*
 * Module exit: unregister the fabric template, tear down the virtual
 * SCSI bus/driver, then free the per-command slab cache (reverse of
 * tcm_loop_fabric_init()).
 */
static void __exit tcm_loop_fabric_exit(void)
{
	target_unregister_template(&loop_ops);
	tcm_loop_release_core_bus();
	kmem_cache_destroy(tcm_loop_cmd_cache);
}
1191
/* Module metadata and init/exit entry point registration */
MODULE_DESCRIPTION("TCM loopback virtual Linux/SCSI fabric module");
MODULE_AUTHOR("Nicholas A. Bellinger <nab@risingtidesystems.com>");
MODULE_LICENSE("GPL");
module_init(tcm_loop_fabric_init);
module_exit(tcm_loop_fabric_exit);
1197