/*******************************************************************************
 *
 * This file contains the Linux/SCSI LLD virtual SCSI initiator driver
 * for emulated SAS initiator ports
 *
 * © Copyright 2011-2013 Datera, Inc.
 *
 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
 *
 * Author: Nicholas A. Bellinger <nab@risingtidesystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 ****************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/configfs.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>

#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

#include "tcm_loop.h"

#define to_tcm_loop_hba(hba)	container_of(hba, struct tcm_loop_hba, dev)

static struct workqueue_struct *tcm_loop_workqueue;
static struct kmem_cache *tcm_loop_cmd_cache;

static int tcm_loop_hba_no_cnt;

static int tcm_loop_queue_status(struct se_cmd *se_cmd);

/*
 * Called from struct target_core_fabric_ops->check_stop_free()
 */
static int tcm_loop_check_stop_free(struct se_cmd *se_cmd)
{
	/*
	 * Do not release struct se_cmd's containing a valid TMR
	 * pointer. These will be released directly in tcm_loop_device_reset()
	 * with transport_generic_free_cmd().
	 */
	if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
		return 0;
	/*
	 * Release the struct se_cmd, which will make a callback to release
	 * struct tcm_loop_cmd * in tcm_loop_release_cmd()
	 */
	transport_generic_free_cmd(se_cmd, 0);
	return 1;
}

static void tcm_loop_release_cmd(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);

	kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
}

static int tcm_loop_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	seq_printf(m, "tcm_loop_proc_info()\n");
	return 0;
}

static int tcm_loop_driver_probe(struct device *);
static int tcm_loop_driver_remove(struct device *);

static int pseudo_lld_bus_match(struct device *dev,
				struct device_driver *dev_driver)
{
	return 1;
}

static struct bus_type tcm_loop_lld_bus = {
	.name			= "tcm_loop_bus",
	.match			= pseudo_lld_bus_match,
	.probe			= tcm_loop_driver_probe,
	.remove			= tcm_loop_driver_remove,
};

static struct device_driver tcm_loop_driverfs = {
	.name			= "tcm_loop",
	.bus			= &tcm_loop_lld_bus,
};
/*
 * Used with root_device_register() in tcm_loop_alloc_core_bus() below
 */
static struct device *tcm_loop_primary;
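
/*
 * Deferred command submission, run in process context from tcm_loop_workqueue.
 * The struct scsi_cmnd data and protection scatterlists are mapped directly
 * into the struct se_cmd via target_submit_cmd_map_sgls(), so no data copy
 * happens on the loopback path.
 */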
static void tcm_loop_submission_work(struct work_struct *work)
{
	struct tcm_loop_cmd *tl_cmd =
		container_of(work, struct tcm_loop_cmd, work);
	struct se_cmd *se_cmd = &tl_cmd->tl_se_cmd;
	struct scsi_cmnd *sc = tl_cmd->sc;
	struct tcm_loop_nexus *tl_nexus;
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;
	struct scatterlist *sgl_bidi = NULL;
	u32 sgl_bidi_count = 0, transfer_length;
	int rc;

	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];

	/*
	 * Ensure that this tl_tpg reference from the incoming sc->device->id
	 * has already been configured via tcm_loop_make_naa_tpg().
	 */
	if (!tl_tpg->tl_hba) {
		set_host_byte(sc, DID_NO_CONNECT);
		goto out_done;
	}
	if (tl_tpg->tl_transport_status == TCM_TRANSPORT_OFFLINE) {
		set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
		goto out_done;
	}
	tl_nexus = tl_tpg->tl_nexus;
	if (!tl_nexus) {
		scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
				" does not exist\n");
		set_host_byte(sc, DID_ERROR);
		goto out_done;
	}
	if (scsi_bidi_cmnd(sc)) {
		struct scsi_data_buffer *sdb = scsi_in(sc);

		sgl_bidi = sdb->table.sgl;
		sgl_bidi_count = sdb->table.nents;
		se_cmd->se_cmd_flags |= SCF_BIDI;
	}

	transfer_length = scsi_transfer_length(sc);
	if (!scsi_prot_sg_count(sc) &&
	    scsi_get_prot_op(sc) != SCSI_PROT_NORMAL) {
		se_cmd->prot_pto = true;
		/*
		 * The loopback transport doesn't support
		 * WRITE_GENERATE, READ_STRIP protection
		 * information operations, go ahead unprotected.
		 */
		transfer_length = scsi_bufflen(sc);
	}

	se_cmd->tag = tl_cmd->sc_cmd_tag;
	rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd,
			&tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun,
			transfer_length, TCM_SIMPLE_TAG,
			sc->sc_data_direction, 0,
			scsi_sglist(sc), scsi_sg_count(sc),
			sgl_bidi, sgl_bidi_count,
			scsi_prot_sglist(sc), scsi_prot_sg_count(sc));
	if (rc < 0) {
		set_host_byte(sc, DID_NO_CONNECT);
		goto out_done;
	}
	return;

out_done:
	kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
	sc->scsi_done(sc);
}

/*
 * ->queuecommand can be and usually is called from interrupt context, so
 * defer the actual submission to a workqueue.
 */
static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
{
	struct tcm_loop_cmd *tl_cmd;

	pr_debug("tcm_loop_queuecommand() %d:%d:%d:%llu got CDB: 0x%02x"
		" scsi_buf_len: %u\n", sc->device->host->host_no,
		sc->device->id, sc->device->channel, sc->device->lun,
		sc->cmnd[0], scsi_bufflen(sc));

	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC);
	if (!tl_cmd) {
		pr_err("Unable to allocate struct tcm_loop_cmd\n");
		set_host_byte(sc, DID_ERROR);
		sc->scsi_done(sc);
		return 0;
	}

	tl_cmd->sc = sc;
	tl_cmd->sc_cmd_tag = sc->request->tag;
	INIT_WORK(&tl_cmd->work, tcm_loop_submission_work);
	queue_work(tcm_loop_workqueue, &tl_cmd->work);
	return 0;
}

/*
 * Called from SCSI EH process context to issue a TMR (ABORT_TASK or
 * LUN_RESET) to struct scsi_device
 */
static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
			      u64 lun, int task, enum tcm_tmreq_table tmr)
{
	struct se_cmd *se_cmd = NULL;
	struct se_session *se_sess;
	struct se_portal_group *se_tpg;
	struct tcm_loop_nexus *tl_nexus;
	struct tcm_loop_cmd *tl_cmd = NULL;
	struct tcm_loop_tmr *tl_tmr = NULL;
	int ret = TMR_FUNCTION_FAILED, rc;

	/*
	 * Locate the tl_nexus and se_sess pointers
	 */
	tl_nexus = tl_tpg->tl_nexus;
	if (!tl_nexus) {
		pr_err("Unable to perform device reset without"
				" active I_T Nexus\n");
		return ret;
	}

	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
	if (!tl_cmd) {
		pr_err("Unable to allocate memory for tl_cmd\n");
		return ret;
	}

	tl_tmr = kzalloc(sizeof(struct tcm_loop_tmr), GFP_KERNEL);
	if (!tl_tmr) {
		pr_err("Unable to allocate memory for tl_tmr\n");
		goto release;
	}
	init_waitqueue_head(&tl_tmr->tl_tmr_wait);

	se_cmd = &tl_cmd->tl_se_cmd;
	se_tpg = &tl_tpg->tl_se_tpg;
	se_sess = tl_tpg->tl_nexus->se_sess;
	/*
	 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
	 */
	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 0,
				DMA_NONE, TCM_SIMPLE_TAG,
				&tl_cmd->tl_sense_buf[0]);

	rc = core_tmr_alloc_req(se_cmd, tl_tmr, tmr, GFP_KERNEL);
	if (rc < 0)
		goto release;

	if (tmr == TMR_ABORT_TASK)
		se_cmd->se_tmr_req->ref_task_tag = task;

	/*
	 * Locate the underlying TCM struct se_lun
	 */
	if (transport_lookup_tmr_lun(se_cmd, lun) < 0) {
		ret = TMR_LUN_DOES_NOT_EXIST;
		goto release;
	}
	/*
	 * Queue the TMR to TCM Core and sleep waiting for
	 * tcm_loop_queue_tm_rsp() to wake us up.
	 */
	transport_generic_handle_tmr(se_cmd);
	wait_event(tl_tmr->tl_tmr_wait, atomic_read(&tl_tmr->tmr_complete));
	/*
	 * The TMR has completed, check the response status and
	 * then release allocations.
	 */
	ret = se_cmd->se_tmr_req->response;
release:
	if (se_cmd)
		transport_generic_free_cmd(se_cmd, 1);
	else
		kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
	kfree(tl_tmr);
	return ret;
}

static int tcm_loop_abort_task(struct scsi_cmnd *sc)
{
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;
	int ret = FAILED;

	/*
	 * Locate the struct tcm_loop_hba pointer
	 */
	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
	ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
				 sc->request->tag, TMR_ABORT_TASK);
	return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
}

/*
 * Called from SCSI EH process context to issue a LUN_RESET TMR
 * to struct scsi_device
 */
static int tcm_loop_device_reset(struct scsi_cmnd *sc)
{
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;
	int ret = FAILED;

	/*
	 * Locate the struct tcm_loop_hba pointer
	 */
	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];

	ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
				 0, TMR_LUN_RESET);
	return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
}

static int tcm_loop_target_reset(struct scsi_cmnd *sc)
{
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;

	/*
	 * Locate the struct tcm_loop_hba pointer
	 */
	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	if (!tl_hba) {
		pr_err("Unable to perform target reset without"
				" active I_T Nexus\n");
		return FAILED;
	}
	/*
	 * Locate the tl_tpg pointer from TargetID in sc->device->id
	 */
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
	if (tl_tpg) {
		tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
		return SUCCESS;
	}
	return FAILED;
}

static int tcm_loop_slave_alloc(struct scsi_device *sd)
{
	set_bit(QUEUE_FLAG_BIDI, &sd->request_queue->queue_flags);
	return 0;
}

static struct scsi_host_template tcm_loop_driver_template = {
	.show_info		= tcm_loop_show_info,
	.proc_name		= "tcm_loopback",
	.name			= "TCM_Loopback",
	.queuecommand		= tcm_loop_queuecommand,
	.change_queue_depth	= scsi_change_queue_depth,
	.eh_abort_handler	= tcm_loop_abort_task,
	.eh_device_reset_handler = tcm_loop_device_reset,
	.eh_target_reset_handler = tcm_loop_target_reset,
	.can_queue		= 1024,
	.this_id		= -1,
	.sg_tablesize		= 256,
	.cmd_per_lun		= 1024,
	.max_sectors		= 0xFFFF,
	.use_clustering		= DISABLE_CLUSTERING,
	.slave_alloc		= tcm_loop_slave_alloc,
	.module			= THIS_MODULE,
	.use_blk_tags		= 1,
	.track_queue_depth	= 1,
};
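
/*
 * Bus probe callback, invoked via device_register() from
 * tcm_loop_setup_hba_bus(). Allocates and registers the emulated
 * struct Scsi_Host and advertises all DIF/DIX protection types;
 * T10-PI handling itself is done by the target core and the
 * configured backstore.
 */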
static int tcm_loop_driver_probe(struct device *dev)
{
	struct tcm_loop_hba *tl_hba;
	struct Scsi_Host *sh;
	int error, host_prot;

	tl_hba = to_tcm_loop_hba(dev);

	sh = scsi_host_alloc(&tcm_loop_driver_template,
			sizeof(struct tcm_loop_hba));
	if (!sh) {
		pr_err("Unable to allocate struct scsi_host\n");
		return -ENODEV;
	}
	tl_hba->sh = sh;

	/*
	 * Assign the struct tcm_loop_hba pointer to struct Scsi_Host->hostdata
	 */
	*((struct tcm_loop_hba **)sh->hostdata) = tl_hba;
	/*
	 * Setup single ID, Channel and LUN for now..
	 */
	sh->max_id = 2;
	sh->max_lun = 0;
	sh->max_channel = 0;
	sh->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;

	host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
		    SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
		    SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;

	scsi_host_set_prot(sh, host_prot);
	scsi_host_set_guard(sh, SHOST_DIX_GUARD_CRC);

	error = scsi_add_host(sh, &tl_hba->dev);
	if (error) {
		pr_err("%s: scsi_add_host failed\n", __func__);
		scsi_host_put(sh);
		return -ENODEV;
	}
	return 0;
}

static int tcm_loop_driver_remove(struct device *dev)
{
	struct tcm_loop_hba *tl_hba;
	struct Scsi_Host *sh;

	tl_hba = to_tcm_loop_hba(dev);
	sh = tl_hba->sh;

	scsi_remove_host(sh);
	scsi_host_put(sh);
	return 0;
}

static void tcm_loop_release_adapter(struct device *dev)
{
	struct tcm_loop_hba *tl_hba = to_tcm_loop_hba(dev);

	kfree(tl_hba);
}

/*
 * Called from tcm_loop_make_scsi_hba() below
 */
static int tcm_loop_setup_hba_bus(struct tcm_loop_hba *tl_hba, int tcm_loop_host_id)
{
	int ret;

	tl_hba->dev.bus = &tcm_loop_lld_bus;
	tl_hba->dev.parent = tcm_loop_primary;
	tl_hba->dev.release = &tcm_loop_release_adapter;
	dev_set_name(&tl_hba->dev, "tcm_loop_adapter_%d", tcm_loop_host_id);

	ret = device_register(&tl_hba->dev);
	if (ret) {
		pr_err("device_register() failed for"
				" tl_hba->dev: %d\n", ret);
		return -ENODEV;
	}

	return 0;
}

/*
 * Called from tcm_loop_fabric_init() below to load the emulated
 * tcm_loop SCSI bus.
 */
static int tcm_loop_alloc_core_bus(void)
{
	int ret;

	tcm_loop_primary = root_device_register("tcm_loop_0");
	if (IS_ERR(tcm_loop_primary)) {
		pr_err("Unable to allocate tcm_loop_primary\n");
		return PTR_ERR(tcm_loop_primary);
	}

	ret = bus_register(&tcm_loop_lld_bus);
	if (ret) {
		pr_err("bus_register() failed for tcm_loop_lld_bus\n");
		goto dev_unreg;
	}

	ret = driver_register(&tcm_loop_driverfs);
	if (ret) {
		pr_err("driver_register() failed for"
				" tcm_loop_driverfs\n");
		goto bus_unreg;
	}

	pr_debug("Initialized TCM Loop Core Bus\n");
	return ret;

bus_unreg:
	bus_unregister(&tcm_loop_lld_bus);
dev_unreg:
	root_device_unregister(tcm_loop_primary);
	return ret;
}

static void tcm_loop_release_core_bus(void)
{
	driver_unregister(&tcm_loop_driverfs);
	bus_unregister(&tcm_loop_lld_bus);
	root_device_unregister(tcm_loop_primary);

	pr_debug("Releasing TCM Loop Core BUS\n");
}

static char *tcm_loop_get_fabric_name(void)
{
	return "loopback";
}

static inline struct tcm_loop_tpg *tl_tpg(struct se_portal_group *se_tpg)
{
	return container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
}

static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg)
{
	/*
	 * Return the passed NAA identifier for the Target Port
	 */
	return &tl_tpg(se_tpg)->tl_hba->tl_wwn_address[0];
}

static u16 tcm_loop_get_tag(struct se_portal_group *se_tpg)
{
	/*
	 * This Tag is used when forming SCSI Name identifier in EVPD=1 0x83
	 * to represent the SCSI Target Port.
	 */
	return tl_tpg(se_tpg)->tl_tpgt;
}

/*
 * Returning (1) here allows for target_core_mod struct se_node_acl to be generated
 * based upon the incoming fabric dependent SCSI Initiator Port
 */
static int tcm_loop_check_demo_mode(struct se_portal_group *se_tpg)
{
	return 1;
}

static int tcm_loop_check_demo_mode_cache(struct se_portal_group *se_tpg)
{
	return 0;
}

/*
 * Allow I_T Nexus full READ-WRITE access without explicit Initiator Node ACLs for
 * local virtual Linux/SCSI LLD passthrough into VM hypervisor guest
 */
static int tcm_loop_check_demo_mode_write_protect(struct se_portal_group *se_tpg)
{
	return 0;
}

/*
 * Because TCM_Loop does not use explicit ACLs and MappedLUNs, this will
 * never be called for TCM_Loop by target_core_fabric_configfs.c code.
 * It has been added here as a nop for target_fabric_tf_ops_check()
 */
static int tcm_loop_check_prod_mode_write_protect(struct se_portal_group *se_tpg)
{
	return 0;
}

static int tcm_loop_check_prot_fabric_only(struct se_portal_group *se_tpg)
{
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
						   tl_se_tpg);
	return tl_tpg->tl_fabric_prot_type;
}

static u32 tcm_loop_get_inst_index(struct se_portal_group *se_tpg)
{
	return 1;
}

static u32 tcm_loop_sess_get_index(struct se_session *se_sess)
{
	return 1;
}

static void tcm_loop_set_default_node_attributes(struct se_node_acl *se_acl)
{
	return;
}

static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
			struct tcm_loop_cmd, tl_se_cmd);

	return tl_cmd->sc_cmd_state;
}

static int tcm_loop_shutdown_session(struct se_session *se_sess)
{
	return 0;
}

static void tcm_loop_close_session(struct se_session *se_sess)
{
	return;
}

static int tcm_loop_write_pending(struct se_cmd *se_cmd)
{
	/*
	 * Since Linux/SCSI has already sent down a struct scsi_cmnd
	 * sc->sc_data_direction of DMA_TO_DEVICE with struct scatterlist array
	 * memory, and memory has already been mapped to struct se_cmd->t_mem_list
	 * format with transport_generic_map_mem_to_cmd().
	 *
	 * We now tell TCM to add this WRITE CDB directly into the TCM storage
	 * object execution queue.
	 */
	target_execute_cmd(se_cmd);
	return 0;
}

static int tcm_loop_write_pending_status(struct se_cmd *se_cmd)
{
	return 0;
}

static int tcm_loop_queue_data_in(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
			struct tcm_loop_cmd, tl_se_cmd);
	struct scsi_cmnd *sc = tl_cmd->sc;

	pr_debug("tcm_loop_queue_data_in() called for scsi_cmnd: %p"
		" cdb: 0x%02x\n", sc, sc->cmnd[0]);

	sc->result = SAM_STAT_GOOD;
	set_host_byte(sc, DID_OK);
	if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
	    (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
		scsi_set_resid(sc, se_cmd->residual_count);
	sc->scsi_done(sc);
	return 0;
}

static int tcm_loop_queue_status(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
			struct tcm_loop_cmd, tl_se_cmd);
	struct scsi_cmnd *sc = tl_cmd->sc;

	pr_debug("tcm_loop_queue_status() called for scsi_cmnd: %p"
		" cdb: 0x%02x\n", sc, sc->cmnd[0]);

	if (se_cmd->sense_buffer &&
	    ((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
	     (se_cmd->se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {

		memcpy(sc->sense_buffer, se_cmd->sense_buffer,
				SCSI_SENSE_BUFFERSIZE);
		sc->result = SAM_STAT_CHECK_CONDITION;
		set_driver_byte(sc, DRIVER_SENSE);
	} else
		sc->result = se_cmd->scsi_status;

	set_host_byte(sc, DID_OK);
	if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
	    (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
		scsi_set_resid(sc, se_cmd->residual_count);
	sc->scsi_done(sc);
	return 0;
}

static void tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd)
{
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
	struct tcm_loop_tmr *tl_tmr = se_tmr->fabric_tmr_ptr;
	/*
	 * The SCSI EH thread will be sleeping on tl_tmr->tl_tmr_wait, go ahead
	 * and wake up the wait_queue_head_t in tcm_loop_issue_tmr()
	 */
	atomic_set(&tl_tmr->tmr_complete, 1);
	wake_up(&tl_tmr->tl_tmr_wait);
}

static void tcm_loop_aborted_task(struct se_cmd *se_cmd)
{
	return;
}

static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba)
{
	switch (tl_hba->tl_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return "SAS";
	case SCSI_PROTOCOL_FCP:
		return "FCP";
	case SCSI_PROTOCOL_ISCSI:
		return "iSCSI";
	default:
		break;
	}

	return "Unknown";
}

/* Start items for tcm_loop_port_cit */
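
/*
 * Port link/unlink callbacks, invoked from configfs when a LUN is linked
 * into or removed from a loopback TPG. They hot-add and remove the
 * corresponding Linux/SCSI struct scsi_device on the emulated host by
 * HCTL (host, channel = 0, target = tpgt, lun).
 */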
static int tcm_loop_port_link(
	struct se_portal_group *se_tpg,
	struct se_lun *lun)
{
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
				struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;

	atomic_inc_mb(&tl_tpg->tl_tpg_port_count);
	/*
	 * Add Linux/SCSI struct scsi_device by HCTL
	 */
	scsi_add_device(tl_hba->sh, 0, tl_tpg->tl_tpgt, lun->unpacked_lun);

	pr_debug("TCM_Loop_ConfigFS: Port Link Successful\n");
	return 0;
}

static void tcm_loop_port_unlink(
	struct se_portal_group *se_tpg,
	struct se_lun *se_lun)
{
	struct scsi_device *sd;
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;

	tl_tpg = container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
	tl_hba = tl_tpg->tl_hba;

	sd = scsi_device_lookup(tl_hba->sh, 0, tl_tpg->tl_tpgt,
				se_lun->unpacked_lun);
	if (!sd) {
		pr_err("Unable to locate struct scsi_device for %d:%d:"
			"%llu\n", 0, tl_tpg->tl_tpgt, se_lun->unpacked_lun);
		return;
	}
	/*
	 * Remove Linux/SCSI struct scsi_device by HCTL
	 */
	scsi_remove_device(sd);
	scsi_device_put(sd);

	atomic_dec_mb(&tl_tpg->tl_tpg_port_count);

	pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n");
}

/* End items for tcm_loop_port_cit */

static ssize_t tcm_loop_tpg_attrib_fabric_prot_type_show(
		struct config_item *item, char *page)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
						   tl_se_tpg);

	return sprintf(page, "%d\n", tl_tpg->tl_fabric_prot_type);
}

static ssize_t tcm_loop_tpg_attrib_fabric_prot_type_store(
		struct config_item *item, const char *page, size_t count)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
						   tl_se_tpg);
	unsigned long val;
	int ret = kstrtoul(page, 0, &val);

	if (ret) {
		pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
		return ret;
	}
	if (val != 0 && val != 1 && val != 3) {
		pr_err("Invalid loopback fabric_prot_type: %lu\n", val);
		return -EINVAL;
	}
	tl_tpg->tl_fabric_prot_type = val;

	return count;
}

CONFIGFS_ATTR(tcm_loop_tpg_attrib_, fabric_prot_type);

static struct configfs_attribute *tcm_loop_tpg_attrib_attrs[] = {
	&tcm_loop_tpg_attrib_attr_fabric_prot_type,
	NULL,
};

/* Start items for tcm_loop_nexus_cit */

static int tcm_loop_make_nexus(
	struct tcm_loop_tpg *tl_tpg,
	const char *name)
{
	struct se_portal_group *se_tpg;
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
	struct tcm_loop_nexus *tl_nexus;
	int ret = -ENOMEM;

	if (tl_tpg->tl_nexus) {
		pr_debug("tl_tpg->tl_nexus already exists\n");
		return -EEXIST;
	}
	se_tpg = &tl_tpg->tl_se_tpg;

	tl_nexus = kzalloc(sizeof(struct tcm_loop_nexus), GFP_KERNEL);
	if (!tl_nexus) {
		pr_err("Unable to allocate struct tcm_loop_nexus\n");
		return -ENOMEM;
	}
	/*
	 * Initialize the struct se_session pointer
	 */
	tl_nexus->se_sess = transport_init_session(
				TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS);
	if (IS_ERR(tl_nexus->se_sess)) {
		ret = PTR_ERR(tl_nexus->se_sess);
		goto out;
	}
	/*
	 * Since we are running in 'demo mode' this call will generate a
	 * struct se_node_acl for the tcm_loop struct se_portal_group with the SCSI
	 * Initiator port name of the passed configfs group 'name'.
	 */
	tl_nexus->se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
				se_tpg, (unsigned char *)name);
	if (!tl_nexus->se_sess->se_node_acl) {
		transport_free_session(tl_nexus->se_sess);
		goto out;
	}
	/* Now, register the I_T Nexus as active. */
	transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl,
			tl_nexus->se_sess, tl_nexus);
	tl_tpg->tl_nexus = tl_nexus;
	pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
		" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
		name);
	return 0;

out:
	kfree(tl_nexus);
	return ret;
}

static int tcm_loop_drop_nexus(
	struct tcm_loop_tpg *tpg)
{
	struct se_session *se_sess;
	struct tcm_loop_nexus *tl_nexus;

	tl_nexus = tpg->tl_nexus;
	if (!tl_nexus)
		return -ENODEV;

	se_sess = tl_nexus->se_sess;
	if (!se_sess)
		return -ENODEV;

	if (atomic_read(&tpg->tl_tpg_port_count)) {
		pr_err("Unable to remove TCM_Loop I_T Nexus with"
			" active TPG port count: %d\n",
			atomic_read(&tpg->tl_tpg_port_count));
		return -EPERM;
	}

	pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated"
		" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tpg->tl_hba),
		tl_nexus->se_sess->se_node_acl->initiatorname);
	/*
	 * Release the SCSI I_T Nexus to the emulated Target Port
	 */
	transport_deregister_session(tl_nexus->se_sess);
	tpg->tl_nexus = NULL;
	kfree(tl_nexus);
	return 0;
}

/* End items for tcm_loop_nexus_cit */

static ssize_t tcm_loop_tpg_nexus_show(struct config_item *item, char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
			struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_nexus *tl_nexus;
	ssize_t ret;

	tl_nexus = tl_tpg->tl_nexus;
	if (!tl_nexus)
		return -ENODEV;

	ret = snprintf(page, PAGE_SIZE, "%s\n",
		tl_nexus->se_sess->se_node_acl->initiatorname);

	return ret;
}

static ssize_t tcm_loop_tpg_nexus_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
			struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
	unsigned char i_port[TL_WWN_ADDR_LEN], *ptr, *port_ptr;
	int ret;
	/*
	 * Shutdown the active I_T nexus if 'NULL' is passed..
	 */
	if (!strncmp(page, "NULL", 4)) {
		ret = tcm_loop_drop_nexus(tl_tpg);
		return (!ret) ? count : ret;
	}
	/*
	 * Otherwise make sure the passed virtual Initiator port WWN matches
	 * the fabric protocol_id set in tcm_loop_make_scsi_hba(), and call
	 * tcm_loop_make_nexus()
	 */
	if (strlen(page) >= TL_WWN_ADDR_LEN) {
		pr_err("Emulated NAA SAS Address: %s, exceeds"
			" max: %d\n", page, TL_WWN_ADDR_LEN);
		return -EINVAL;
	}
	snprintf(&i_port[0], TL_WWN_ADDR_LEN, "%s", page);

	ptr = strstr(i_port, "naa.");
	if (ptr) {
		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_SAS) {
			pr_err("Passed SAS Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				tcm_loop_dump_proto_id(tl_hba));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	ptr = strstr(i_port, "fc.");
	if (ptr) {
		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_FCP) {
			pr_err("Passed FCP Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				tcm_loop_dump_proto_id(tl_hba));
			return -EINVAL;
		}
		port_ptr = &i_port[3]; /* Skip over "fc." */
		goto check_newline;
	}
	ptr = strstr(i_port, "iqn.");
	if (ptr) {
		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_ISCSI) {
			pr_err("Passed iSCSI Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				tcm_loop_dump_proto_id(tl_hba));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	pr_err("Unable to locate prefix for emulated Initiator Port:"
		" %s\n", i_port);
	return -EINVAL;
	/*
	 * Clear any trailing newline for the NAA WWN
	 */
check_newline:
	if (i_port[strlen(i_port)-1] == '\n')
		i_port[strlen(i_port)-1] = '\0';

	ret = tcm_loop_make_nexus(tl_tpg, port_ptr);
	if (ret < 0)
		return ret;

	return count;
}

static ssize_t tcm_loop_tpg_transport_status_show(struct config_item *item,
		char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
			struct tcm_loop_tpg, tl_se_tpg);
	const char *status = NULL;
	ssize_t ret = -EINVAL;

	switch (tl_tpg->tl_transport_status) {
	case TCM_TRANSPORT_ONLINE:
		status = "online";
		break;
	case TCM_TRANSPORT_OFFLINE:
		status = "offline";
		break;
	default:
		break;
	}

	if (status)
		ret = snprintf(page, PAGE_SIZE, "%s\n", status);

	return ret;
}

static ssize_t tcm_loop_tpg_transport_status_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
			struct tcm_loop_tpg, tl_se_tpg);

	if (!strncmp(page, "online", 6)) {
		tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
		return count;
	}
	if (!strncmp(page, "offline", 7)) {
		tl_tpg->tl_transport_status = TCM_TRANSPORT_OFFLINE;
		if (tl_tpg->tl_nexus) {
			struct se_session *tl_sess = tl_tpg->tl_nexus->se_sess;

			core_allocate_nexus_loss_ua(tl_sess->se_node_acl);
		}
		return count;
	}
	return -EINVAL;
}

CONFIGFS_ATTR(tcm_loop_tpg_, nexus);
CONFIGFS_ATTR(tcm_loop_tpg_, transport_status);

static struct configfs_attribute *tcm_loop_tpg_attrs[] = {
	&tcm_loop_tpg_attr_nexus,
	&tcm_loop_tpg_attr_transport_status,
	NULL,
};

/* Start items for tcm_loop_naa_cit */

static struct se_portal_group *tcm_loop_make_naa_tpg(
	struct se_wwn *wwn,
	struct config_group *group,
	const char *name)
{
	struct tcm_loop_hba *tl_hba = container_of(wwn,
			struct tcm_loop_hba, tl_hba_wwn);
	struct tcm_loop_tpg *tl_tpg;
	int ret;
	unsigned long tpgt;

	if (strstr(name, "tpgt_") != name) {
		pr_err("Unable to locate \"tpgt_#\" directory"
				" group\n");
		return ERR_PTR(-EINVAL);
	}
	if (kstrtoul(name+5, 10, &tpgt))
		return ERR_PTR(-EINVAL);

	if (tpgt >= TL_TPGS_PER_HBA) {
		pr_err("Passed tpgt: %lu exceeds TL_TPGS_PER_HBA:"
				" %u\n", tpgt, TL_TPGS_PER_HBA);
		return ERR_PTR(-EINVAL);
	}
	tl_tpg = &tl_hba->tl_hba_tpgs[tpgt];
	tl_tpg->tl_hba = tl_hba;
	tl_tpg->tl_tpgt = tpgt;
	/*
	 * Register the tl_tpg as an emulated TCM Target Endpoint
	 */
	ret = core_tpg_register(wwn, &tl_tpg->tl_se_tpg, tl_hba->tl_proto_id);
	if (ret < 0)
		return ERR_PTR(-ENOMEM);

	pr_debug("TCM_Loop_ConfigFS: Allocated Emulated %s"
		" Target Port %s,t,0x%04lx\n", tcm_loop_dump_proto_id(tl_hba),
		config_item_name(&wwn->wwn_group.cg_item), tpgt);

	return &tl_tpg->tl_se_tpg;
}

static void tcm_loop_drop_naa_tpg(
	struct se_portal_group *se_tpg)
{
	struct se_wwn *wwn = se_tpg->se_tpg_wwn;
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
				struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_hba *tl_hba;
	unsigned short tpgt;

	tl_hba = tl_tpg->tl_hba;
	tpgt = tl_tpg->tl_tpgt;
	/*
	 * Release the I_T Nexus for the Virtual target link if present
	 */
	tcm_loop_drop_nexus(tl_tpg);
	/*
	 * Deregister the tl_tpg as an emulated TCM Target Endpoint
	 */
	core_tpg_deregister(se_tpg);

	tl_tpg->tl_hba = NULL;
	tl_tpg->tl_tpgt = 0;

	pr_debug("TCM_Loop_ConfigFS: Deallocated Emulated %s"
		" Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba),
		config_item_name(&wwn->wwn_group.cg_item), tpgt);
}

/* End items for tcm_loop_naa_cit */

/* Start items for tcm_loop_cit */

static struct se_wwn *tcm_loop_make_scsi_hba(
	struct target_fabric_configfs *tf,
	struct config_group *group,
	const char *name)
{
	struct tcm_loop_hba *tl_hba;
	struct Scsi_Host *sh;
	char *ptr;
	int ret, off = 0;

	tl_hba = kzalloc(sizeof(struct tcm_loop_hba), GFP_KERNEL);
	if (!tl_hba) {
		pr_err("Unable to allocate struct tcm_loop_hba\n");
		return ERR_PTR(-ENOMEM);
	}
	/*
	 * Determine the emulated Protocol Identifier and Target Port Name
	 * based on the incoming configfs directory name.
	 */
	ptr = strstr(name, "naa.");
	if (ptr) {
		tl_hba->tl_proto_id = SCSI_PROTOCOL_SAS;
		goto check_len;
	}
	ptr = strstr(name, "fc.");
	if (ptr) {
		tl_hba->tl_proto_id = SCSI_PROTOCOL_FCP;
		off = 3; /* Skip over "fc." */
		goto check_len;
	}
	ptr = strstr(name, "iqn.");
	if (!ptr) {
		pr_err("Unable to locate prefix for emulated Target "
				"Port: %s\n", name);
		ret = -EINVAL;
		goto out;
	}
	tl_hba->tl_proto_id = SCSI_PROTOCOL_ISCSI;

check_len:
	if (strlen(name) >= TL_WWN_ADDR_LEN) {
		pr_err("Emulated NAA %s Address: %s, exceeds"
			" max: %d\n", tcm_loop_dump_proto_id(tl_hba), name,
			TL_WWN_ADDR_LEN);
		ret = -EINVAL;
		goto out;
	}
	snprintf(&tl_hba->tl_wwn_address[0], TL_WWN_ADDR_LEN, "%s", &name[off]);

	/*
	 * Call device_register(tl_hba->dev) to register the emulated
	 * Linux/SCSI LLD of type struct Scsi_Host at tl_hba->sh after
	 * device_register() callbacks in tcm_loop_driver_probe()
	 */
	ret = tcm_loop_setup_hba_bus(tl_hba, tcm_loop_hba_no_cnt);
	if (ret)
		goto out;

	sh = tl_hba->sh;
	tcm_loop_hba_no_cnt++;
	pr_debug("TCM_Loop_ConfigFS: Allocated emulated Target"
		" %s Address: %s at Linux/SCSI Host ID: %d\n",
		tcm_loop_dump_proto_id(tl_hba), name, sh->host_no);

	return &tl_hba->tl_hba_wwn;
out:
	kfree(tl_hba);
	return ERR_PTR(ret);
}

static void tcm_loop_drop_scsi_hba(
	struct se_wwn *wwn)
{
	struct tcm_loop_hba *tl_hba = container_of(wwn,
				struct tcm_loop_hba, tl_hba_wwn);

	pr_debug("TCM_Loop_ConfigFS: Deallocating emulated Target"
		" %s Address: %s at Linux/SCSI Host ID: %d\n",
		tcm_loop_dump_proto_id(tl_hba), tl_hba->tl_wwn_address,
		tl_hba->sh->host_no);
	/*
	 * Call device_unregister() on the original tl_hba->dev.
	 * tcm_loop_release_adapter() will release *tl_hba via the
	 * device release callback.
	 */
	device_unregister(&tl_hba->dev);
}

/* Start items for tcm_loop_cit */
static ssize_t tcm_loop_wwn_version_show(struct config_item *item, char *page)
{
	return sprintf(page, "TCM Loopback Fabric module %s\n", TCM_LOOP_VERSION);
}

CONFIGFS_ATTR_RO(tcm_loop_wwn_, version);

static struct configfs_attribute *tcm_loop_wwn_attrs[] = {
	&tcm_loop_wwn_attr_version,
	NULL,
};

/* End items for tcm_loop_cit */

static const struct target_core_fabric_ops loop_ops = {
	.module				= THIS_MODULE,
	.name				= "loopback",
	.get_fabric_name		= tcm_loop_get_fabric_name,
	.tpg_get_wwn			= tcm_loop_get_endpoint_wwn,
	.tpg_get_tag			= tcm_loop_get_tag,
	.tpg_check_demo_mode		= tcm_loop_check_demo_mode,
	.tpg_check_demo_mode_cache	= tcm_loop_check_demo_mode_cache,
	.tpg_check_demo_mode_write_protect =
				tcm_loop_check_demo_mode_write_protect,
	.tpg_check_prod_mode_write_protect =
				tcm_loop_check_prod_mode_write_protect,
	.tpg_check_prot_fabric_only	= tcm_loop_check_prot_fabric_only,
	.tpg_get_inst_index		= tcm_loop_get_inst_index,
	.check_stop_free		= tcm_loop_check_stop_free,
	.release_cmd			= tcm_loop_release_cmd,
	.shutdown_session		= tcm_loop_shutdown_session,
	.close_session			= tcm_loop_close_session,
	.sess_get_index			= tcm_loop_sess_get_index,
	.write_pending			= tcm_loop_write_pending,
	.write_pending_status		= tcm_loop_write_pending_status,
	.set_default_node_attributes	= tcm_loop_set_default_node_attributes,
	.get_cmd_state			= tcm_loop_get_cmd_state,
	.queue_data_in			= tcm_loop_queue_data_in,
	.queue_status			= tcm_loop_queue_status,
	.queue_tm_rsp			= tcm_loop_queue_tm_rsp,
	.aborted_task			= tcm_loop_aborted_task,
	.fabric_make_wwn		= tcm_loop_make_scsi_hba,
	.fabric_drop_wwn		= tcm_loop_drop_scsi_hba,
	.fabric_make_tpg		= tcm_loop_make_naa_tpg,
	.fabric_drop_tpg		= tcm_loop_drop_naa_tpg,
	.fabric_post_link		= tcm_loop_port_link,
	.fabric_pre_unlink		= tcm_loop_port_unlink,
	.tfc_wwn_attrs			= tcm_loop_wwn_attrs,
	.tfc_tpg_base_attrs		= tcm_loop_tpg_attrs,
	.tfc_tpg_attrib_attrs		= tcm_loop_tpg_attrib_attrs,
};
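
/*
 * Illustrative configfs flow for bringing up a loopback port (paths follow
 * the generic target configfs layout; WWNs and backstore names are examples):
 *
 *   mkdir -p /sys/kernel/config/target/loopback/naa.60014051234567890/tpgt_1
 *   echo naa.60014052345678901 > .../tpgt_1/nexus
 *   mkdir .../tpgt_1/lun/lun_0
 *   ln -s /sys/kernel/config/target/core/iblock_0/my_dev .../tpgt_1/lun/lun_0/virtual_scsi_port
 *
 * The WWN mkdir ends up in tcm_loop_make_scsi_hba(), the tpgt_1 mkdir in
 * tcm_loop_make_naa_tpg(), the nexus write in tcm_loop_tpg_nexus_store(),
 * and the LUN symlink triggers tcm_loop_port_link(), which hot-adds the
 * scsi_device on the emulated host.
 */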
static int __init tcm_loop_fabric_init(void)
{
	int ret = -ENOMEM;

	tcm_loop_workqueue = alloc_workqueue("tcm_loop", 0, 0);
	if (!tcm_loop_workqueue)
		goto out;

	tcm_loop_cmd_cache = kmem_cache_create("tcm_loop_cmd_cache",
				sizeof(struct tcm_loop_cmd),
				__alignof__(struct tcm_loop_cmd),
				0, NULL);
	if (!tcm_loop_cmd_cache) {
		pr_debug("kmem_cache_create() for"
			" tcm_loop_cmd_cache failed\n");
		goto out_destroy_workqueue;
	}

	ret = tcm_loop_alloc_core_bus();
	if (ret)
		goto out_destroy_cache;

	ret = target_register_template(&loop_ops);
	if (ret)
		goto out_release_core_bus;

	return 0;

out_release_core_bus:
	tcm_loop_release_core_bus();
out_destroy_cache:
	kmem_cache_destroy(tcm_loop_cmd_cache);
out_destroy_workqueue:
	destroy_workqueue(tcm_loop_workqueue);
out:
	return ret;
}

static void __exit tcm_loop_fabric_exit(void)
{
	target_unregister_template(&loop_ops);
	tcm_loop_release_core_bus();
	kmem_cache_destroy(tcm_loop_cmd_cache);
	destroy_workqueue(tcm_loop_workqueue);
}

MODULE_DESCRIPTION("TCM loopback virtual Linux/SCSI fabric module");
MODULE_AUTHOR("Nicholas A. Bellinger <nab@risingtidesystems.com>");
MODULE_LICENSE("GPL");
module_init(tcm_loop_fabric_init);
module_exit(tcm_loop_fabric_exit);