1 /******************************************************************************* 2 * Filename: target_core_transport.c 3 * 4 * This file contains the Generic Target Engine Core. 5 * 6 * (c) Copyright 2002-2012 RisingTide Systems LLC. 7 * 8 * Nicholas A. Bellinger <nab@kernel.org> 9 * 10 * This program is free software; you can redistribute it and/or modify 11 * it under the terms of the GNU General Public License as published by 12 * the Free Software Foundation; either version 2 of the License, or 13 * (at your option) any later version. 14 * 15 * This program is distributed in the hope that it will be useful, 16 * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 * GNU General Public License for more details. 19 * 20 * You should have received a copy of the GNU General Public License 21 * along with this program; if not, write to the Free Software 22 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 23 * 24 ******************************************************************************/ 25 26 #include <linux/net.h> 27 #include <linux/delay.h> 28 #include <linux/string.h> 29 #include <linux/timer.h> 30 #include <linux/slab.h> 31 #include <linux/blkdev.h> 32 #include <linux/spinlock.h> 33 #include <linux/kthread.h> 34 #include <linux/in.h> 35 #include <linux/cdrom.h> 36 #include <linux/module.h> 37 #include <linux/ratelimit.h> 38 #include <asm/unaligned.h> 39 #include <net/sock.h> 40 #include <net/tcp.h> 41 #include <scsi/scsi.h> 42 #include <scsi/scsi_cmnd.h> 43 #include <scsi/scsi_tcq.h> 44 45 #include <target/target_core_base.h> 46 #include <target/target_core_backend.h> 47 #include <target/target_core_fabric.h> 48 #include <target/target_core_configfs.h> 49 50 #include "target_core_internal.h" 51 #include "target_core_alua.h" 52 #include "target_core_pr.h" 53 #include "target_core_ua.h" 54 55 static struct workqueue_struct *target_completion_wq; 56 static struct kmem_cache *se_sess_cache; 57 struct kmem_cache *se_ua_cache; 58 struct kmem_cache *t10_pr_reg_cache; 59 struct kmem_cache *t10_alua_lu_gp_cache; 60 struct kmem_cache *t10_alua_lu_gp_mem_cache; 61 struct kmem_cache *t10_alua_tg_pt_gp_cache; 62 struct kmem_cache *t10_alua_tg_pt_gp_mem_cache; 63 64 static void transport_complete_task_attr(struct se_cmd *cmd); 65 static void transport_handle_queue_full(struct se_cmd *cmd, 66 struct se_device *dev); 67 static int transport_generic_get_mem(struct se_cmd *cmd); 68 static int target_get_sess_cmd(struct se_session *, struct se_cmd *, bool); 69 static void transport_put_cmd(struct se_cmd *cmd); 70 static void target_complete_ok_work(struct work_struct *work); 71 72 int init_se_kmem_caches(void) 73 { 74 se_sess_cache = kmem_cache_create("se_sess_cache", 75 sizeof(struct se_session), __alignof__(struct se_session), 76 0, NULL); 77 if (!se_sess_cache) { 78 pr_err("kmem_cache_create() for struct se_session" 79 " failed\n"); 80 goto out; 81 } 82 se_ua_cache = kmem_cache_create("se_ua_cache", 83 sizeof(struct se_ua), __alignof__(struct se_ua), 84 0, NULL); 85 if (!se_ua_cache) { 86 pr_err("kmem_cache_create() for struct se_ua failed\n"); 87 goto out_free_sess_cache; 88 } 89 t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache", 90 sizeof(struct t10_pr_registration), 91 __alignof__(struct t10_pr_registration), 0, NULL); 92 if (!t10_pr_reg_cache) { 93 pr_err("kmem_cache_create() for struct t10_pr_registration" 94 " failed\n"); 95 goto out_free_ua_cache; 96 } 97 t10_alua_lu_gp_cache = 
kmem_cache_create("t10_alua_lu_gp_cache", 98 sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp), 99 0, NULL); 100 if (!t10_alua_lu_gp_cache) { 101 pr_err("kmem_cache_create() for t10_alua_lu_gp_cache" 102 " failed\n"); 103 goto out_free_pr_reg_cache; 104 } 105 t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache", 106 sizeof(struct t10_alua_lu_gp_member), 107 __alignof__(struct t10_alua_lu_gp_member), 0, NULL); 108 if (!t10_alua_lu_gp_mem_cache) { 109 pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_" 110 "cache failed\n"); 111 goto out_free_lu_gp_cache; 112 } 113 t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache", 114 sizeof(struct t10_alua_tg_pt_gp), 115 __alignof__(struct t10_alua_tg_pt_gp), 0, NULL); 116 if (!t10_alua_tg_pt_gp_cache) { 117 pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_" 118 "cache failed\n"); 119 goto out_free_lu_gp_mem_cache; 120 } 121 t10_alua_tg_pt_gp_mem_cache = kmem_cache_create( 122 "t10_alua_tg_pt_gp_mem_cache", 123 sizeof(struct t10_alua_tg_pt_gp_member), 124 __alignof__(struct t10_alua_tg_pt_gp_member), 125 0, NULL); 126 if (!t10_alua_tg_pt_gp_mem_cache) { 127 pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_" 128 "mem_t failed\n"); 129 goto out_free_tg_pt_gp_cache; 130 } 131 132 target_completion_wq = alloc_workqueue("target_completion", 133 WQ_MEM_RECLAIM, 0); 134 if (!target_completion_wq) 135 goto out_free_tg_pt_gp_mem_cache; 136 137 return 0; 138 139 out_free_tg_pt_gp_mem_cache: 140 kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); 141 out_free_tg_pt_gp_cache: 142 kmem_cache_destroy(t10_alua_tg_pt_gp_cache); 143 out_free_lu_gp_mem_cache: 144 kmem_cache_destroy(t10_alua_lu_gp_mem_cache); 145 out_free_lu_gp_cache: 146 kmem_cache_destroy(t10_alua_lu_gp_cache); 147 out_free_pr_reg_cache: 148 kmem_cache_destroy(t10_pr_reg_cache); 149 out_free_ua_cache: 150 kmem_cache_destroy(se_ua_cache); 151 out_free_sess_cache: 152 kmem_cache_destroy(se_sess_cache); 153 out: 154 return -ENOMEM; 155 } 156 157 void release_se_kmem_caches(void) 158 { 159 destroy_workqueue(target_completion_wq); 160 kmem_cache_destroy(se_sess_cache); 161 kmem_cache_destroy(se_ua_cache); 162 kmem_cache_destroy(t10_pr_reg_cache); 163 kmem_cache_destroy(t10_alua_lu_gp_cache); 164 kmem_cache_destroy(t10_alua_lu_gp_mem_cache); 165 kmem_cache_destroy(t10_alua_tg_pt_gp_cache); 166 kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); 167 } 168 169 /* This code ensures unique mib indexes are handed out. 
*/ 170 static DEFINE_SPINLOCK(scsi_mib_index_lock); 171 static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX]; 172 173 /* 174 * Allocate a new row index for the entry type specified 175 */ 176 u32 scsi_get_new_index(scsi_index_t type) 177 { 178 u32 new_index; 179 180 BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)); 181 182 spin_lock(&scsi_mib_index_lock); 183 new_index = ++scsi_mib_index[type]; 184 spin_unlock(&scsi_mib_index_lock); 185 186 return new_index; 187 } 188 189 void transport_subsystem_check_init(void) 190 { 191 int ret; 192 static int sub_api_initialized; 193 194 if (sub_api_initialized) 195 return; 196 197 ret = request_module("target_core_iblock"); 198 if (ret != 0) 199 pr_err("Unable to load target_core_iblock\n"); 200 201 ret = request_module("target_core_file"); 202 if (ret != 0) 203 pr_err("Unable to load target_core_file\n"); 204 205 ret = request_module("target_core_pscsi"); 206 if (ret != 0) 207 pr_err("Unable to load target_core_pscsi\n"); 208 209 sub_api_initialized = 1; 210 } 211 212 struct se_session *transport_init_session(void) 213 { 214 struct se_session *se_sess; 215 216 se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL); 217 if (!se_sess) { 218 pr_err("Unable to allocate struct se_session from" 219 " se_sess_cache\n"); 220 return ERR_PTR(-ENOMEM); 221 } 222 INIT_LIST_HEAD(&se_sess->sess_list); 223 INIT_LIST_HEAD(&se_sess->sess_acl_list); 224 INIT_LIST_HEAD(&se_sess->sess_cmd_list); 225 spin_lock_init(&se_sess->sess_cmd_lock); 226 kref_init(&se_sess->sess_kref); 227 228 return se_sess; 229 } 230 EXPORT_SYMBOL(transport_init_session); 231 232 /* 233 * Called with spin_lock_irqsave(&struct se_portal_group->session_lock called. 234 */ 235 void __transport_register_session( 236 struct se_portal_group *se_tpg, 237 struct se_node_acl *se_nacl, 238 struct se_session *se_sess, 239 void *fabric_sess_ptr) 240 { 241 unsigned char buf[PR_REG_ISID_LEN]; 242 243 se_sess->se_tpg = se_tpg; 244 se_sess->fabric_sess_ptr = fabric_sess_ptr; 245 /* 246 * Used by struct se_node_acl's under ConfigFS to locate active se_session-t 247 * 248 * Only set for struct se_session's that will actually be moving I/O. 249 * eg: *NOT* discovery sessions. 250 */ 251 if (se_nacl) { 252 /* 253 * If the fabric module supports an ISID based TransportID, 254 * save this value in binary from the fabric I_T Nexus now. 255 */ 256 if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) { 257 memset(&buf[0], 0, PR_REG_ISID_LEN); 258 se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess, 259 &buf[0], PR_REG_ISID_LEN); 260 se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]); 261 } 262 kref_get(&se_nacl->acl_kref); 263 264 spin_lock_irq(&se_nacl->nacl_sess_lock); 265 /* 266 * The se_nacl->nacl_sess pointer will be set to the 267 * last active I_T Nexus for each struct se_node_acl. 
268 */ 269 se_nacl->nacl_sess = se_sess; 270 271 list_add_tail(&se_sess->sess_acl_list, 272 &se_nacl->acl_sess_list); 273 spin_unlock_irq(&se_nacl->nacl_sess_lock); 274 } 275 list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list); 276 277 pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n", 278 se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr); 279 } 280 EXPORT_SYMBOL(__transport_register_session); 281 282 void transport_register_session( 283 struct se_portal_group *se_tpg, 284 struct se_node_acl *se_nacl, 285 struct se_session *se_sess, 286 void *fabric_sess_ptr) 287 { 288 unsigned long flags; 289 290 spin_lock_irqsave(&se_tpg->session_lock, flags); 291 __transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr); 292 spin_unlock_irqrestore(&se_tpg->session_lock, flags); 293 } 294 EXPORT_SYMBOL(transport_register_session); 295 296 static void target_release_session(struct kref *kref) 297 { 298 struct se_session *se_sess = container_of(kref, 299 struct se_session, sess_kref); 300 struct se_portal_group *se_tpg = se_sess->se_tpg; 301 302 se_tpg->se_tpg_tfo->close_session(se_sess); 303 } 304 305 void target_get_session(struct se_session *se_sess) 306 { 307 kref_get(&se_sess->sess_kref); 308 } 309 EXPORT_SYMBOL(target_get_session); 310 311 void target_put_session(struct se_session *se_sess) 312 { 313 struct se_portal_group *tpg = se_sess->se_tpg; 314 315 if (tpg->se_tpg_tfo->put_session != NULL) { 316 tpg->se_tpg_tfo->put_session(se_sess); 317 return; 318 } 319 kref_put(&se_sess->sess_kref, target_release_session); 320 } 321 EXPORT_SYMBOL(target_put_session); 322 323 static void target_complete_nacl(struct kref *kref) 324 { 325 struct se_node_acl *nacl = container_of(kref, 326 struct se_node_acl, acl_kref); 327 328 complete(&nacl->acl_free_comp); 329 } 330 331 void target_put_nacl(struct se_node_acl *nacl) 332 { 333 kref_put(&nacl->acl_kref, target_complete_nacl); 334 } 335 336 void transport_deregister_session_configfs(struct se_session *se_sess) 337 { 338 struct se_node_acl *se_nacl; 339 unsigned long flags; 340 /* 341 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session 342 */ 343 se_nacl = se_sess->se_node_acl; 344 if (se_nacl) { 345 spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags); 346 if (se_nacl->acl_stop == 0) 347 list_del(&se_sess->sess_acl_list); 348 /* 349 * If the session list is empty, then clear the pointer. 350 * Otherwise, set the struct se_session pointer from the tail 351 * element of the per struct se_node_acl active session list. 
352 */ 353 if (list_empty(&se_nacl->acl_sess_list)) 354 se_nacl->nacl_sess = NULL; 355 else { 356 se_nacl->nacl_sess = container_of( 357 se_nacl->acl_sess_list.prev, 358 struct se_session, sess_acl_list); 359 } 360 spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags); 361 } 362 } 363 EXPORT_SYMBOL(transport_deregister_session_configfs); 364 365 void transport_free_session(struct se_session *se_sess) 366 { 367 kmem_cache_free(se_sess_cache, se_sess); 368 } 369 EXPORT_SYMBOL(transport_free_session); 370 371 void transport_deregister_session(struct se_session *se_sess) 372 { 373 struct se_portal_group *se_tpg = se_sess->se_tpg; 374 struct target_core_fabric_ops *se_tfo; 375 struct se_node_acl *se_nacl; 376 unsigned long flags; 377 bool comp_nacl = true; 378 379 if (!se_tpg) { 380 transport_free_session(se_sess); 381 return; 382 } 383 se_tfo = se_tpg->se_tpg_tfo; 384 385 spin_lock_irqsave(&se_tpg->session_lock, flags); 386 list_del(&se_sess->sess_list); 387 se_sess->se_tpg = NULL; 388 se_sess->fabric_sess_ptr = NULL; 389 spin_unlock_irqrestore(&se_tpg->session_lock, flags); 390 391 /* 392 * Determine if we need to do extra work for this initiator node's 393 * struct se_node_acl if it had been previously dynamically generated. 394 */ 395 se_nacl = se_sess->se_node_acl; 396 397 spin_lock_irqsave(&se_tpg->acl_node_lock, flags); 398 if (se_nacl && se_nacl->dynamic_node_acl) { 399 if (!se_tfo->tpg_check_demo_mode_cache(se_tpg)) { 400 list_del(&se_nacl->acl_list); 401 se_tpg->num_node_acls--; 402 spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags); 403 core_tpg_wait_for_nacl_pr_ref(se_nacl); 404 core_free_device_list_for_node(se_nacl, se_tpg); 405 se_tfo->tpg_release_fabric_acl(se_tpg, se_nacl); 406 407 comp_nacl = false; 408 spin_lock_irqsave(&se_tpg->acl_node_lock, flags); 409 } 410 } 411 spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags); 412 413 pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n", 414 se_tpg->se_tpg_tfo->get_fabric_name()); 415 /* 416 * If last kref is dropping now for an explict NodeACL, awake sleeping 417 * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group 418 * removal context. 419 */ 420 if (se_nacl && comp_nacl == true) 421 target_put_nacl(se_nacl); 422 423 transport_free_session(se_sess); 424 } 425 EXPORT_SYMBOL(transport_deregister_session); 426 427 /* 428 * Called with cmd->t_state_lock held. 429 */ 430 static void target_remove_from_state_list(struct se_cmd *cmd) 431 { 432 struct se_device *dev = cmd->se_dev; 433 unsigned long flags; 434 435 if (!dev) 436 return; 437 438 if (cmd->transport_state & CMD_T_BUSY) 439 return; 440 441 spin_lock_irqsave(&dev->execute_task_lock, flags); 442 if (cmd->state_active) { 443 list_del(&cmd->state_list); 444 cmd->state_active = false; 445 } 446 spin_unlock_irqrestore(&dev->execute_task_lock, flags); 447 } 448 449 static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists) 450 { 451 unsigned long flags; 452 453 spin_lock_irqsave(&cmd->t_state_lock, flags); 454 /* 455 * Determine if IOCTL context caller in requesting the stopping of this 456 * command for LUN shutdown purposes. 
457 */ 458 if (cmd->transport_state & CMD_T_LUN_STOP) { 459 pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n", 460 __func__, __LINE__, cmd->se_tfo->get_task_tag(cmd)); 461 462 cmd->transport_state &= ~CMD_T_ACTIVE; 463 if (remove_from_lists) 464 target_remove_from_state_list(cmd); 465 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 466 467 complete(&cmd->transport_lun_stop_comp); 468 return 1; 469 } 470 471 if (remove_from_lists) { 472 target_remove_from_state_list(cmd); 473 474 /* 475 * Clear struct se_cmd->se_lun before the handoff to FE. 476 */ 477 cmd->se_lun = NULL; 478 } 479 480 /* 481 * Determine if frontend context caller is requesting the stopping of 482 * this command for frontend exceptions. 483 */ 484 if (cmd->transport_state & CMD_T_STOP) { 485 pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n", 486 __func__, __LINE__, 487 cmd->se_tfo->get_task_tag(cmd)); 488 489 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 490 491 complete(&cmd->t_transport_stop_comp); 492 return 1; 493 } 494 495 cmd->transport_state &= ~CMD_T_ACTIVE; 496 if (remove_from_lists) { 497 /* 498 * Some fabric modules like tcm_loop can release 499 * their internally allocated I/O reference now and 500 * struct se_cmd now. 501 * 502 * Fabric modules are expected to return '1' here if the 503 * se_cmd being passed is released at this point, 504 * or zero if not being released. 505 */ 506 if (cmd->se_tfo->check_stop_free != NULL) { 507 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 508 return cmd->se_tfo->check_stop_free(cmd); 509 } 510 } 511 512 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 513 return 0; 514 } 515 516 static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd) 517 { 518 return transport_cmd_check_stop(cmd, true); 519 } 520 521 static void transport_lun_remove_cmd(struct se_cmd *cmd) 522 { 523 struct se_lun *lun = cmd->se_lun; 524 unsigned long flags; 525 526 if (!lun) 527 return; 528 529 spin_lock_irqsave(&cmd->t_state_lock, flags); 530 if (cmd->transport_state & CMD_T_DEV_ACTIVE) { 531 cmd->transport_state &= ~CMD_T_DEV_ACTIVE; 532 target_remove_from_state_list(cmd); 533 } 534 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 535 536 spin_lock_irqsave(&lun->lun_cmd_lock, flags); 537 if (!list_empty(&cmd->se_lun_node)) 538 list_del_init(&cmd->se_lun_node); 539 spin_unlock_irqrestore(&lun->lun_cmd_lock, flags); 540 } 541 542 void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) 543 { 544 if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) 545 transport_lun_remove_cmd(cmd); 546 547 if (transport_cmd_check_stop_to_fabric(cmd)) 548 return; 549 if (remove) 550 transport_put_cmd(cmd); 551 } 552 553 static void target_complete_failure_work(struct work_struct *work) 554 { 555 struct se_cmd *cmd = container_of(work, struct se_cmd, work); 556 557 transport_generic_request_failure(cmd, 558 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE); 559 } 560 561 /* 562 * Used when asking transport to copy Sense Data from the underlying 563 * Linux/SCSI struct scsi_cmnd 564 */ 565 static unsigned char *transport_get_sense_buffer(struct se_cmd *cmd) 566 { 567 struct se_device *dev = cmd->se_dev; 568 569 WARN_ON(!cmd->se_lun); 570 571 if (!dev) 572 return NULL; 573 574 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) 575 return NULL; 576 577 cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER; 578 579 pr_debug("HBA_[%u]_PLUG[%s]: Requesting sense for SAM STATUS: 0x%02x\n", 580 dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status); 581 return cmd->sense_buffer; 582 } 583 584 void target_complete_cmd(struct 
se_cmd *cmd, u8 scsi_status) 585 { 586 struct se_device *dev = cmd->se_dev; 587 int success = scsi_status == GOOD; 588 unsigned long flags; 589 590 cmd->scsi_status = scsi_status; 591 592 593 spin_lock_irqsave(&cmd->t_state_lock, flags); 594 cmd->transport_state &= ~CMD_T_BUSY; 595 596 if (dev && dev->transport->transport_complete) { 597 dev->transport->transport_complete(cmd, 598 cmd->t_data_sg, 599 transport_get_sense_buffer(cmd)); 600 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) 601 success = 1; 602 } 603 604 /* 605 * See if we are waiting to complete for an exception condition. 606 */ 607 if (cmd->transport_state & CMD_T_REQUEST_STOP) { 608 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 609 complete(&cmd->task_stop_comp); 610 return; 611 } 612 613 if (!success) 614 cmd->transport_state |= CMD_T_FAILED; 615 616 /* 617 * Check for case where an explict ABORT_TASK has been received 618 * and transport_wait_for_tasks() will be waiting for completion.. 619 */ 620 if (cmd->transport_state & CMD_T_ABORTED && 621 cmd->transport_state & CMD_T_STOP) { 622 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 623 complete(&cmd->t_transport_stop_comp); 624 return; 625 } else if (cmd->transport_state & CMD_T_FAILED) { 626 INIT_WORK(&cmd->work, target_complete_failure_work); 627 } else { 628 INIT_WORK(&cmd->work, target_complete_ok_work); 629 } 630 631 cmd->t_state = TRANSPORT_COMPLETE; 632 cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE); 633 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 634 635 queue_work(target_completion_wq, &cmd->work); 636 } 637 EXPORT_SYMBOL(target_complete_cmd); 638 639 static void target_add_to_state_list(struct se_cmd *cmd) 640 { 641 struct se_device *dev = cmd->se_dev; 642 unsigned long flags; 643 644 spin_lock_irqsave(&dev->execute_task_lock, flags); 645 if (!cmd->state_active) { 646 list_add_tail(&cmd->state_list, &dev->state_list); 647 cmd->state_active = true; 648 } 649 spin_unlock_irqrestore(&dev->execute_task_lock, flags); 650 } 651 652 /* 653 * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status 654 */ 655 static void transport_write_pending_qf(struct se_cmd *cmd); 656 static void transport_complete_qf(struct se_cmd *cmd); 657 658 void target_qf_do_work(struct work_struct *work) 659 { 660 struct se_device *dev = container_of(work, struct se_device, 661 qf_work_queue); 662 LIST_HEAD(qf_cmd_list); 663 struct se_cmd *cmd, *cmd_tmp; 664 665 spin_lock_irq(&dev->qf_cmd_lock); 666 list_splice_init(&dev->qf_cmd_list, &qf_cmd_list); 667 spin_unlock_irq(&dev->qf_cmd_lock); 668 669 list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) { 670 list_del(&cmd->se_qf_node); 671 atomic_dec(&dev->dev_qf_count); 672 smp_mb__after_atomic_dec(); 673 674 pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue" 675 " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd, 676 (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" : 677 (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? 
"WRITE_PENDING" 678 : "UNKNOWN"); 679 680 if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) 681 transport_write_pending_qf(cmd); 682 else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) 683 transport_complete_qf(cmd); 684 } 685 } 686 687 unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd) 688 { 689 switch (cmd->data_direction) { 690 case DMA_NONE: 691 return "NONE"; 692 case DMA_FROM_DEVICE: 693 return "READ"; 694 case DMA_TO_DEVICE: 695 return "WRITE"; 696 case DMA_BIDIRECTIONAL: 697 return "BIDI"; 698 default: 699 break; 700 } 701 702 return "UNKNOWN"; 703 } 704 705 void transport_dump_dev_state( 706 struct se_device *dev, 707 char *b, 708 int *bl) 709 { 710 *bl += sprintf(b + *bl, "Status: "); 711 if (dev->export_count) 712 *bl += sprintf(b + *bl, "ACTIVATED"); 713 else 714 *bl += sprintf(b + *bl, "DEACTIVATED"); 715 716 *bl += sprintf(b + *bl, " Max Queue Depth: %d", dev->queue_depth); 717 *bl += sprintf(b + *bl, " SectorSize: %u HwMaxSectors: %u\n", 718 dev->dev_attrib.block_size, 719 dev->dev_attrib.hw_max_sectors); 720 *bl += sprintf(b + *bl, " "); 721 } 722 723 void transport_dump_vpd_proto_id( 724 struct t10_vpd *vpd, 725 unsigned char *p_buf, 726 int p_buf_len) 727 { 728 unsigned char buf[VPD_TMP_BUF_SIZE]; 729 int len; 730 731 memset(buf, 0, VPD_TMP_BUF_SIZE); 732 len = sprintf(buf, "T10 VPD Protocol Identifier: "); 733 734 switch (vpd->protocol_identifier) { 735 case 0x00: 736 sprintf(buf+len, "Fibre Channel\n"); 737 break; 738 case 0x10: 739 sprintf(buf+len, "Parallel SCSI\n"); 740 break; 741 case 0x20: 742 sprintf(buf+len, "SSA\n"); 743 break; 744 case 0x30: 745 sprintf(buf+len, "IEEE 1394\n"); 746 break; 747 case 0x40: 748 sprintf(buf+len, "SCSI Remote Direct Memory Access" 749 " Protocol\n"); 750 break; 751 case 0x50: 752 sprintf(buf+len, "Internet SCSI (iSCSI)\n"); 753 break; 754 case 0x60: 755 sprintf(buf+len, "SAS Serial SCSI Protocol\n"); 756 break; 757 case 0x70: 758 sprintf(buf+len, "Automation/Drive Interface Transport" 759 " Protocol\n"); 760 break; 761 case 0x80: 762 sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n"); 763 break; 764 default: 765 sprintf(buf+len, "Unknown 0x%02x\n", 766 vpd->protocol_identifier); 767 break; 768 } 769 770 if (p_buf) 771 strncpy(p_buf, buf, p_buf_len); 772 else 773 pr_debug("%s", buf); 774 } 775 776 void 777 transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83) 778 { 779 /* 780 * Check if the Protocol Identifier Valid (PIV) bit is set.. 
781 * 782 * from spc3r23.pdf section 7.5.1 783 */ 784 if (page_83[1] & 0x80) { 785 vpd->protocol_identifier = (page_83[0] & 0xf0); 786 vpd->protocol_identifier_set = 1; 787 transport_dump_vpd_proto_id(vpd, NULL, 0); 788 } 789 } 790 EXPORT_SYMBOL(transport_set_vpd_proto_id); 791 792 int transport_dump_vpd_assoc( 793 struct t10_vpd *vpd, 794 unsigned char *p_buf, 795 int p_buf_len) 796 { 797 unsigned char buf[VPD_TMP_BUF_SIZE]; 798 int ret = 0; 799 int len; 800 801 memset(buf, 0, VPD_TMP_BUF_SIZE); 802 len = sprintf(buf, "T10 VPD Identifier Association: "); 803 804 switch (vpd->association) { 805 case 0x00: 806 sprintf(buf+len, "addressed logical unit\n"); 807 break; 808 case 0x10: 809 sprintf(buf+len, "target port\n"); 810 break; 811 case 0x20: 812 sprintf(buf+len, "SCSI target device\n"); 813 break; 814 default: 815 sprintf(buf+len, "Unknown 0x%02x\n", vpd->association); 816 ret = -EINVAL; 817 break; 818 } 819 820 if (p_buf) 821 strncpy(p_buf, buf, p_buf_len); 822 else 823 pr_debug("%s", buf); 824 825 return ret; 826 } 827 828 int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83) 829 { 830 /* 831 * The VPD identification association.. 832 * 833 * from spc3r23.pdf Section 7.6.3.1 Table 297 834 */ 835 vpd->association = (page_83[1] & 0x30); 836 return transport_dump_vpd_assoc(vpd, NULL, 0); 837 } 838 EXPORT_SYMBOL(transport_set_vpd_assoc); 839 840 int transport_dump_vpd_ident_type( 841 struct t10_vpd *vpd, 842 unsigned char *p_buf, 843 int p_buf_len) 844 { 845 unsigned char buf[VPD_TMP_BUF_SIZE]; 846 int ret = 0; 847 int len; 848 849 memset(buf, 0, VPD_TMP_BUF_SIZE); 850 len = sprintf(buf, "T10 VPD Identifier Type: "); 851 852 switch (vpd->device_identifier_type) { 853 case 0x00: 854 sprintf(buf+len, "Vendor specific\n"); 855 break; 856 case 0x01: 857 sprintf(buf+len, "T10 Vendor ID based\n"); 858 break; 859 case 0x02: 860 sprintf(buf+len, "EUI-64 based\n"); 861 break; 862 case 0x03: 863 sprintf(buf+len, "NAA\n"); 864 break; 865 case 0x04: 866 sprintf(buf+len, "Relative target port identifier\n"); 867 break; 868 case 0x08: 869 sprintf(buf+len, "SCSI name string\n"); 870 break; 871 default: 872 sprintf(buf+len, "Unsupported: 0x%02x\n", 873 vpd->device_identifier_type); 874 ret = -EINVAL; 875 break; 876 } 877 878 if (p_buf) { 879 if (p_buf_len < strlen(buf)+1) 880 return -EINVAL; 881 strncpy(p_buf, buf, p_buf_len); 882 } else { 883 pr_debug("%s", buf); 884 } 885 886 return ret; 887 } 888 889 int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83) 890 { 891 /* 892 * The VPD identifier type.. 
893 * 894 * from spc3r23.pdf Section 7.6.3.1 Table 298 895 */ 896 vpd->device_identifier_type = (page_83[1] & 0x0f); 897 return transport_dump_vpd_ident_type(vpd, NULL, 0); 898 } 899 EXPORT_SYMBOL(transport_set_vpd_ident_type); 900 901 int transport_dump_vpd_ident( 902 struct t10_vpd *vpd, 903 unsigned char *p_buf, 904 int p_buf_len) 905 { 906 unsigned char buf[VPD_TMP_BUF_SIZE]; 907 int ret = 0; 908 909 memset(buf, 0, VPD_TMP_BUF_SIZE); 910 911 switch (vpd->device_identifier_code_set) { 912 case 0x01: /* Binary */ 913 sprintf(buf, "T10 VPD Binary Device Identifier: %s\n", 914 &vpd->device_identifier[0]); 915 break; 916 case 0x02: /* ASCII */ 917 sprintf(buf, "T10 VPD ASCII Device Identifier: %s\n", 918 &vpd->device_identifier[0]); 919 break; 920 case 0x03: /* UTF-8 */ 921 sprintf(buf, "T10 VPD UTF-8 Device Identifier: %s\n", 922 &vpd->device_identifier[0]); 923 break; 924 default: 925 sprintf(buf, "T10 VPD Device Identifier encoding unsupported:" 926 " 0x%02x", vpd->device_identifier_code_set); 927 ret = -EINVAL; 928 break; 929 } 930 931 if (p_buf) 932 strncpy(p_buf, buf, p_buf_len); 933 else 934 pr_debug("%s", buf); 935 936 return ret; 937 } 938 939 int 940 transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83) 941 { 942 static const char hex_str[] = "0123456789abcdef"; 943 int j = 0, i = 4; /* offset to start of the identifier */ 944 945 /* 946 * The VPD Code Set (encoding) 947 * 948 * from spc3r23.pdf Section 7.6.3.1 Table 296 949 */ 950 vpd->device_identifier_code_set = (page_83[0] & 0x0f); 951 switch (vpd->device_identifier_code_set) { 952 case 0x01: /* Binary */ 953 vpd->device_identifier[j++] = 954 hex_str[vpd->device_identifier_type]; 955 while (i < (4 + page_83[3])) { 956 vpd->device_identifier[j++] = 957 hex_str[(page_83[i] & 0xf0) >> 4]; 958 vpd->device_identifier[j++] = 959 hex_str[page_83[i] & 0x0f]; 960 i++; 961 } 962 break; 963 case 0x02: /* ASCII */ 964 case 0x03: /* UTF-8 */ 965 while (i < (4 + page_83[3])) 966 vpd->device_identifier[j++] = page_83[i++]; 967 break; 968 default: 969 break; 970 } 971 972 return transport_dump_vpd_ident(vpd, NULL, 0); 973 } 974 EXPORT_SYMBOL(transport_set_vpd_ident); 975 976 sense_reason_t 977 target_cmd_size_check(struct se_cmd *cmd, unsigned int size) 978 { 979 struct se_device *dev = cmd->se_dev; 980 981 if (cmd->unknown_data_length) { 982 cmd->data_length = size; 983 } else if (size != cmd->data_length) { 984 pr_warn("TARGET_CORE[%s]: Expected Transfer Length:" 985 " %u does not match SCSI CDB Length: %u for SAM Opcode:" 986 " 0x%02x\n", cmd->se_tfo->get_fabric_name(), 987 cmd->data_length, size, cmd->t_task_cdb[0]); 988 989 if (cmd->data_direction == DMA_TO_DEVICE) { 990 pr_err("Rejecting underflow/overflow" 991 " WRITE data\n"); 992 return TCM_INVALID_CDB_FIELD; 993 } 994 /* 995 * Reject READ_* or WRITE_* with overflow/underflow for 996 * type SCF_SCSI_DATA_CDB. 997 */ 998 if (dev->dev_attrib.block_size != 512) { 999 pr_err("Failing OVERFLOW/UNDERFLOW for LBA op" 1000 " CDB on non 512-byte sector setup subsystem" 1001 " plugin: %s\n", dev->transport->name); 1002 /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */ 1003 return TCM_INVALID_CDB_FIELD; 1004 } 1005 /* 1006 * For the overflow case keep the existing fabric provided 1007 * ->data_length. Otherwise for the underflow case, reset 1008 * ->data_length to the smaller SCSI expected data transfer 1009 * length. 
1010 */ 1011 if (size > cmd->data_length) { 1012 cmd->se_cmd_flags |= SCF_OVERFLOW_BIT; 1013 cmd->residual_count = (size - cmd->data_length); 1014 } else { 1015 cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; 1016 cmd->residual_count = (cmd->data_length - size); 1017 cmd->data_length = size; 1018 } 1019 } 1020 1021 return 0; 1022 1023 } 1024 1025 /* 1026 * Used by fabric modules containing a local struct se_cmd within their 1027 * fabric dependent per I/O descriptor. 1028 */ 1029 void transport_init_se_cmd( 1030 struct se_cmd *cmd, 1031 struct target_core_fabric_ops *tfo, 1032 struct se_session *se_sess, 1033 u32 data_length, 1034 int data_direction, 1035 int task_attr, 1036 unsigned char *sense_buffer) 1037 { 1038 INIT_LIST_HEAD(&cmd->se_lun_node); 1039 INIT_LIST_HEAD(&cmd->se_delayed_node); 1040 INIT_LIST_HEAD(&cmd->se_qf_node); 1041 INIT_LIST_HEAD(&cmd->se_cmd_list); 1042 INIT_LIST_HEAD(&cmd->state_list); 1043 init_completion(&cmd->transport_lun_fe_stop_comp); 1044 init_completion(&cmd->transport_lun_stop_comp); 1045 init_completion(&cmd->t_transport_stop_comp); 1046 init_completion(&cmd->cmd_wait_comp); 1047 init_completion(&cmd->task_stop_comp); 1048 spin_lock_init(&cmd->t_state_lock); 1049 cmd->transport_state = CMD_T_DEV_ACTIVE; 1050 1051 cmd->se_tfo = tfo; 1052 cmd->se_sess = se_sess; 1053 cmd->data_length = data_length; 1054 cmd->data_direction = data_direction; 1055 cmd->sam_task_attr = task_attr; 1056 cmd->sense_buffer = sense_buffer; 1057 1058 cmd->state_active = false; 1059 } 1060 EXPORT_SYMBOL(transport_init_se_cmd); 1061 1062 static sense_reason_t 1063 transport_check_alloc_task_attr(struct se_cmd *cmd) 1064 { 1065 struct se_device *dev = cmd->se_dev; 1066 1067 /* 1068 * Check if SAM Task Attribute emulation is enabled for this 1069 * struct se_device storage object 1070 */ 1071 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) 1072 return 0; 1073 1074 if (cmd->sam_task_attr == MSG_ACA_TAG) { 1075 pr_debug("SAM Task Attribute ACA" 1076 " emulation is not supported\n"); 1077 return TCM_INVALID_CDB_FIELD; 1078 } 1079 /* 1080 * Used to determine when ORDERED commands should go from 1081 * Dormant to Active status. 1082 */ 1083 cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id); 1084 smp_mb__after_atomic_inc(); 1085 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n", 1086 cmd->se_ordered_id, cmd->sam_task_attr, 1087 dev->transport->name); 1088 return 0; 1089 } 1090 1091 sense_reason_t 1092 target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb) 1093 { 1094 struct se_device *dev = cmd->se_dev; 1095 unsigned long flags; 1096 sense_reason_t ret; 1097 1098 /* 1099 * Ensure that the received CDB is less than the max (252 + 8) bytes 1100 * for VARIABLE_LENGTH_CMD 1101 */ 1102 if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) { 1103 pr_err("Received SCSI CDB with command_size: %d that" 1104 " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", 1105 scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE); 1106 return TCM_INVALID_CDB_FIELD; 1107 } 1108 /* 1109 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE, 1110 * allocate the additional extended CDB buffer now.. Otherwise 1111 * setup the pointer from __t_task_cdb to t_task_cdb. 
1112 */ 1113 if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) { 1114 cmd->t_task_cdb = kzalloc(scsi_command_size(cdb), 1115 GFP_KERNEL); 1116 if (!cmd->t_task_cdb) { 1117 pr_err("Unable to allocate cmd->t_task_cdb" 1118 " %u > sizeof(cmd->__t_task_cdb): %lu ops\n", 1119 scsi_command_size(cdb), 1120 (unsigned long)sizeof(cmd->__t_task_cdb)); 1121 return TCM_OUT_OF_RESOURCES; 1122 } 1123 } else 1124 cmd->t_task_cdb = &cmd->__t_task_cdb[0]; 1125 /* 1126 * Copy the original CDB into cmd-> 1127 */ 1128 memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb)); 1129 1130 /* 1131 * Check for an existing UNIT ATTENTION condition 1132 */ 1133 ret = target_scsi3_ua_check(cmd); 1134 if (ret) 1135 return ret; 1136 1137 ret = target_alua_state_check(cmd); 1138 if (ret) 1139 return ret; 1140 1141 ret = target_check_reservation(cmd); 1142 if (ret) 1143 return ret; 1144 1145 ret = dev->transport->parse_cdb(cmd); 1146 if (ret) 1147 return ret; 1148 1149 ret = transport_check_alloc_task_attr(cmd); 1150 if (ret) 1151 return ret; 1152 1153 spin_lock_irqsave(&cmd->t_state_lock, flags); 1154 cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE; 1155 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 1156 1157 spin_lock(&cmd->se_lun->lun_sep_lock); 1158 if (cmd->se_lun->lun_sep) 1159 cmd->se_lun->lun_sep->sep_stats.cmd_pdus++; 1160 spin_unlock(&cmd->se_lun->lun_sep_lock); 1161 return 0; 1162 } 1163 EXPORT_SYMBOL(target_setup_cmd_from_cdb); 1164 1165 /* 1166 * Used by fabric module frontends to queue tasks directly. 1167 * Many only be used from process context only 1168 */ 1169 int transport_handle_cdb_direct( 1170 struct se_cmd *cmd) 1171 { 1172 sense_reason_t ret; 1173 1174 if (!cmd->se_lun) { 1175 dump_stack(); 1176 pr_err("cmd->se_lun is NULL\n"); 1177 return -EINVAL; 1178 } 1179 if (in_interrupt()) { 1180 dump_stack(); 1181 pr_err("transport_generic_handle_cdb cannot be called" 1182 " from interrupt context\n"); 1183 return -EINVAL; 1184 } 1185 /* 1186 * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE to ensure that 1187 * outstanding descriptors are handled correctly during shutdown via 1188 * transport_wait_for_tasks() 1189 * 1190 * Also, we don't take cmd->t_state_lock here as we only expect 1191 * this to be called for initial descriptor submission. 1192 */ 1193 cmd->t_state = TRANSPORT_NEW_CMD; 1194 cmd->transport_state |= CMD_T_ACTIVE; 1195 1196 /* 1197 * transport_generic_new_cmd() is already handling QUEUE_FULL, 1198 * so follow TRANSPORT_NEW_CMD processing thread context usage 1199 * and call transport_generic_request_failure() if necessary.. 1200 */ 1201 ret = transport_generic_new_cmd(cmd); 1202 if (ret) 1203 transport_generic_request_failure(cmd, ret); 1204 return 0; 1205 } 1206 EXPORT_SYMBOL(transport_handle_cdb_direct); 1207 1208 static sense_reason_t 1209 transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl, 1210 u32 sgl_count, struct scatterlist *sgl_bidi, u32 sgl_bidi_count) 1211 { 1212 if (!sgl || !sgl_count) 1213 return 0; 1214 1215 /* 1216 * Reject SCSI data overflow with map_mem_to_cmd() as incoming 1217 * scatterlists already have been set to follow what the fabric 1218 * passes for the original expected data transfer length. 
1219 */ 1220 if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { 1221 pr_warn("Rejecting SCSI DATA overflow for fabric using" 1222 " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n"); 1223 return TCM_INVALID_CDB_FIELD; 1224 } 1225 1226 cmd->t_data_sg = sgl; 1227 cmd->t_data_nents = sgl_count; 1228 1229 if (sgl_bidi && sgl_bidi_count) { 1230 cmd->t_bidi_data_sg = sgl_bidi; 1231 cmd->t_bidi_data_nents = sgl_bidi_count; 1232 } 1233 cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; 1234 return 0; 1235 } 1236 1237 /* 1238 * target_submit_cmd_map_sgls - lookup unpacked lun and submit uninitialized 1239 * se_cmd + use pre-allocated SGL memory. 1240 * 1241 * @se_cmd: command descriptor to submit 1242 * @se_sess: associated se_sess for endpoint 1243 * @cdb: pointer to SCSI CDB 1244 * @sense: pointer to SCSI sense buffer 1245 * @unpacked_lun: unpacked LUN to reference for struct se_lun 1246 * @data_length: fabric expected data transfer length 1247 * @task_addr: SAM task attribute 1248 * @data_dir: DMA data direction 1249 * @flags: flags for command submission from target_sc_flags_tables 1250 * @sgl: struct scatterlist memory for unidirectional mapping 1251 * @sgl_count: scatterlist count for unidirectional mapping 1252 * @sgl_bidi: struct scatterlist memory for bidirectional READ mapping 1253 * @sgl_bidi_count: scatterlist count for bidirectional READ mapping 1254 * 1255 * Returns non zero to signal active I/O shutdown failure. All other 1256 * setup exceptions will be returned as a SCSI CHECK_CONDITION response, 1257 * but still return zero here. 1258 * 1259 * This may only be called from process context, and also currently 1260 * assumes internal allocation of fabric payload buffer by target-core. 1261 */ 1262 int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess, 1263 unsigned char *cdb, unsigned char *sense, u32 unpacked_lun, 1264 u32 data_length, int task_attr, int data_dir, int flags, 1265 struct scatterlist *sgl, u32 sgl_count, 1266 struct scatterlist *sgl_bidi, u32 sgl_bidi_count) 1267 { 1268 struct se_portal_group *se_tpg; 1269 sense_reason_t rc; 1270 int ret; 1271 1272 se_tpg = se_sess->se_tpg; 1273 BUG_ON(!se_tpg); 1274 BUG_ON(se_cmd->se_tfo || se_cmd->se_sess); 1275 BUG_ON(in_interrupt()); 1276 /* 1277 * Initialize se_cmd for target operation. From this point 1278 * exceptions are handled by sending exception status via 1279 * target_core_fabric_ops->queue_status() callback 1280 */ 1281 transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 1282 data_length, data_dir, task_attr, sense); 1283 if (flags & TARGET_SCF_UNKNOWN_SIZE) 1284 se_cmd->unknown_data_length = 1; 1285 /* 1286 * Obtain struct se_cmd->cmd_kref reference and add new cmd to 1287 * se_sess->sess_cmd_list. A second kref_get here is necessary 1288 * for fabrics using TARGET_SCF_ACK_KREF that expect a second 1289 * kref_put() to happen during fabric packet acknowledgement. 
1290 */ 1291 ret = target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF)); 1292 if (ret) 1293 return ret; 1294 /* 1295 * Signal bidirectional data payloads to target-core 1296 */ 1297 if (flags & TARGET_SCF_BIDI_OP) 1298 se_cmd->se_cmd_flags |= SCF_BIDI; 1299 /* 1300 * Locate se_lun pointer and attach it to struct se_cmd 1301 */ 1302 rc = transport_lookup_cmd_lun(se_cmd, unpacked_lun); 1303 if (rc) { 1304 transport_send_check_condition_and_sense(se_cmd, rc, 0); 1305 target_put_sess_cmd(se_sess, se_cmd); 1306 return 0; 1307 } 1308 1309 rc = target_setup_cmd_from_cdb(se_cmd, cdb); 1310 if (rc != 0) { 1311 transport_generic_request_failure(se_cmd, rc); 1312 return 0; 1313 } 1314 /* 1315 * When a non zero sgl_count has been passed perform SGL passthrough 1316 * mapping for pre-allocated fabric memory instead of having target 1317 * core perform an internal SGL allocation.. 1318 */ 1319 if (sgl_count != 0) { 1320 BUG_ON(!sgl); 1321 1322 /* 1323 * A work-around for tcm_loop as some userspace code via 1324 * scsi-generic do not memset their associated read buffers, 1325 * so go ahead and do that here for type non-data CDBs. Also 1326 * note that this is currently guaranteed to be a single SGL 1327 * for this case by target core in target_setup_cmd_from_cdb() 1328 * -> transport_generic_cmd_sequencer(). 1329 */ 1330 if (!(se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && 1331 se_cmd->data_direction == DMA_FROM_DEVICE) { 1332 unsigned char *buf = NULL; 1333 1334 if (sgl) 1335 buf = kmap(sg_page(sgl)) + sgl->offset; 1336 1337 if (buf) { 1338 memset(buf, 0, sgl->length); 1339 kunmap(sg_page(sgl)); 1340 } 1341 } 1342 1343 rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count, 1344 sgl_bidi, sgl_bidi_count); 1345 if (rc != 0) { 1346 transport_generic_request_failure(se_cmd, rc); 1347 return 0; 1348 } 1349 } 1350 /* 1351 * Check if we need to delay processing because of ALUA 1352 * Active/NonOptimized primary access state.. 1353 */ 1354 core_alua_check_nonop_delay(se_cmd); 1355 1356 transport_handle_cdb_direct(se_cmd); 1357 return 0; 1358 } 1359 EXPORT_SYMBOL(target_submit_cmd_map_sgls); 1360 1361 /* 1362 * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd 1363 * 1364 * @se_cmd: command descriptor to submit 1365 * @se_sess: associated se_sess for endpoint 1366 * @cdb: pointer to SCSI CDB 1367 * @sense: pointer to SCSI sense buffer 1368 * @unpacked_lun: unpacked LUN to reference for struct se_lun 1369 * @data_length: fabric expected data transfer length 1370 * @task_addr: SAM task attribute 1371 * @data_dir: DMA data direction 1372 * @flags: flags for command submission from target_sc_flags_tables 1373 * 1374 * Returns non zero to signal active I/O shutdown failure. All other 1375 * setup exceptions will be returned as a SCSI CHECK_CONDITION response, 1376 * but still return zero here. 1377 * 1378 * This may only be called from process context, and also currently 1379 * assumes internal allocation of fabric payload buffer by target-core. 1380 * 1381 * It also assumes interal target core SGL memory allocation. 
1382 */ 1383 int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess, 1384 unsigned char *cdb, unsigned char *sense, u32 unpacked_lun, 1385 u32 data_length, int task_attr, int data_dir, int flags) 1386 { 1387 return target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense, 1388 unpacked_lun, data_length, task_attr, data_dir, 1389 flags, NULL, 0, NULL, 0); 1390 } 1391 EXPORT_SYMBOL(target_submit_cmd); 1392 1393 static void target_complete_tmr_failure(struct work_struct *work) 1394 { 1395 struct se_cmd *se_cmd = container_of(work, struct se_cmd, work); 1396 1397 se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST; 1398 se_cmd->se_tfo->queue_tm_rsp(se_cmd); 1399 } 1400 1401 /** 1402 * target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd 1403 * for TMR CDBs 1404 * 1405 * @se_cmd: command descriptor to submit 1406 * @se_sess: associated se_sess for endpoint 1407 * @sense: pointer to SCSI sense buffer 1408 * @unpacked_lun: unpacked LUN to reference for struct se_lun 1409 * @fabric_context: fabric context for TMR req 1410 * @tm_type: Type of TM request 1411 * @gfp: gfp type for caller 1412 * @tag: referenced task tag for TMR_ABORT_TASK 1413 * @flags: submit cmd flags 1414 * 1415 * Callable from all contexts. 1416 **/ 1417 1418 int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess, 1419 unsigned char *sense, u32 unpacked_lun, 1420 void *fabric_tmr_ptr, unsigned char tm_type, 1421 gfp_t gfp, unsigned int tag, int flags) 1422 { 1423 struct se_portal_group *se_tpg; 1424 int ret; 1425 1426 se_tpg = se_sess->se_tpg; 1427 BUG_ON(!se_tpg); 1428 1429 transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 1430 0, DMA_NONE, MSG_SIMPLE_TAG, sense); 1431 /* 1432 * FIXME: Currently expect caller to handle se_cmd->se_tmr_req 1433 * allocation failure. 1434 */ 1435 ret = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, tm_type, gfp); 1436 if (ret < 0) 1437 return -ENOMEM; 1438 1439 if (tm_type == TMR_ABORT_TASK) 1440 se_cmd->se_tmr_req->ref_task_tag = tag; 1441 1442 /* See target_submit_cmd for commentary */ 1443 ret = target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF)); 1444 if (ret) { 1445 core_tmr_release_req(se_cmd->se_tmr_req); 1446 return ret; 1447 } 1448 1449 ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun); 1450 if (ret) { 1451 /* 1452 * For callback during failure handling, push this work off 1453 * to process context with TMR_LUN_DOES_NOT_EXIST status. 1454 */ 1455 INIT_WORK(&se_cmd->work, target_complete_tmr_failure); 1456 schedule_work(&se_cmd->work); 1457 return 0; 1458 } 1459 transport_generic_handle_tmr(se_cmd); 1460 return 0; 1461 } 1462 EXPORT_SYMBOL(target_submit_tmr); 1463 1464 /* 1465 * If the cmd is active, request it to be stopped and sleep until it 1466 * has completed. 1467 */ 1468 bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags) 1469 { 1470 bool was_active = false; 1471 1472 if (cmd->transport_state & CMD_T_BUSY) { 1473 cmd->transport_state |= CMD_T_REQUEST_STOP; 1474 spin_unlock_irqrestore(&cmd->t_state_lock, *flags); 1475 1476 pr_debug("cmd %p waiting to complete\n", cmd); 1477 wait_for_completion(&cmd->task_stop_comp); 1478 pr_debug("cmd %p stopped successfully\n", cmd); 1479 1480 spin_lock_irqsave(&cmd->t_state_lock, *flags); 1481 cmd->transport_state &= ~CMD_T_REQUEST_STOP; 1482 cmd->transport_state &= ~CMD_T_BUSY; 1483 was_active = true; 1484 } 1485 1486 return was_active; 1487 } 1488 1489 /* 1490 * Handle SAM-esque emulation for generic transport request failures. 
1491 */ 1492 void transport_generic_request_failure(struct se_cmd *cmd, 1493 sense_reason_t sense_reason) 1494 { 1495 int ret = 0; 1496 1497 pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x" 1498 " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd), 1499 cmd->t_task_cdb[0]); 1500 pr_debug("-----[ i_state: %d t_state: %d sense_reason: %d\n", 1501 cmd->se_tfo->get_cmd_state(cmd), 1502 cmd->t_state, sense_reason); 1503 pr_debug("-----[ CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n", 1504 (cmd->transport_state & CMD_T_ACTIVE) != 0, 1505 (cmd->transport_state & CMD_T_STOP) != 0, 1506 (cmd->transport_state & CMD_T_SENT) != 0); 1507 1508 /* 1509 * For SAM Task Attribute emulation for failed struct se_cmd 1510 */ 1511 transport_complete_task_attr(cmd); 1512 1513 switch (sense_reason) { 1514 case TCM_NON_EXISTENT_LUN: 1515 case TCM_UNSUPPORTED_SCSI_OPCODE: 1516 case TCM_INVALID_CDB_FIELD: 1517 case TCM_INVALID_PARAMETER_LIST: 1518 case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE: 1519 case TCM_UNKNOWN_MODE_PAGE: 1520 case TCM_WRITE_PROTECTED: 1521 case TCM_ADDRESS_OUT_OF_RANGE: 1522 case TCM_CHECK_CONDITION_ABORT_CMD: 1523 case TCM_CHECK_CONDITION_UNIT_ATTENTION: 1524 case TCM_CHECK_CONDITION_NOT_READY: 1525 break; 1526 case TCM_OUT_OF_RESOURCES: 1527 sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1528 break; 1529 case TCM_RESERVATION_CONFLICT: 1530 /* 1531 * No SENSE Data payload for this case, set SCSI Status 1532 * and queue the response to $FABRIC_MOD. 1533 * 1534 * Uses linux/include/scsi/scsi.h SAM status codes defs 1535 */ 1536 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; 1537 /* 1538 * For UA Interlock Code 11b, a RESERVATION CONFLICT will 1539 * establish a UNIT ATTENTION with PREVIOUS RESERVATION 1540 * CONFLICT STATUS. 
1541 * 1542 * See spc4r17, section 7.4.6 Control Mode Page, Table 349 1543 */ 1544 if (cmd->se_sess && 1545 cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl == 2) 1546 core_scsi3_ua_allocate(cmd->se_sess->se_node_acl, 1547 cmd->orig_fe_lun, 0x2C, 1548 ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); 1549 1550 ret = cmd->se_tfo->queue_status(cmd); 1551 if (ret == -EAGAIN || ret == -ENOMEM) 1552 goto queue_full; 1553 goto check_stop; 1554 default: 1555 pr_err("Unknown transport error for CDB 0x%02x: %d\n", 1556 cmd->t_task_cdb[0], sense_reason); 1557 sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; 1558 break; 1559 } 1560 1561 ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0); 1562 if (ret == -EAGAIN || ret == -ENOMEM) 1563 goto queue_full; 1564 1565 check_stop: 1566 transport_lun_remove_cmd(cmd); 1567 if (!transport_cmd_check_stop_to_fabric(cmd)) 1568 ; 1569 return; 1570 1571 queue_full: 1572 cmd->t_state = TRANSPORT_COMPLETE_QF_OK; 1573 transport_handle_queue_full(cmd, cmd->se_dev); 1574 } 1575 EXPORT_SYMBOL(transport_generic_request_failure); 1576 1577 static void __target_execute_cmd(struct se_cmd *cmd) 1578 { 1579 sense_reason_t ret; 1580 1581 spin_lock_irq(&cmd->t_state_lock); 1582 cmd->transport_state |= (CMD_T_BUSY|CMD_T_SENT); 1583 spin_unlock_irq(&cmd->t_state_lock); 1584 1585 if (cmd->execute_cmd) { 1586 ret = cmd->execute_cmd(cmd); 1587 if (ret) { 1588 spin_lock_irq(&cmd->t_state_lock); 1589 cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT); 1590 spin_unlock_irq(&cmd->t_state_lock); 1591 1592 transport_generic_request_failure(cmd, ret); 1593 } 1594 } 1595 } 1596 1597 static bool target_handle_task_attr(struct se_cmd *cmd) 1598 { 1599 struct se_device *dev = cmd->se_dev; 1600 1601 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) 1602 return false; 1603 1604 /* 1605 * Check for the existence of HEAD_OF_QUEUE, and if true return 1 1606 * to allow the passed struct se_cmd list of tasks to the front of the list. 1607 */ 1608 switch (cmd->sam_task_attr) { 1609 case MSG_HEAD_TAG: 1610 pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x, " 1611 "se_ordered_id: %u\n", 1612 cmd->t_task_cdb[0], cmd->se_ordered_id); 1613 return false; 1614 case MSG_ORDERED_TAG: 1615 atomic_inc(&dev->dev_ordered_sync); 1616 smp_mb__after_atomic_inc(); 1617 1618 pr_debug("Added ORDERED for CDB: 0x%02x to ordered list, " 1619 " se_ordered_id: %u\n", 1620 cmd->t_task_cdb[0], cmd->se_ordered_id); 1621 1622 /* 1623 * Execute an ORDERED command if no other older commands 1624 * exist that need to be completed first. 1625 */ 1626 if (!atomic_read(&dev->simple_cmds)) 1627 return false; 1628 break; 1629 default: 1630 /* 1631 * For SIMPLE and UNTAGGED Task Attribute commands 1632 */ 1633 atomic_inc(&dev->simple_cmds); 1634 smp_mb__after_atomic_inc(); 1635 break; 1636 } 1637 1638 if (atomic_read(&dev->dev_ordered_sync) == 0) 1639 return false; 1640 1641 spin_lock(&dev->delayed_cmd_lock); 1642 list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list); 1643 spin_unlock(&dev->delayed_cmd_lock); 1644 1645 pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to" 1646 " delayed CMD list, se_ordered_id: %u\n", 1647 cmd->t_task_cdb[0], cmd->sam_task_attr, 1648 cmd->se_ordered_id); 1649 return true; 1650 } 1651 1652 void target_execute_cmd(struct se_cmd *cmd) 1653 { 1654 /* 1655 * If the received CDB has aleady been aborted stop processing it here. 
1656 */ 1657 if (transport_check_aborted_status(cmd, 1)) { 1658 complete(&cmd->transport_lun_stop_comp); 1659 return; 1660 } 1661 1662 /* 1663 * Determine if IOCTL context caller in requesting the stopping of this 1664 * command for LUN shutdown purposes. 1665 */ 1666 spin_lock_irq(&cmd->t_state_lock); 1667 if (cmd->transport_state & CMD_T_LUN_STOP) { 1668 pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n", 1669 __func__, __LINE__, cmd->se_tfo->get_task_tag(cmd)); 1670 1671 cmd->transport_state &= ~CMD_T_ACTIVE; 1672 spin_unlock_irq(&cmd->t_state_lock); 1673 complete(&cmd->transport_lun_stop_comp); 1674 return; 1675 } 1676 /* 1677 * Determine if frontend context caller is requesting the stopping of 1678 * this command for frontend exceptions. 1679 */ 1680 if (cmd->transport_state & CMD_T_STOP) { 1681 pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n", 1682 __func__, __LINE__, 1683 cmd->se_tfo->get_task_tag(cmd)); 1684 1685 spin_unlock_irq(&cmd->t_state_lock); 1686 complete(&cmd->t_transport_stop_comp); 1687 return; 1688 } 1689 1690 cmd->t_state = TRANSPORT_PROCESSING; 1691 spin_unlock_irq(&cmd->t_state_lock); 1692 1693 if (!target_handle_task_attr(cmd)) 1694 __target_execute_cmd(cmd); 1695 } 1696 EXPORT_SYMBOL(target_execute_cmd); 1697 1698 /* 1699 * Process all commands up to the last received ORDERED task attribute which 1700 * requires another blocking boundary 1701 */ 1702 static void target_restart_delayed_cmds(struct se_device *dev) 1703 { 1704 for (;;) { 1705 struct se_cmd *cmd; 1706 1707 spin_lock(&dev->delayed_cmd_lock); 1708 if (list_empty(&dev->delayed_cmd_list)) { 1709 spin_unlock(&dev->delayed_cmd_lock); 1710 break; 1711 } 1712 1713 cmd = list_entry(dev->delayed_cmd_list.next, 1714 struct se_cmd, se_delayed_node); 1715 list_del(&cmd->se_delayed_node); 1716 spin_unlock(&dev->delayed_cmd_lock); 1717 1718 __target_execute_cmd(cmd); 1719 1720 if (cmd->sam_task_attr == MSG_ORDERED_TAG) 1721 break; 1722 } 1723 } 1724 1725 /* 1726 * Called from I/O completion to determine which dormant/delayed 1727 * and ordered cmds need to have their tasks added to the execution queue. 
1728 */ 1729 static void transport_complete_task_attr(struct se_cmd *cmd) 1730 { 1731 struct se_device *dev = cmd->se_dev; 1732 1733 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) 1734 return; 1735 1736 if (cmd->sam_task_attr == MSG_SIMPLE_TAG) { 1737 atomic_dec(&dev->simple_cmds); 1738 smp_mb__after_atomic_dec(); 1739 dev->dev_cur_ordered_id++; 1740 pr_debug("Incremented dev->dev_cur_ordered_id: %u for" 1741 " SIMPLE: %u\n", dev->dev_cur_ordered_id, 1742 cmd->se_ordered_id); 1743 } else if (cmd->sam_task_attr == MSG_HEAD_TAG) { 1744 dev->dev_cur_ordered_id++; 1745 pr_debug("Incremented dev_cur_ordered_id: %u for" 1746 " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id, 1747 cmd->se_ordered_id); 1748 } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) { 1749 atomic_dec(&dev->dev_ordered_sync); 1750 smp_mb__after_atomic_dec(); 1751 1752 dev->dev_cur_ordered_id++; 1753 pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:" 1754 " %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id); 1755 } 1756 1757 target_restart_delayed_cmds(dev); 1758 } 1759 1760 static void transport_complete_qf(struct se_cmd *cmd) 1761 { 1762 int ret = 0; 1763 1764 transport_complete_task_attr(cmd); 1765 1766 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { 1767 ret = cmd->se_tfo->queue_status(cmd); 1768 if (ret) 1769 goto out; 1770 } 1771 1772 switch (cmd->data_direction) { 1773 case DMA_FROM_DEVICE: 1774 ret = cmd->se_tfo->queue_data_in(cmd); 1775 break; 1776 case DMA_TO_DEVICE: 1777 if (cmd->t_bidi_data_sg) { 1778 ret = cmd->se_tfo->queue_data_in(cmd); 1779 if (ret < 0) 1780 break; 1781 } 1782 /* Fall through for DMA_TO_DEVICE */ 1783 case DMA_NONE: 1784 ret = cmd->se_tfo->queue_status(cmd); 1785 break; 1786 default: 1787 break; 1788 } 1789 1790 out: 1791 if (ret < 0) { 1792 transport_handle_queue_full(cmd, cmd->se_dev); 1793 return; 1794 } 1795 transport_lun_remove_cmd(cmd); 1796 transport_cmd_check_stop_to_fabric(cmd); 1797 } 1798 1799 static void transport_handle_queue_full( 1800 struct se_cmd *cmd, 1801 struct se_device *dev) 1802 { 1803 spin_lock_irq(&dev->qf_cmd_lock); 1804 list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list); 1805 atomic_inc(&dev->dev_qf_count); 1806 smp_mb__after_atomic_inc(); 1807 spin_unlock_irq(&cmd->se_dev->qf_cmd_lock); 1808 1809 schedule_work(&cmd->se_dev->qf_work_queue); 1810 } 1811 1812 static void target_complete_ok_work(struct work_struct *work) 1813 { 1814 struct se_cmd *cmd = container_of(work, struct se_cmd, work); 1815 int ret; 1816 1817 /* 1818 * Check if we need to move delayed/dormant tasks from cmds on the 1819 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task 1820 * Attribute. 1821 */ 1822 transport_complete_task_attr(cmd); 1823 1824 /* 1825 * Check to schedule QUEUE_FULL work, or execute an existing 1826 * cmd->transport_qf_callback() 1827 */ 1828 if (atomic_read(&cmd->se_dev->dev_qf_count) != 0) 1829 schedule_work(&cmd->se_dev->qf_work_queue); 1830 1831 /* 1832 * Check if we need to send a sense buffer from 1833 * the struct se_cmd in question. 1834 */ 1835 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { 1836 WARN_ON(!cmd->scsi_status); 1837 ret = transport_send_check_condition_and_sense( 1838 cmd, 0, 1); 1839 if (ret == -EAGAIN || ret == -ENOMEM) 1840 goto queue_full; 1841 1842 transport_lun_remove_cmd(cmd); 1843 transport_cmd_check_stop_to_fabric(cmd); 1844 return; 1845 } 1846 /* 1847 * Check for a callback, used by amongst other things 1848 * XDWRITE_READ_10 emulation. 
1849 */ 1850 if (cmd->transport_complete_callback) 1851 cmd->transport_complete_callback(cmd); 1852 1853 switch (cmd->data_direction) { 1854 case DMA_FROM_DEVICE: 1855 spin_lock(&cmd->se_lun->lun_sep_lock); 1856 if (cmd->se_lun->lun_sep) { 1857 cmd->se_lun->lun_sep->sep_stats.tx_data_octets += 1858 cmd->data_length; 1859 } 1860 spin_unlock(&cmd->se_lun->lun_sep_lock); 1861 1862 ret = cmd->se_tfo->queue_data_in(cmd); 1863 if (ret == -EAGAIN || ret == -ENOMEM) 1864 goto queue_full; 1865 break; 1866 case DMA_TO_DEVICE: 1867 spin_lock(&cmd->se_lun->lun_sep_lock); 1868 if (cmd->se_lun->lun_sep) { 1869 cmd->se_lun->lun_sep->sep_stats.rx_data_octets += 1870 cmd->data_length; 1871 } 1872 spin_unlock(&cmd->se_lun->lun_sep_lock); 1873 /* 1874 * Check if we need to send READ payload for BIDI-COMMAND 1875 */ 1876 if (cmd->t_bidi_data_sg) { 1877 spin_lock(&cmd->se_lun->lun_sep_lock); 1878 if (cmd->se_lun->lun_sep) { 1879 cmd->se_lun->lun_sep->sep_stats.tx_data_octets += 1880 cmd->data_length; 1881 } 1882 spin_unlock(&cmd->se_lun->lun_sep_lock); 1883 ret = cmd->se_tfo->queue_data_in(cmd); 1884 if (ret == -EAGAIN || ret == -ENOMEM) 1885 goto queue_full; 1886 break; 1887 } 1888 /* Fall through for DMA_TO_DEVICE */ 1889 case DMA_NONE: 1890 ret = cmd->se_tfo->queue_status(cmd); 1891 if (ret == -EAGAIN || ret == -ENOMEM) 1892 goto queue_full; 1893 break; 1894 default: 1895 break; 1896 } 1897 1898 transport_lun_remove_cmd(cmd); 1899 transport_cmd_check_stop_to_fabric(cmd); 1900 return; 1901 1902 queue_full: 1903 pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p," 1904 " data_direction: %d\n", cmd, cmd->data_direction); 1905 cmd->t_state = TRANSPORT_COMPLETE_QF_OK; 1906 transport_handle_queue_full(cmd, cmd->se_dev); 1907 } 1908 1909 static inline void transport_free_sgl(struct scatterlist *sgl, int nents) 1910 { 1911 struct scatterlist *sg; 1912 int count; 1913 1914 for_each_sg(sgl, sg, nents, count) 1915 __free_page(sg_page(sg)); 1916 1917 kfree(sgl); 1918 } 1919 1920 static inline void transport_free_pages(struct se_cmd *cmd) 1921 { 1922 if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) 1923 return; 1924 1925 transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents); 1926 cmd->t_data_sg = NULL; 1927 cmd->t_data_nents = 0; 1928 1929 transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents); 1930 cmd->t_bidi_data_sg = NULL; 1931 cmd->t_bidi_data_nents = 0; 1932 } 1933 1934 /** 1935 * transport_release_cmd - free a command 1936 * @cmd: command to free 1937 * 1938 * This routine unconditionally frees a command, and reference counting 1939 * or list removal must be done in the caller. 1940 */ 1941 static void transport_release_cmd(struct se_cmd *cmd) 1942 { 1943 BUG_ON(!cmd->se_tfo); 1944 1945 if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) 1946 core_tmr_release_req(cmd->se_tmr_req); 1947 if (cmd->t_task_cdb != cmd->__t_task_cdb) 1948 kfree(cmd->t_task_cdb); 1949 /* 1950 * If this cmd has been setup with target_get_sess_cmd(), drop 1951 * the kref and call ->release_cmd() in kref callback. 1952 */ 1953 if (cmd->check_release != 0) { 1954 target_put_sess_cmd(cmd->se_sess, cmd); 1955 return; 1956 } 1957 cmd->se_tfo->release_cmd(cmd); 1958 } 1959 1960 /** 1961 * transport_put_cmd - release a reference to a command 1962 * @cmd: command to release 1963 * 1964 * This routine releases our reference to the command and frees it if possible. 
1965  */
1966 static void transport_put_cmd(struct se_cmd *cmd)
1967 {
1968 	unsigned long flags;
1969 
1970 	spin_lock_irqsave(&cmd->t_state_lock, flags);
1971 	if (atomic_read(&cmd->t_fe_count) &&
1972 	    !atomic_dec_and_test(&cmd->t_fe_count)) {
1973 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
1974 		return;
1975 	}
1976 
1977 	if (cmd->transport_state & CMD_T_DEV_ACTIVE) {
1978 		cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
1979 		target_remove_from_state_list(cmd);
1980 	}
1981 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
1982 
1983 	transport_free_pages(cmd);
1984 	transport_release_cmd(cmd);
1985 	return;
1986 }
1987 
1988 void *transport_kmap_data_sg(struct se_cmd *cmd)
1989 {
1990 	struct scatterlist *sg = cmd->t_data_sg;
1991 	struct page **pages;
1992 	int i;
1993 
1994 	/*
1995 	 * We need to take into account a possible offset here for fabrics like
1996 	 * tcm_loop who may be using a contig buffer from the SCSI midlayer for
1997 	 * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd()
1998 	 */
1999 	if (!cmd->t_data_nents)
2000 		return NULL;
2001 
2002 	BUG_ON(!sg);
2003 	if (cmd->t_data_nents == 1)
2004 		return kmap(sg_page(sg)) + sg->offset;
2005 
2006 	/* >1 page. use vmap */
2007 	pages = kmalloc(sizeof(*pages) * cmd->t_data_nents, GFP_KERNEL);
2008 	if (!pages)
2009 		return NULL;
2010 
2011 	/* convert sg[] to pages[] */
2012 	for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) {
2013 		pages[i] = sg_page(sg);
2014 	}
2015 
2016 	cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL);
2017 	kfree(pages);
2018 	if (!cmd->t_data_vmap)
2019 		return NULL;
2020 
2021 	return cmd->t_data_vmap + cmd->t_data_sg[0].offset;
2022 }
2023 EXPORT_SYMBOL(transport_kmap_data_sg);
2024 
2025 void transport_kunmap_data_sg(struct se_cmd *cmd)
2026 {
2027 	if (!cmd->t_data_nents) {
2028 		return;
2029 	} else if (cmd->t_data_nents == 1) {
2030 		kunmap(sg_page(cmd->t_data_sg));
2031 		return;
2032 	}
2033 
2034 	vunmap(cmd->t_data_vmap);
2035 	cmd->t_data_vmap = NULL;
2036 }
2037 EXPORT_SYMBOL(transport_kunmap_data_sg);
2038 
2039 static int
2040 transport_generic_get_mem(struct se_cmd *cmd)
2041 {
2042 	u32 length = cmd->data_length;
2043 	unsigned int nents;
2044 	struct page *page;
2045 	gfp_t zero_flag;
2046 	int i = 0;
2047 
2048 	nents = DIV_ROUND_UP(length, PAGE_SIZE);
2049 	cmd->t_data_sg = kmalloc(sizeof(struct scatterlist) * nents, GFP_KERNEL);
2050 	if (!cmd->t_data_sg)
2051 		return -ENOMEM;
2052 
2053 	cmd->t_data_nents = nents;
2054 	sg_init_table(cmd->t_data_sg, nents);
2055 
2056 	zero_flag = cmd->se_cmd_flags & SCF_SCSI_DATA_CDB ? 0 : __GFP_ZERO;
2057 
2058 	while (length) {
2059 		u32 page_len = min_t(u32, length, PAGE_SIZE);
2060 		page = alloc_page(GFP_KERNEL | zero_flag);
2061 		if (!page)
2062 			goto out;
2063 
2064 		sg_set_page(&cmd->t_data_sg[i], page, page_len, 0);
2065 		length -= page_len;
2066 		i++;
2067 	}
2068 	return 0;
2069 
2070 out:
2071 	while (i > 0) {
2072 		i--;
2073 		__free_page(sg_page(&cmd->t_data_sg[i]));
2074 	}
2075 	kfree(cmd->t_data_sg);
2076 	cmd->t_data_sg = NULL;
2077 	return -ENOMEM;
2078 }
2079 
2080 /*
2081  * Allocate any required resources to execute the command. For writes we
2082  * might not have the payload yet, so notify the fabric via a call to
2083  * ->write_pending instead. Otherwise place it on the execution queue.
2084  */
2085 sense_reason_t
2086 transport_generic_new_cmd(struct se_cmd *cmd)
2087 {
2088 	int ret = 0;
2089 
2090 	/*
2091 	 * Determine if the TCM fabric module has already allocated physical
2092 	 * memory, and is directly calling transport_generic_map_mem_to_cmd()
2093 	 * beforehand.
2094 	 */
2095 	if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
2096 	    cmd->data_length) {
2097 		ret = transport_generic_get_mem(cmd);
2098 		if (ret < 0)
2099 			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2100 	}
2101 
2102 	atomic_inc(&cmd->t_fe_count);
2103 
2104 	/*
2105 	 * If this command is not a write we can execute it right here;
2106 	 * for write buffers we need to notify the fabric driver first
2107 	 * and let it call back once the write buffers are ready.
2108 	 */
2109 	target_add_to_state_list(cmd);
2110 	if (cmd->data_direction != DMA_TO_DEVICE) {
2111 		target_execute_cmd(cmd);
2112 		return 0;
2113 	}
2114 
2115 	spin_lock_irq(&cmd->t_state_lock);
2116 	cmd->t_state = TRANSPORT_WRITE_PENDING;
2117 	spin_unlock_irq(&cmd->t_state_lock);
2118 
2119 	transport_cmd_check_stop(cmd, false);
2120 
2121 	ret = cmd->se_tfo->write_pending(cmd);
2122 	if (ret == -EAGAIN || ret == -ENOMEM)
2123 		goto queue_full;
2124 
2125 	/* fabric drivers should only return -EAGAIN or -ENOMEM as error */
2126 	WARN_ON(ret);
2127 
2128 	return (!ret) ? 0 : TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2129 
2130 queue_full:
2131 	pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", cmd);
2132 	cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
2133 	transport_handle_queue_full(cmd, cmd->se_dev);
2134 	return 0;
2135 }
2136 EXPORT_SYMBOL(transport_generic_new_cmd);
2137 
2138 static void transport_write_pending_qf(struct se_cmd *cmd)
2139 {
2140 	int ret;
2141 
2142 	ret = cmd->se_tfo->write_pending(cmd);
2143 	if (ret == -EAGAIN || ret == -ENOMEM) {
2144 		pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n",
2145 			 cmd);
2146 		transport_handle_queue_full(cmd, cmd->se_dev);
2147 	}
2148 }
2149 
2150 void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
2151 {
2152 	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
2153 		if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
2154 			transport_wait_for_tasks(cmd);
2155 
2156 		transport_release_cmd(cmd);
2157 	} else {
2158 		if (wait_for_tasks)
2159 			transport_wait_for_tasks(cmd);
2160 
2161 		core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd);
2162 
2163 		if (cmd->se_lun)
2164 			transport_lun_remove_cmd(cmd);
2165 
2166 		transport_put_cmd(cmd);
2167 	}
2168 }
2169 EXPORT_SYMBOL(transport_generic_free_cmd);
2170 
2171 /* target_get_sess_cmd - Add command to active ->sess_cmd_list
2172  * @se_sess: session to reference
2173  * @se_cmd: command descriptor to add
2174  * @ack_kref: Signal that fabric will perform an ack target_put_sess_cmd()
2175  */
2176 static int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
2177 			       bool ack_kref)
2178 {
2179 	unsigned long flags;
2180 	int ret = 0;
2181 
2182 	kref_init(&se_cmd->cmd_kref);
2183 	/*
2184 	 * Add a second kref if the fabric caller is expecting to handle
2185 	 * fabric acknowledgement that requires two target_put_sess_cmd()
2186 	 * invocations before se_cmd descriptor release.
2187 */ 2188 if (ack_kref == true) { 2189 kref_get(&se_cmd->cmd_kref); 2190 se_cmd->se_cmd_flags |= SCF_ACK_KREF; 2191 } 2192 2193 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 2194 if (se_sess->sess_tearing_down) { 2195 ret = -ESHUTDOWN; 2196 goto out; 2197 } 2198 list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list); 2199 se_cmd->check_release = 1; 2200 2201 out: 2202 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2203 return ret; 2204 } 2205 2206 static void target_release_cmd_kref(struct kref *kref) 2207 { 2208 struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref); 2209 struct se_session *se_sess = se_cmd->se_sess; 2210 unsigned long flags; 2211 2212 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 2213 if (list_empty(&se_cmd->se_cmd_list)) { 2214 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2215 se_cmd->se_tfo->release_cmd(se_cmd); 2216 return; 2217 } 2218 if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) { 2219 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2220 complete(&se_cmd->cmd_wait_comp); 2221 return; 2222 } 2223 list_del(&se_cmd->se_cmd_list); 2224 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2225 2226 se_cmd->se_tfo->release_cmd(se_cmd); 2227 } 2228 2229 /* target_put_sess_cmd - Check for active I/O shutdown via kref_put 2230 * @se_sess: session to reference 2231 * @se_cmd: command descriptor to drop 2232 */ 2233 int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd) 2234 { 2235 return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref); 2236 } 2237 EXPORT_SYMBOL(target_put_sess_cmd); 2238 2239 /* target_sess_cmd_list_set_waiting - Flag all commands in 2240 * sess_cmd_list to complete cmd_wait_comp. Set 2241 * sess_tearing_down so no more commands are queued. 
2242 * @se_sess: session to flag 2243 */ 2244 void target_sess_cmd_list_set_waiting(struct se_session *se_sess) 2245 { 2246 struct se_cmd *se_cmd; 2247 unsigned long flags; 2248 2249 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 2250 2251 WARN_ON(se_sess->sess_tearing_down); 2252 se_sess->sess_tearing_down = 1; 2253 2254 list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) 2255 se_cmd->cmd_wait_set = 1; 2256 2257 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2258 } 2259 EXPORT_SYMBOL(target_sess_cmd_list_set_waiting); 2260 2261 /* target_wait_for_sess_cmds - Wait for outstanding descriptors 2262 * @se_sess: session to wait for active I/O 2263 * @wait_for_tasks: Make extra transport_wait_for_tasks call 2264 */ 2265 void target_wait_for_sess_cmds( 2266 struct se_session *se_sess, 2267 int wait_for_tasks) 2268 { 2269 struct se_cmd *se_cmd, *tmp_cmd; 2270 bool rc = false; 2271 2272 list_for_each_entry_safe(se_cmd, tmp_cmd, 2273 &se_sess->sess_cmd_list, se_cmd_list) { 2274 list_del(&se_cmd->se_cmd_list); 2275 2276 pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:" 2277 " %d\n", se_cmd, se_cmd->t_state, 2278 se_cmd->se_tfo->get_cmd_state(se_cmd)); 2279 2280 if (wait_for_tasks) { 2281 pr_debug("Calling transport_wait_for_tasks se_cmd: %p t_state: %d," 2282 " fabric state: %d\n", se_cmd, se_cmd->t_state, 2283 se_cmd->se_tfo->get_cmd_state(se_cmd)); 2284 2285 rc = transport_wait_for_tasks(se_cmd); 2286 2287 pr_debug("After transport_wait_for_tasks se_cmd: %p t_state: %d," 2288 " fabric state: %d\n", se_cmd, se_cmd->t_state, 2289 se_cmd->se_tfo->get_cmd_state(se_cmd)); 2290 } 2291 2292 if (!rc) { 2293 wait_for_completion(&se_cmd->cmd_wait_comp); 2294 pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d" 2295 " fabric state: %d\n", se_cmd, se_cmd->t_state, 2296 se_cmd->se_tfo->get_cmd_state(se_cmd)); 2297 } 2298 2299 se_cmd->se_tfo->release_cmd(se_cmd); 2300 } 2301 } 2302 EXPORT_SYMBOL(target_wait_for_sess_cmds); 2303 2304 /* transport_lun_wait_for_tasks(): 2305 * 2306 * Called from ConfigFS context to stop the passed struct se_cmd to allow 2307 * an struct se_lun to be successfully shutdown. 2308 */ 2309 static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun) 2310 { 2311 unsigned long flags; 2312 int ret = 0; 2313 2314 /* 2315 * If the frontend has already requested this struct se_cmd to 2316 * be stopped, we can safely ignore this struct se_cmd. 2317 */ 2318 spin_lock_irqsave(&cmd->t_state_lock, flags); 2319 if (cmd->transport_state & CMD_T_STOP) { 2320 cmd->transport_state &= ~CMD_T_LUN_STOP; 2321 2322 pr_debug("ConfigFS ITT[0x%08x] - CMD_T_STOP, skipping\n", 2323 cmd->se_tfo->get_task_tag(cmd)); 2324 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2325 transport_cmd_check_stop(cmd, false); 2326 return -EPERM; 2327 } 2328 cmd->transport_state |= CMD_T_LUN_FE_STOP; 2329 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2330 2331 // XXX: audit task_flags checks. 
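	/*
	 * If the backend is still processing this command (CMD_T_BUSY |
	 * CMD_T_SENT), ask it to stop via target_stop_cmd() before deciding
	 * below whether transport_lun_stop_comp still needs to be waited on.
	 */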
2332 	spin_lock_irqsave(&cmd->t_state_lock, flags);
2333 	if ((cmd->transport_state & CMD_T_BUSY) &&
2334 	    (cmd->transport_state & CMD_T_SENT)) {
2335 		if (!target_stop_cmd(cmd, &flags))
2336 			ret++;
2337 	}
2338 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2339 
2340 	pr_debug("ConfigFS: cmd: %p stop tasks ret:"
2341 			" %d\n", cmd, ret);
2342 	if (!ret) {
2343 		pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
2344 				cmd->se_tfo->get_task_tag(cmd));
2345 		wait_for_completion(&cmd->transport_lun_stop_comp);
2346 		pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
2347 				cmd->se_tfo->get_task_tag(cmd));
2348 	}
2349 
2350 	return 0;
2351 }
2352 
2353 static void __transport_clear_lun_from_sessions(struct se_lun *lun)
2354 {
2355 	struct se_cmd *cmd = NULL;
2356 	unsigned long lun_flags, cmd_flags;
2357 	/*
2358 	 * Do exception processing and return CHECK_CONDITION status to the
2359 	 * Initiator Port.
2360 	 */
2361 	spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
2362 	while (!list_empty(&lun->lun_cmd_list)) {
2363 		cmd = list_first_entry(&lun->lun_cmd_list,
2364 		       struct se_cmd, se_lun_node);
2365 		list_del_init(&cmd->se_lun_node);
2366 
2367 		spin_lock(&cmd->t_state_lock);
2368 		pr_debug("SE_LUN[%d] - Setting cmd->transport"
2369 			"_lun_stop for ITT: 0x%08x\n",
2370 			cmd->se_lun->unpacked_lun,
2371 			cmd->se_tfo->get_task_tag(cmd));
2372 		cmd->transport_state |= CMD_T_LUN_STOP;
2373 		spin_unlock(&cmd->t_state_lock);
2374 
2375 		spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
2376 
2377 		if (!cmd->se_lun) {
2378 			pr_err("ITT: 0x%08x, [i,t]_state: %u/%u\n",
2379 				cmd->se_tfo->get_task_tag(cmd),
2380 				cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
2381 			BUG();
2382 		}
2383 		/*
2384 		 * If the Storage engine still owns the iscsi_cmd_t, determine
2385 		 * and/or stop its context.
2386 		 */
2387 		pr_debug("SE_LUN[%d] - ITT: 0x%08x before transport"
2388 			"_lun_wait_for_tasks()\n", cmd->se_lun->unpacked_lun,
2389 			cmd->se_tfo->get_task_tag(cmd));
2390 
2391 		if (transport_lun_wait_for_tasks(cmd, cmd->se_lun) < 0) {
2392 			spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
2393 			continue;
2394 		}
2395 
2396 		pr_debug("SE_LUN[%d] - ITT: 0x%08x after transport_lun"
2397 			"_wait_for_tasks(): SUCCESS\n",
2398 			cmd->se_lun->unpacked_lun,
2399 			cmd->se_tfo->get_task_tag(cmd));
2400 
2401 		spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
2402 		if (!(cmd->transport_state & CMD_T_DEV_ACTIVE)) {
2403 			spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
2404 			goto check_cond;
2405 		}
2406 		cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
2407 		target_remove_from_state_list(cmd);
2408 		spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
2409 
2410 		/*
2411 		 * The Storage engine stopped this struct se_cmd before it was
2412 		 * sent to the fabric frontend for delivery back to the
2413 		 * Initiator Node. Return this SCSI CDB back with a
2414 		 * CHECK_CONDITION status.
2415 		 */
2416 check_cond:
2417 		transport_send_check_condition_and_sense(cmd,
2418 				TCM_NON_EXISTENT_LUN, 0);
2419 		/*
2420 		 * If the fabric frontend is waiting for this iscsi_cmd_t to
2421 		 * be released, notify the waiting thread now that LU has
2422 		 * finished accessing it.
2423 */ 2424 spin_lock_irqsave(&cmd->t_state_lock, cmd_flags); 2425 if (cmd->transport_state & CMD_T_LUN_FE_STOP) { 2426 pr_debug("SE_LUN[%d] - Detected FE stop for" 2427 " struct se_cmd: %p ITT: 0x%08x\n", 2428 lun->unpacked_lun, 2429 cmd, cmd->se_tfo->get_task_tag(cmd)); 2430 2431 spin_unlock_irqrestore(&cmd->t_state_lock, 2432 cmd_flags); 2433 transport_cmd_check_stop(cmd, false); 2434 complete(&cmd->transport_lun_fe_stop_comp); 2435 spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); 2436 continue; 2437 } 2438 pr_debug("SE_LUN[%d] - ITT: 0x%08x finished processing\n", 2439 lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd)); 2440 2441 spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); 2442 spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); 2443 } 2444 spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags); 2445 } 2446 2447 static int transport_clear_lun_thread(void *p) 2448 { 2449 struct se_lun *lun = p; 2450 2451 __transport_clear_lun_from_sessions(lun); 2452 complete(&lun->lun_shutdown_comp); 2453 2454 return 0; 2455 } 2456 2457 int transport_clear_lun_from_sessions(struct se_lun *lun) 2458 { 2459 struct task_struct *kt; 2460 2461 kt = kthread_run(transport_clear_lun_thread, lun, 2462 "tcm_cl_%u", lun->unpacked_lun); 2463 if (IS_ERR(kt)) { 2464 pr_err("Unable to start clear_lun thread\n"); 2465 return PTR_ERR(kt); 2466 } 2467 wait_for_completion(&lun->lun_shutdown_comp); 2468 2469 return 0; 2470 } 2471 2472 /** 2473 * transport_wait_for_tasks - wait for completion to occur 2474 * @cmd: command to wait 2475 * 2476 * Called from frontend fabric context to wait for storage engine 2477 * to pause and/or release frontend generated struct se_cmd. 2478 */ 2479 bool transport_wait_for_tasks(struct se_cmd *cmd) 2480 { 2481 unsigned long flags; 2482 2483 spin_lock_irqsave(&cmd->t_state_lock, flags); 2484 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && 2485 !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) { 2486 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2487 return false; 2488 } 2489 2490 if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && 2491 !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) { 2492 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2493 return false; 2494 } 2495 /* 2496 * If we are already stopped due to an external event (ie: LUN shutdown) 2497 * sleep until the connection can have the passed struct se_cmd back. 2498 * The cmd->transport_lun_stopped_sem will be upped by 2499 * transport_clear_lun_from_sessions() once the ConfigFS context caller 2500 * has completed its operation on the struct se_cmd. 2501 */ 2502 if (cmd->transport_state & CMD_T_LUN_STOP) { 2503 pr_debug("wait_for_tasks: Stopping" 2504 " wait_for_completion(&cmd->t_tasktransport_lun_fe" 2505 "_stop_comp); for ITT: 0x%08x\n", 2506 cmd->se_tfo->get_task_tag(cmd)); 2507 /* 2508 * There is a special case for WRITES where a FE exception + 2509 * LUN shutdown means ConfigFS context is still sleeping on 2510 * transport_lun_stop_comp in transport_lun_wait_for_tasks(). 2511 * We go ahead and up transport_lun_stop_comp just to be sure 2512 * here. 2513 */ 2514 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2515 complete(&cmd->transport_lun_stop_comp); 2516 wait_for_completion(&cmd->transport_lun_fe_stop_comp); 2517 spin_lock_irqsave(&cmd->t_state_lock, flags); 2518 2519 target_remove_from_state_list(cmd); 2520 /* 2521 * At this point, the frontend who was the originator of this 2522 * struct se_cmd, now owns the structure and can be released through 2523 * normal means below. 
2524 */ 2525 pr_debug("wait_for_tasks: Stopped" 2526 " wait_for_completion(&cmd->t_tasktransport_lun_fe_" 2527 "stop_comp); for ITT: 0x%08x\n", 2528 cmd->se_tfo->get_task_tag(cmd)); 2529 2530 cmd->transport_state &= ~CMD_T_LUN_STOP; 2531 } 2532 2533 if (!(cmd->transport_state & CMD_T_ACTIVE)) { 2534 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2535 return false; 2536 } 2537 2538 cmd->transport_state |= CMD_T_STOP; 2539 2540 pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x" 2541 " i_state: %d, t_state: %d, CMD_T_STOP\n", 2542 cmd, cmd->se_tfo->get_task_tag(cmd), 2543 cmd->se_tfo->get_cmd_state(cmd), cmd->t_state); 2544 2545 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2546 2547 wait_for_completion(&cmd->t_transport_stop_comp); 2548 2549 spin_lock_irqsave(&cmd->t_state_lock, flags); 2550 cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP); 2551 2552 pr_debug("wait_for_tasks: Stopped wait_for_completion(" 2553 "&cmd->t_transport_stop_comp) for ITT: 0x%08x\n", 2554 cmd->se_tfo->get_task_tag(cmd)); 2555 2556 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2557 2558 return true; 2559 } 2560 EXPORT_SYMBOL(transport_wait_for_tasks); 2561 2562 static int transport_get_sense_codes( 2563 struct se_cmd *cmd, 2564 u8 *asc, 2565 u8 *ascq) 2566 { 2567 *asc = cmd->scsi_asc; 2568 *ascq = cmd->scsi_ascq; 2569 2570 return 0; 2571 } 2572 2573 int 2574 transport_send_check_condition_and_sense(struct se_cmd *cmd, 2575 sense_reason_t reason, int from_transport) 2576 { 2577 unsigned char *buffer = cmd->sense_buffer; 2578 unsigned long flags; 2579 u8 asc = 0, ascq = 0; 2580 2581 spin_lock_irqsave(&cmd->t_state_lock, flags); 2582 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { 2583 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2584 return 0; 2585 } 2586 cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION; 2587 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2588 2589 if (!reason && from_transport) 2590 goto after_reason; 2591 2592 if (!from_transport) 2593 cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE; 2594 2595 /* 2596 * Actual SENSE DATA, see SPC-3 7.23.2 SPC_SENSE_KEY_OFFSET uses 2597 * SENSE KEY values from include/scsi/scsi.h 2598 */ 2599 switch (reason) { 2600 case TCM_NON_EXISTENT_LUN: 2601 /* CURRENT ERROR */ 2602 buffer[0] = 0x70; 2603 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2604 /* ILLEGAL REQUEST */ 2605 buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; 2606 /* LOGICAL UNIT NOT SUPPORTED */ 2607 buffer[SPC_ASC_KEY_OFFSET] = 0x25; 2608 break; 2609 case TCM_UNSUPPORTED_SCSI_OPCODE: 2610 case TCM_SECTOR_COUNT_TOO_MANY: 2611 /* CURRENT ERROR */ 2612 buffer[0] = 0x70; 2613 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2614 /* ILLEGAL REQUEST */ 2615 buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; 2616 /* INVALID COMMAND OPERATION CODE */ 2617 buffer[SPC_ASC_KEY_OFFSET] = 0x20; 2618 break; 2619 case TCM_UNKNOWN_MODE_PAGE: 2620 /* CURRENT ERROR */ 2621 buffer[0] = 0x70; 2622 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2623 /* ILLEGAL REQUEST */ 2624 buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; 2625 /* INVALID FIELD IN CDB */ 2626 buffer[SPC_ASC_KEY_OFFSET] = 0x24; 2627 break; 2628 case TCM_CHECK_CONDITION_ABORT_CMD: 2629 /* CURRENT ERROR */ 2630 buffer[0] = 0x70; 2631 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2632 /* ABORTED COMMAND */ 2633 buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; 2634 /* BUS DEVICE RESET FUNCTION OCCURRED */ 2635 buffer[SPC_ASC_KEY_OFFSET] = 0x29; 2636 buffer[SPC_ASCQ_KEY_OFFSET] = 0x03; 2637 break; 2638 case TCM_INCORRECT_AMOUNT_OF_DATA: 2639 /* CURRENT ERROR */ 2640 buffer[0] = 
0x70; 2641 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2642 /* ABORTED COMMAND */ 2643 buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; 2644 /* WRITE ERROR */ 2645 buffer[SPC_ASC_KEY_OFFSET] = 0x0c; 2646 /* NOT ENOUGH UNSOLICITED DATA */ 2647 buffer[SPC_ASCQ_KEY_OFFSET] = 0x0d; 2648 break; 2649 case TCM_INVALID_CDB_FIELD: 2650 /* CURRENT ERROR */ 2651 buffer[0] = 0x70; 2652 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2653 /* ILLEGAL REQUEST */ 2654 buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; 2655 /* INVALID FIELD IN CDB */ 2656 buffer[SPC_ASC_KEY_OFFSET] = 0x24; 2657 break; 2658 case TCM_INVALID_PARAMETER_LIST: 2659 /* CURRENT ERROR */ 2660 buffer[0] = 0x70; 2661 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2662 /* ILLEGAL REQUEST */ 2663 buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; 2664 /* INVALID FIELD IN PARAMETER LIST */ 2665 buffer[SPC_ASC_KEY_OFFSET] = 0x26; 2666 break; 2667 case TCM_UNEXPECTED_UNSOLICITED_DATA: 2668 /* CURRENT ERROR */ 2669 buffer[0] = 0x70; 2670 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2671 /* ABORTED COMMAND */ 2672 buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; 2673 /* WRITE ERROR */ 2674 buffer[SPC_ASC_KEY_OFFSET] = 0x0c; 2675 /* UNEXPECTED_UNSOLICITED_DATA */ 2676 buffer[SPC_ASCQ_KEY_OFFSET] = 0x0c; 2677 break; 2678 case TCM_SERVICE_CRC_ERROR: 2679 /* CURRENT ERROR */ 2680 buffer[0] = 0x70; 2681 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2682 /* ABORTED COMMAND */ 2683 buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; 2684 /* PROTOCOL SERVICE CRC ERROR */ 2685 buffer[SPC_ASC_KEY_OFFSET] = 0x47; 2686 /* N/A */ 2687 buffer[SPC_ASCQ_KEY_OFFSET] = 0x05; 2688 break; 2689 case TCM_SNACK_REJECTED: 2690 /* CURRENT ERROR */ 2691 buffer[0] = 0x70; 2692 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2693 /* ABORTED COMMAND */ 2694 buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; 2695 /* READ ERROR */ 2696 buffer[SPC_ASC_KEY_OFFSET] = 0x11; 2697 /* FAILED RETRANSMISSION REQUEST */ 2698 buffer[SPC_ASCQ_KEY_OFFSET] = 0x13; 2699 break; 2700 case TCM_WRITE_PROTECTED: 2701 /* CURRENT ERROR */ 2702 buffer[0] = 0x70; 2703 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2704 /* DATA PROTECT */ 2705 buffer[SPC_SENSE_KEY_OFFSET] = DATA_PROTECT; 2706 /* WRITE PROTECTED */ 2707 buffer[SPC_ASC_KEY_OFFSET] = 0x27; 2708 break; 2709 case TCM_ADDRESS_OUT_OF_RANGE: 2710 /* CURRENT ERROR */ 2711 buffer[0] = 0x70; 2712 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2713 /* ILLEGAL REQUEST */ 2714 buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; 2715 /* LOGICAL BLOCK ADDRESS OUT OF RANGE */ 2716 buffer[SPC_ASC_KEY_OFFSET] = 0x21; 2717 break; 2718 case TCM_CHECK_CONDITION_UNIT_ATTENTION: 2719 /* CURRENT ERROR */ 2720 buffer[0] = 0x70; 2721 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2722 /* UNIT ATTENTION */ 2723 buffer[SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION; 2724 core_scsi3_ua_for_check_condition(cmd, &asc, &ascq); 2725 buffer[SPC_ASC_KEY_OFFSET] = asc; 2726 buffer[SPC_ASCQ_KEY_OFFSET] = ascq; 2727 break; 2728 case TCM_CHECK_CONDITION_NOT_READY: 2729 /* CURRENT ERROR */ 2730 buffer[0] = 0x70; 2731 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2732 /* Not Ready */ 2733 buffer[SPC_SENSE_KEY_OFFSET] = NOT_READY; 2734 transport_get_sense_codes(cmd, &asc, &ascq); 2735 buffer[SPC_ASC_KEY_OFFSET] = asc; 2736 buffer[SPC_ASCQ_KEY_OFFSET] = ascq; 2737 break; 2738 case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE: 2739 default: 2740 /* CURRENT ERROR */ 2741 buffer[0] = 0x70; 2742 buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; 2743 /* ILLEGAL REQUEST */ 2744 buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; 2745 /* LOGICAL UNIT COMMUNICATION FAILURE */ 2746 
buffer[SPC_ASC_KEY_OFFSET] = 0x80;
2747 		break;
2748 	}
2749 	/*
2750 	 * This code uses linux/include/scsi/scsi.h SAM status codes!
2751 	 */
2752 	cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
2753 	/*
2754 	 * Automatically padded, this value is encoded in the fabric's
2755 	 * data_length response PDU containing the SCSI defined sense data.
2756 	 */
2757 	cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
2758 
2759 after_reason:
2760 	return cmd->se_tfo->queue_status(cmd);
2761 }
2762 EXPORT_SYMBOL(transport_send_check_condition_and_sense);
2763 
2764 int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
2765 {
2766 	if (!(cmd->transport_state & CMD_T_ABORTED))
2767 		return 0;
2768 
2769 	if (!send_status || (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
2770 		return 1;
2771 
2772 	pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB: 0x%02x ITT: 0x%08x\n",
2773 		 cmd->t_task_cdb[0], cmd->se_tfo->get_task_tag(cmd));
2774 
2775 	cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
2776 	cmd->se_tfo->queue_status(cmd);
2777 
2778 	return 1;
2779 }
2780 EXPORT_SYMBOL(transport_check_aborted_status);
2781 
2782 void transport_send_task_abort(struct se_cmd *cmd)
2783 {
2784 	unsigned long flags;
2785 
2786 	spin_lock_irqsave(&cmd->t_state_lock, flags);
2787 	if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION | SCF_SENT_DELAYED_TAS)) {
2788 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2789 		return;
2790 	}
2791 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2792 
2793 	/*
2794 	 * If there are still expected incoming fabric WRITEs, we wait
2795 	 * until they have completed before sending a TASK_ABORTED
2796 	 * response. This response with TASK_ABORTED status will be
2797 	 * queued back to fabric module by transport_check_aborted_status().
2798 	 */
2799 	if (cmd->data_direction == DMA_TO_DEVICE) {
2800 		if (cmd->se_tfo->write_pending_status(cmd) != 0) {
2801 			cmd->transport_state |= CMD_T_ABORTED;
2802 			smp_mb__after_atomic_inc();
2803 		}
2804 	}
2805 	cmd->scsi_status = SAM_STAT_TASK_ABORTED;
2806 
2807 	pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
2808 		" ITT: 0x%08x\n", cmd->t_task_cdb[0],
2809 		cmd->se_tfo->get_task_tag(cmd));
2810 
2811 	cmd->se_tfo->queue_status(cmd);
2812 }
2813 
2814 static void target_tmr_work(struct work_struct *work)
2815 {
2816 	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
2817 	struct se_device *dev = cmd->se_dev;
2818 	struct se_tmr_req *tmr = cmd->se_tmr_req;
2819 	int ret;
2820 
2821 	switch (tmr->function) {
2822 	case TMR_ABORT_TASK:
2823 		core_tmr_abort_task(dev, tmr, cmd->se_sess);
2824 		break;
2825 	case TMR_ABORT_TASK_SET:
2826 	case TMR_CLEAR_ACA:
2827 	case TMR_CLEAR_TASK_SET:
2828 		tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
2829 		break;
2830 	case TMR_LUN_RESET:
2831 		ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
2832 		tmr->response = (!ret) ?
TMR_FUNCTION_COMPLETE :
2833 				TMR_FUNCTION_REJECTED;
2834 		break;
2835 	case TMR_TARGET_WARM_RESET:
2836 		tmr->response = TMR_FUNCTION_REJECTED;
2837 		break;
2838 	case TMR_TARGET_COLD_RESET:
2839 		tmr->response = TMR_FUNCTION_REJECTED;
2840 		break;
2841 	default:
2842 		pr_err("Unknown TMR function: 0x%02x.\n",
2843 				tmr->function);
2844 		tmr->response = TMR_FUNCTION_REJECTED;
2845 		break;
2846 	}
2847 
2848 	cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
2849 	cmd->se_tfo->queue_tm_rsp(cmd);
2850 
2851 	transport_cmd_check_stop_to_fabric(cmd);
2852 }
2853 
2854 int transport_generic_handle_tmr(
2855 	struct se_cmd *cmd)
2856 {
2857 	INIT_WORK(&cmd->work, target_tmr_work);
2858 	queue_work(cmd->se_dev->tmr_wq, &cmd->work);
2859 	return 0;
2860 }
2861 EXPORT_SYMBOL(transport_generic_handle_tmr);
2862