1 /******************************************************************************* 2 * Filename: target_core_transport.c 3 * 4 * This file contains the Generic Target Engine Core. 5 * 6 * (c) Copyright 2002-2013 Datera, Inc. 7 * 8 * Nicholas A. Bellinger <nab@kernel.org> 9 * 10 * This program is free software; you can redistribute it and/or modify 11 * it under the terms of the GNU General Public License as published by 12 * the Free Software Foundation; either version 2 of the License, or 13 * (at your option) any later version. 14 * 15 * This program is distributed in the hope that it will be useful, 16 * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 * GNU General Public License for more details. 19 * 20 * You should have received a copy of the GNU General Public License 21 * along with this program; if not, write to the Free Software 22 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 23 * 24 ******************************************************************************/ 25 26 #include <linux/net.h> 27 #include <linux/delay.h> 28 #include <linux/string.h> 29 #include <linux/timer.h> 30 #include <linux/slab.h> 31 #include <linux/spinlock.h> 32 #include <linux/kthread.h> 33 #include <linux/in.h> 34 #include <linux/cdrom.h> 35 #include <linux/module.h> 36 #include <linux/ratelimit.h> 37 #include <linux/vmalloc.h> 38 #include <asm/unaligned.h> 39 #include <net/sock.h> 40 #include <net/tcp.h> 41 #include <scsi/scsi_proto.h> 42 #include <scsi/scsi_common.h> 43 44 #include <target/target_core_base.h> 45 #include <target/target_core_backend.h> 46 #include <target/target_core_fabric.h> 47 48 #include "target_core_internal.h" 49 #include "target_core_alua.h" 50 #include "target_core_pr.h" 51 #include "target_core_ua.h" 52 53 #define CREATE_TRACE_POINTS 54 #include <trace/events/target.h> 55 56 static struct workqueue_struct *target_completion_wq; 57 static struct kmem_cache *se_sess_cache; 58 struct kmem_cache *se_ua_cache; 59 struct kmem_cache *t10_pr_reg_cache; 60 struct kmem_cache *t10_alua_lu_gp_cache; 61 struct kmem_cache *t10_alua_lu_gp_mem_cache; 62 struct kmem_cache *t10_alua_tg_pt_gp_cache; 63 struct kmem_cache *t10_alua_lba_map_cache; 64 struct kmem_cache *t10_alua_lba_map_mem_cache; 65 66 static void transport_complete_task_attr(struct se_cmd *cmd); 67 static void transport_handle_queue_full(struct se_cmd *cmd, 68 struct se_device *dev); 69 static int transport_put_cmd(struct se_cmd *cmd); 70 static void target_complete_ok_work(struct work_struct *work); 71 72 int init_se_kmem_caches(void) 73 { 74 se_sess_cache = kmem_cache_create("se_sess_cache", 75 sizeof(struct se_session), __alignof__(struct se_session), 76 0, NULL); 77 if (!se_sess_cache) { 78 pr_err("kmem_cache_create() for struct se_session" 79 " failed\n"); 80 goto out; 81 } 82 se_ua_cache = kmem_cache_create("se_ua_cache", 83 sizeof(struct se_ua), __alignof__(struct se_ua), 84 0, NULL); 85 if (!se_ua_cache) { 86 pr_err("kmem_cache_create() for struct se_ua failed\n"); 87 goto out_free_sess_cache; 88 } 89 t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache", 90 sizeof(struct t10_pr_registration), 91 __alignof__(struct t10_pr_registration), 0, NULL); 92 if (!t10_pr_reg_cache) { 93 pr_err("kmem_cache_create() for struct t10_pr_registration" 94 " failed\n"); 95 goto out_free_ua_cache; 96 } 97 t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache", 98 sizeof(struct t10_alua_lu_gp), __alignof__(struct 
t10_alua_lu_gp), 99 0, NULL); 100 if (!t10_alua_lu_gp_cache) { 101 pr_err("kmem_cache_create() for t10_alua_lu_gp_cache" 102 " failed\n"); 103 goto out_free_pr_reg_cache; 104 } 105 t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache", 106 sizeof(struct t10_alua_lu_gp_member), 107 __alignof__(struct t10_alua_lu_gp_member), 0, NULL); 108 if (!t10_alua_lu_gp_mem_cache) { 109 pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_" 110 "cache failed\n"); 111 goto out_free_lu_gp_cache; 112 } 113 t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache", 114 sizeof(struct t10_alua_tg_pt_gp), 115 __alignof__(struct t10_alua_tg_pt_gp), 0, NULL); 116 if (!t10_alua_tg_pt_gp_cache) { 117 pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_" 118 "cache failed\n"); 119 goto out_free_lu_gp_mem_cache; 120 } 121 t10_alua_lba_map_cache = kmem_cache_create( 122 "t10_alua_lba_map_cache", 123 sizeof(struct t10_alua_lba_map), 124 __alignof__(struct t10_alua_lba_map), 0, NULL); 125 if (!t10_alua_lba_map_cache) { 126 pr_err("kmem_cache_create() for t10_alua_lba_map_" 127 "cache failed\n"); 128 goto out_free_tg_pt_gp_cache; 129 } 130 t10_alua_lba_map_mem_cache = kmem_cache_create( 131 "t10_alua_lba_map_mem_cache", 132 sizeof(struct t10_alua_lba_map_member), 133 __alignof__(struct t10_alua_lba_map_member), 0, NULL); 134 if (!t10_alua_lba_map_mem_cache) { 135 pr_err("kmem_cache_create() for t10_alua_lba_map_mem_" 136 "cache failed\n"); 137 goto out_free_lba_map_cache; 138 } 139 140 target_completion_wq = alloc_workqueue("target_completion", 141 WQ_MEM_RECLAIM, 0); 142 if (!target_completion_wq) 143 goto out_free_lba_map_mem_cache; 144 145 return 0; 146 147 out_free_lba_map_mem_cache: 148 kmem_cache_destroy(t10_alua_lba_map_mem_cache); 149 out_free_lba_map_cache: 150 kmem_cache_destroy(t10_alua_lba_map_cache); 151 out_free_tg_pt_gp_cache: 152 kmem_cache_destroy(t10_alua_tg_pt_gp_cache); 153 out_free_lu_gp_mem_cache: 154 kmem_cache_destroy(t10_alua_lu_gp_mem_cache); 155 out_free_lu_gp_cache: 156 kmem_cache_destroy(t10_alua_lu_gp_cache); 157 out_free_pr_reg_cache: 158 kmem_cache_destroy(t10_pr_reg_cache); 159 out_free_ua_cache: 160 kmem_cache_destroy(se_ua_cache); 161 out_free_sess_cache: 162 kmem_cache_destroy(se_sess_cache); 163 out: 164 return -ENOMEM; 165 } 166 167 void release_se_kmem_caches(void) 168 { 169 destroy_workqueue(target_completion_wq); 170 kmem_cache_destroy(se_sess_cache); 171 kmem_cache_destroy(se_ua_cache); 172 kmem_cache_destroy(t10_pr_reg_cache); 173 kmem_cache_destroy(t10_alua_lu_gp_cache); 174 kmem_cache_destroy(t10_alua_lu_gp_mem_cache); 175 kmem_cache_destroy(t10_alua_tg_pt_gp_cache); 176 kmem_cache_destroy(t10_alua_lba_map_cache); 177 kmem_cache_destroy(t10_alua_lba_map_mem_cache); 178 } 179 180 /* This code ensures unique mib indexes are handed out. 
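 *
 * A minimal usage sketch (illustrative only; SCSI_DEVICE_INDEX is one of
 * the scsi_index_t values expected to be defined in target_core_base.h):
 *
 *	u32 dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
 *
 * Uniqueness comes solely from the increment under scsi_mib_index_lock
 * below; indexes are never reused.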
*/ 181 static DEFINE_SPINLOCK(scsi_mib_index_lock); 182 static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX]; 183 184 /* 185 * Allocate a new row index for the entry type specified 186 */ 187 u32 scsi_get_new_index(scsi_index_t type) 188 { 189 u32 new_index; 190 191 BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)); 192 193 spin_lock(&scsi_mib_index_lock); 194 new_index = ++scsi_mib_index[type]; 195 spin_unlock(&scsi_mib_index_lock); 196 197 return new_index; 198 } 199 200 void transport_subsystem_check_init(void) 201 { 202 int ret; 203 static int sub_api_initialized; 204 205 if (sub_api_initialized) 206 return; 207 208 ret = request_module("target_core_iblock"); 209 if (ret != 0) 210 pr_err("Unable to load target_core_iblock\n"); 211 212 ret = request_module("target_core_file"); 213 if (ret != 0) 214 pr_err("Unable to load target_core_file\n"); 215 216 ret = request_module("target_core_pscsi"); 217 if (ret != 0) 218 pr_err("Unable to load target_core_pscsi\n"); 219 220 ret = request_module("target_core_user"); 221 if (ret != 0) 222 pr_err("Unable to load target_core_user\n"); 223 224 sub_api_initialized = 1; 225 } 226 227 struct se_session *transport_init_session(enum target_prot_op sup_prot_ops) 228 { 229 struct se_session *se_sess; 230 231 se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL); 232 if (!se_sess) { 233 pr_err("Unable to allocate struct se_session from" 234 " se_sess_cache\n"); 235 return ERR_PTR(-ENOMEM); 236 } 237 INIT_LIST_HEAD(&se_sess->sess_list); 238 INIT_LIST_HEAD(&se_sess->sess_acl_list); 239 INIT_LIST_HEAD(&se_sess->sess_cmd_list); 240 INIT_LIST_HEAD(&se_sess->sess_wait_list); 241 spin_lock_init(&se_sess->sess_cmd_lock); 242 kref_init(&se_sess->sess_kref); 243 se_sess->sup_prot_ops = sup_prot_ops; 244 245 return se_sess; 246 } 247 EXPORT_SYMBOL(transport_init_session); 248 249 int transport_alloc_session_tags(struct se_session *se_sess, 250 unsigned int tag_num, unsigned int tag_size) 251 { 252 int rc; 253 254 se_sess->sess_cmd_map = kzalloc(tag_num * tag_size, 255 GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT); 256 if (!se_sess->sess_cmd_map) { 257 se_sess->sess_cmd_map = vzalloc(tag_num * tag_size); 258 if (!se_sess->sess_cmd_map) { 259 pr_err("Unable to allocate se_sess->sess_cmd_map\n"); 260 return -ENOMEM; 261 } 262 } 263 264 rc = percpu_ida_init(&se_sess->sess_tag_pool, tag_num); 265 if (rc < 0) { 266 pr_err("Unable to init se_sess->sess_tag_pool," 267 " tag_num: %u\n", tag_num); 268 kvfree(se_sess->sess_cmd_map); 269 se_sess->sess_cmd_map = NULL; 270 return -ENOMEM; 271 } 272 273 return 0; 274 } 275 EXPORT_SYMBOL(transport_alloc_session_tags); 276 277 struct se_session *transport_init_session_tags(unsigned int tag_num, 278 unsigned int tag_size, 279 enum target_prot_op sup_prot_ops) 280 { 281 struct se_session *se_sess; 282 int rc; 283 284 se_sess = transport_init_session(sup_prot_ops); 285 if (IS_ERR(se_sess)) 286 return se_sess; 287 288 rc = transport_alloc_session_tags(se_sess, tag_num, tag_size); 289 if (rc < 0) { 290 transport_free_session(se_sess); 291 return ERR_PTR(-ENOMEM); 292 } 293 294 return se_sess; 295 } 296 EXPORT_SYMBOL(transport_init_session_tags); 297 298 /* 299 * Called with spin_lock_irqsave(&struct se_portal_group->session_lock called. 
 */
void __transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
	unsigned char buf[PR_REG_ISID_LEN];

	se_sess->se_tpg = se_tpg;
	se_sess->fabric_sess_ptr = fabric_sess_ptr;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate active
	 * struct se_session pointers.
	 *
	 * Only set for struct se_session's that will actually be moving I/O.
	 * eg: *NOT* discovery sessions.
	 */
	if (se_nacl) {
		/*
		 * Determine if fabric allows for T10-PI feature bits exposed to
		 * initiators for device backends with !dev->dev_attrib.pi_prot_type.
		 *
		 * If so, then always save prot_type on a per se_node_acl node
		 * basis and re-instate the previous sess_prot_type to avoid
		 * disabling PI from below any previously initiator side
		 * registered LUNs.
		 */
		if (se_nacl->saved_prot_type)
			se_sess->sess_prot_type = se_nacl->saved_prot_type;
		else if (tfo->tpg_check_prot_fabric_only)
			se_sess->sess_prot_type = se_nacl->saved_prot_type =
					tfo->tpg_check_prot_fabric_only(se_tpg);
		/*
		 * If the fabric module supports an ISID based TransportID,
		 * save this value in binary from the fabric I_T Nexus now.
		 */
		if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
			memset(&buf[0], 0, PR_REG_ISID_LEN);
			se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
					&buf[0], PR_REG_ISID_LEN);
			se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
		}
		kref_get(&se_nacl->acl_kref);

		spin_lock_irq(&se_nacl->nacl_sess_lock);
		/*
		 * The se_nacl->nacl_sess pointer will be set to the
		 * last active I_T Nexus for each struct se_node_acl.
350 */ 351 se_nacl->nacl_sess = se_sess; 352 353 list_add_tail(&se_sess->sess_acl_list, 354 &se_nacl->acl_sess_list); 355 spin_unlock_irq(&se_nacl->nacl_sess_lock); 356 } 357 list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list); 358 359 pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n", 360 se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr); 361 } 362 EXPORT_SYMBOL(__transport_register_session); 363 364 void transport_register_session( 365 struct se_portal_group *se_tpg, 366 struct se_node_acl *se_nacl, 367 struct se_session *se_sess, 368 void *fabric_sess_ptr) 369 { 370 unsigned long flags; 371 372 spin_lock_irqsave(&se_tpg->session_lock, flags); 373 __transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr); 374 spin_unlock_irqrestore(&se_tpg->session_lock, flags); 375 } 376 EXPORT_SYMBOL(transport_register_session); 377 378 static void target_release_session(struct kref *kref) 379 { 380 struct se_session *se_sess = container_of(kref, 381 struct se_session, sess_kref); 382 struct se_portal_group *se_tpg = se_sess->se_tpg; 383 384 se_tpg->se_tpg_tfo->close_session(se_sess); 385 } 386 387 void target_get_session(struct se_session *se_sess) 388 { 389 kref_get(&se_sess->sess_kref); 390 } 391 EXPORT_SYMBOL(target_get_session); 392 393 void target_put_session(struct se_session *se_sess) 394 { 395 kref_put(&se_sess->sess_kref, target_release_session); 396 } 397 EXPORT_SYMBOL(target_put_session); 398 399 ssize_t target_show_dynamic_sessions(struct se_portal_group *se_tpg, char *page) 400 { 401 struct se_session *se_sess; 402 ssize_t len = 0; 403 404 spin_lock_bh(&se_tpg->session_lock); 405 list_for_each_entry(se_sess, &se_tpg->tpg_sess_list, sess_list) { 406 if (!se_sess->se_node_acl) 407 continue; 408 if (!se_sess->se_node_acl->dynamic_node_acl) 409 continue; 410 if (strlen(se_sess->se_node_acl->initiatorname) + 1 + len > PAGE_SIZE) 411 break; 412 413 len += snprintf(page + len, PAGE_SIZE - len, "%s\n", 414 se_sess->se_node_acl->initiatorname); 415 len += 1; /* Include NULL terminator */ 416 } 417 spin_unlock_bh(&se_tpg->session_lock); 418 419 return len; 420 } 421 EXPORT_SYMBOL(target_show_dynamic_sessions); 422 423 static void target_complete_nacl(struct kref *kref) 424 { 425 struct se_node_acl *nacl = container_of(kref, 426 struct se_node_acl, acl_kref); 427 428 complete(&nacl->acl_free_comp); 429 } 430 431 void target_put_nacl(struct se_node_acl *nacl) 432 { 433 kref_put(&nacl->acl_kref, target_complete_nacl); 434 } 435 436 void transport_deregister_session_configfs(struct se_session *se_sess) 437 { 438 struct se_node_acl *se_nacl; 439 unsigned long flags; 440 /* 441 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session 442 */ 443 se_nacl = se_sess->se_node_acl; 444 if (se_nacl) { 445 spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags); 446 if (se_nacl->acl_stop == 0) 447 list_del(&se_sess->sess_acl_list); 448 /* 449 * If the session list is empty, then clear the pointer. 450 * Otherwise, set the struct se_session pointer from the tail 451 * element of the per struct se_node_acl active session list. 
452 */ 453 if (list_empty(&se_nacl->acl_sess_list)) 454 se_nacl->nacl_sess = NULL; 455 else { 456 se_nacl->nacl_sess = container_of( 457 se_nacl->acl_sess_list.prev, 458 struct se_session, sess_acl_list); 459 } 460 spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags); 461 } 462 } 463 EXPORT_SYMBOL(transport_deregister_session_configfs); 464 465 void transport_free_session(struct se_session *se_sess) 466 { 467 if (se_sess->sess_cmd_map) { 468 percpu_ida_destroy(&se_sess->sess_tag_pool); 469 kvfree(se_sess->sess_cmd_map); 470 } 471 kmem_cache_free(se_sess_cache, se_sess); 472 } 473 EXPORT_SYMBOL(transport_free_session); 474 475 void transport_deregister_session(struct se_session *se_sess) 476 { 477 struct se_portal_group *se_tpg = se_sess->se_tpg; 478 const struct target_core_fabric_ops *se_tfo; 479 struct se_node_acl *se_nacl; 480 unsigned long flags; 481 bool comp_nacl = true, drop_nacl = false; 482 483 if (!se_tpg) { 484 transport_free_session(se_sess); 485 return; 486 } 487 se_tfo = se_tpg->se_tpg_tfo; 488 489 spin_lock_irqsave(&se_tpg->session_lock, flags); 490 list_del(&se_sess->sess_list); 491 se_sess->se_tpg = NULL; 492 se_sess->fabric_sess_ptr = NULL; 493 spin_unlock_irqrestore(&se_tpg->session_lock, flags); 494 495 /* 496 * Determine if we need to do extra work for this initiator node's 497 * struct se_node_acl if it had been previously dynamically generated. 498 */ 499 se_nacl = se_sess->se_node_acl; 500 501 mutex_lock(&se_tpg->acl_node_mutex); 502 if (se_nacl && se_nacl->dynamic_node_acl) { 503 if (!se_tfo->tpg_check_demo_mode_cache(se_tpg)) { 504 list_del(&se_nacl->acl_list); 505 se_tpg->num_node_acls--; 506 drop_nacl = true; 507 } 508 } 509 mutex_unlock(&se_tpg->acl_node_mutex); 510 511 if (drop_nacl) { 512 core_tpg_wait_for_nacl_pr_ref(se_nacl); 513 core_free_device_list_for_node(se_nacl, se_tpg); 514 kfree(se_nacl); 515 comp_nacl = false; 516 } 517 pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n", 518 se_tpg->se_tpg_tfo->get_fabric_name()); 519 /* 520 * If last kref is dropping now for an explicit NodeACL, awake sleeping 521 * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group 522 * removal context. 523 */ 524 if (se_nacl && comp_nacl) 525 target_put_nacl(se_nacl); 526 527 transport_free_session(se_sess); 528 } 529 EXPORT_SYMBOL(transport_deregister_session); 530 531 /* 532 * Called with cmd->t_state_lock held. 533 */ 534 static void target_remove_from_state_list(struct se_cmd *cmd) 535 { 536 struct se_device *dev = cmd->se_dev; 537 unsigned long flags; 538 539 if (!dev) 540 return; 541 542 if (cmd->transport_state & CMD_T_BUSY) 543 return; 544 545 spin_lock_irqsave(&dev->execute_task_lock, flags); 546 if (cmd->state_active) { 547 list_del(&cmd->state_list); 548 cmd->state_active = false; 549 } 550 spin_unlock_irqrestore(&dev->execute_task_lock, flags); 551 } 552 553 static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists, 554 bool write_pending) 555 { 556 unsigned long flags; 557 558 spin_lock_irqsave(&cmd->t_state_lock, flags); 559 if (write_pending) 560 cmd->t_state = TRANSPORT_WRITE_PENDING; 561 562 if (remove_from_lists) { 563 target_remove_from_state_list(cmd); 564 565 /* 566 * Clear struct se_cmd->se_lun before the handoff to FE. 567 */ 568 cmd->se_lun = NULL; 569 } 570 571 /* 572 * Determine if frontend context caller is requesting the stopping of 573 * this command for frontend exceptions. 
574 */ 575 if (cmd->transport_state & CMD_T_STOP) { 576 pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n", 577 __func__, __LINE__, cmd->tag); 578 579 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 580 581 complete_all(&cmd->t_transport_stop_comp); 582 return 1; 583 } 584 585 cmd->transport_state &= ~CMD_T_ACTIVE; 586 if (remove_from_lists) { 587 /* 588 * Some fabric modules like tcm_loop can release 589 * their internally allocated I/O reference now and 590 * struct se_cmd now. 591 * 592 * Fabric modules are expected to return '1' here if the 593 * se_cmd being passed is released at this point, 594 * or zero if not being released. 595 */ 596 if (cmd->se_tfo->check_stop_free != NULL) { 597 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 598 return cmd->se_tfo->check_stop_free(cmd); 599 } 600 } 601 602 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 603 return 0; 604 } 605 606 static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd) 607 { 608 return transport_cmd_check_stop(cmd, true, false); 609 } 610 611 static void transport_lun_remove_cmd(struct se_cmd *cmd) 612 { 613 struct se_lun *lun = cmd->se_lun; 614 615 if (!lun) 616 return; 617 618 if (cmpxchg(&cmd->lun_ref_active, true, false)) 619 percpu_ref_put(&lun->lun_ref); 620 } 621 622 void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) 623 { 624 if (cmd->se_cmd_flags & SCF_SE_LUN_CMD) 625 transport_lun_remove_cmd(cmd); 626 /* 627 * Allow the fabric driver to unmap any resources before 628 * releasing the descriptor via TFO->release_cmd() 629 */ 630 if (remove) 631 cmd->se_tfo->aborted_task(cmd); 632 633 if (transport_cmd_check_stop_to_fabric(cmd)) 634 return; 635 if (remove) 636 transport_put_cmd(cmd); 637 } 638 639 static void target_complete_failure_work(struct work_struct *work) 640 { 641 struct se_cmd *cmd = container_of(work, struct se_cmd, work); 642 643 transport_generic_request_failure(cmd, 644 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE); 645 } 646 647 /* 648 * Used when asking transport to copy Sense Data from the underlying 649 * Linux/SCSI struct scsi_cmnd 650 */ 651 static unsigned char *transport_get_sense_buffer(struct se_cmd *cmd) 652 { 653 struct se_device *dev = cmd->se_dev; 654 655 WARN_ON(!cmd->se_lun); 656 657 if (!dev) 658 return NULL; 659 660 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) 661 return NULL; 662 663 cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER; 664 665 pr_debug("HBA_[%u]_PLUG[%s]: Requesting sense for SAM STATUS: 0x%02x\n", 666 dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status); 667 return cmd->sense_buffer; 668 } 669 670 void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) 671 { 672 struct se_device *dev = cmd->se_dev; 673 int success = scsi_status == GOOD; 674 unsigned long flags; 675 676 cmd->scsi_status = scsi_status; 677 678 679 spin_lock_irqsave(&cmd->t_state_lock, flags); 680 cmd->transport_state &= ~CMD_T_BUSY; 681 682 if (dev && dev->transport->transport_complete) { 683 dev->transport->transport_complete(cmd, 684 cmd->t_data_sg, 685 transport_get_sense_buffer(cmd)); 686 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) 687 success = 1; 688 } 689 690 /* 691 * See if we are waiting to complete for an exception condition. 692 */ 693 if (cmd->transport_state & CMD_T_REQUEST_STOP) { 694 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 695 complete(&cmd->task_stop_comp); 696 return; 697 } 698 699 /* 700 * Check for case where an explicit ABORT_TASK has been received 701 * and transport_wait_for_tasks() will be waiting for completion.. 
702 */ 703 if (cmd->transport_state & CMD_T_ABORTED && 704 cmd->transport_state & CMD_T_STOP) { 705 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 706 complete_all(&cmd->t_transport_stop_comp); 707 return; 708 } else if (!success) { 709 INIT_WORK(&cmd->work, target_complete_failure_work); 710 } else { 711 INIT_WORK(&cmd->work, target_complete_ok_work); 712 } 713 714 cmd->t_state = TRANSPORT_COMPLETE; 715 cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE); 716 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 717 718 queue_work(target_completion_wq, &cmd->work); 719 } 720 EXPORT_SYMBOL(target_complete_cmd); 721 722 void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length) 723 { 724 if (scsi_status == SAM_STAT_GOOD && length < cmd->data_length) { 725 if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { 726 cmd->residual_count += cmd->data_length - length; 727 } else { 728 cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; 729 cmd->residual_count = cmd->data_length - length; 730 } 731 732 cmd->data_length = length; 733 } 734 735 target_complete_cmd(cmd, scsi_status); 736 } 737 EXPORT_SYMBOL(target_complete_cmd_with_length); 738 739 static void target_add_to_state_list(struct se_cmd *cmd) 740 { 741 struct se_device *dev = cmd->se_dev; 742 unsigned long flags; 743 744 spin_lock_irqsave(&dev->execute_task_lock, flags); 745 if (!cmd->state_active) { 746 list_add_tail(&cmd->state_list, &dev->state_list); 747 cmd->state_active = true; 748 } 749 spin_unlock_irqrestore(&dev->execute_task_lock, flags); 750 } 751 752 /* 753 * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status 754 */ 755 static void transport_write_pending_qf(struct se_cmd *cmd); 756 static void transport_complete_qf(struct se_cmd *cmd); 757 758 void target_qf_do_work(struct work_struct *work) 759 { 760 struct se_device *dev = container_of(work, struct se_device, 761 qf_work_queue); 762 LIST_HEAD(qf_cmd_list); 763 struct se_cmd *cmd, *cmd_tmp; 764 765 spin_lock_irq(&dev->qf_cmd_lock); 766 list_splice_init(&dev->qf_cmd_list, &qf_cmd_list); 767 spin_unlock_irq(&dev->qf_cmd_lock); 768 769 list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) { 770 list_del(&cmd->se_qf_node); 771 atomic_dec_mb(&dev->dev_qf_count); 772 773 pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue" 774 " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd, 775 (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" : 776 (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? 
"WRITE_PENDING" 777 : "UNKNOWN"); 778 779 if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) 780 transport_write_pending_qf(cmd); 781 else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) 782 transport_complete_qf(cmd); 783 } 784 } 785 786 unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd) 787 { 788 switch (cmd->data_direction) { 789 case DMA_NONE: 790 return "NONE"; 791 case DMA_FROM_DEVICE: 792 return "READ"; 793 case DMA_TO_DEVICE: 794 return "WRITE"; 795 case DMA_BIDIRECTIONAL: 796 return "BIDI"; 797 default: 798 break; 799 } 800 801 return "UNKNOWN"; 802 } 803 804 void transport_dump_dev_state( 805 struct se_device *dev, 806 char *b, 807 int *bl) 808 { 809 *bl += sprintf(b + *bl, "Status: "); 810 if (dev->export_count) 811 *bl += sprintf(b + *bl, "ACTIVATED"); 812 else 813 *bl += sprintf(b + *bl, "DEACTIVATED"); 814 815 *bl += sprintf(b + *bl, " Max Queue Depth: %d", dev->queue_depth); 816 *bl += sprintf(b + *bl, " SectorSize: %u HwMaxSectors: %u\n", 817 dev->dev_attrib.block_size, 818 dev->dev_attrib.hw_max_sectors); 819 *bl += sprintf(b + *bl, " "); 820 } 821 822 void transport_dump_vpd_proto_id( 823 struct t10_vpd *vpd, 824 unsigned char *p_buf, 825 int p_buf_len) 826 { 827 unsigned char buf[VPD_TMP_BUF_SIZE]; 828 int len; 829 830 memset(buf, 0, VPD_TMP_BUF_SIZE); 831 len = sprintf(buf, "T10 VPD Protocol Identifier: "); 832 833 switch (vpd->protocol_identifier) { 834 case 0x00: 835 sprintf(buf+len, "Fibre Channel\n"); 836 break; 837 case 0x10: 838 sprintf(buf+len, "Parallel SCSI\n"); 839 break; 840 case 0x20: 841 sprintf(buf+len, "SSA\n"); 842 break; 843 case 0x30: 844 sprintf(buf+len, "IEEE 1394\n"); 845 break; 846 case 0x40: 847 sprintf(buf+len, "SCSI Remote Direct Memory Access" 848 " Protocol\n"); 849 break; 850 case 0x50: 851 sprintf(buf+len, "Internet SCSI (iSCSI)\n"); 852 break; 853 case 0x60: 854 sprintf(buf+len, "SAS Serial SCSI Protocol\n"); 855 break; 856 case 0x70: 857 sprintf(buf+len, "Automation/Drive Interface Transport" 858 " Protocol\n"); 859 break; 860 case 0x80: 861 sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n"); 862 break; 863 default: 864 sprintf(buf+len, "Unknown 0x%02x\n", 865 vpd->protocol_identifier); 866 break; 867 } 868 869 if (p_buf) 870 strncpy(p_buf, buf, p_buf_len); 871 else 872 pr_debug("%s", buf); 873 } 874 875 void 876 transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83) 877 { 878 /* 879 * Check if the Protocol Identifier Valid (PIV) bit is set.. 
880 * 881 * from spc3r23.pdf section 7.5.1 882 */ 883 if (page_83[1] & 0x80) { 884 vpd->protocol_identifier = (page_83[0] & 0xf0); 885 vpd->protocol_identifier_set = 1; 886 transport_dump_vpd_proto_id(vpd, NULL, 0); 887 } 888 } 889 EXPORT_SYMBOL(transport_set_vpd_proto_id); 890 891 int transport_dump_vpd_assoc( 892 struct t10_vpd *vpd, 893 unsigned char *p_buf, 894 int p_buf_len) 895 { 896 unsigned char buf[VPD_TMP_BUF_SIZE]; 897 int ret = 0; 898 int len; 899 900 memset(buf, 0, VPD_TMP_BUF_SIZE); 901 len = sprintf(buf, "T10 VPD Identifier Association: "); 902 903 switch (vpd->association) { 904 case 0x00: 905 sprintf(buf+len, "addressed logical unit\n"); 906 break; 907 case 0x10: 908 sprintf(buf+len, "target port\n"); 909 break; 910 case 0x20: 911 sprintf(buf+len, "SCSI target device\n"); 912 break; 913 default: 914 sprintf(buf+len, "Unknown 0x%02x\n", vpd->association); 915 ret = -EINVAL; 916 break; 917 } 918 919 if (p_buf) 920 strncpy(p_buf, buf, p_buf_len); 921 else 922 pr_debug("%s", buf); 923 924 return ret; 925 } 926 927 int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83) 928 { 929 /* 930 * The VPD identification association.. 931 * 932 * from spc3r23.pdf Section 7.6.3.1 Table 297 933 */ 934 vpd->association = (page_83[1] & 0x30); 935 return transport_dump_vpd_assoc(vpd, NULL, 0); 936 } 937 EXPORT_SYMBOL(transport_set_vpd_assoc); 938 939 int transport_dump_vpd_ident_type( 940 struct t10_vpd *vpd, 941 unsigned char *p_buf, 942 int p_buf_len) 943 { 944 unsigned char buf[VPD_TMP_BUF_SIZE]; 945 int ret = 0; 946 int len; 947 948 memset(buf, 0, VPD_TMP_BUF_SIZE); 949 len = sprintf(buf, "T10 VPD Identifier Type: "); 950 951 switch (vpd->device_identifier_type) { 952 case 0x00: 953 sprintf(buf+len, "Vendor specific\n"); 954 break; 955 case 0x01: 956 sprintf(buf+len, "T10 Vendor ID based\n"); 957 break; 958 case 0x02: 959 sprintf(buf+len, "EUI-64 based\n"); 960 break; 961 case 0x03: 962 sprintf(buf+len, "NAA\n"); 963 break; 964 case 0x04: 965 sprintf(buf+len, "Relative target port identifier\n"); 966 break; 967 case 0x08: 968 sprintf(buf+len, "SCSI name string\n"); 969 break; 970 default: 971 sprintf(buf+len, "Unsupported: 0x%02x\n", 972 vpd->device_identifier_type); 973 ret = -EINVAL; 974 break; 975 } 976 977 if (p_buf) { 978 if (p_buf_len < strlen(buf)+1) 979 return -EINVAL; 980 strncpy(p_buf, buf, p_buf_len); 981 } else { 982 pr_debug("%s", buf); 983 } 984 985 return ret; 986 } 987 988 int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83) 989 { 990 /* 991 * The VPD identifier type.. 
992 * 993 * from spc3r23.pdf Section 7.6.3.1 Table 298 994 */ 995 vpd->device_identifier_type = (page_83[1] & 0x0f); 996 return transport_dump_vpd_ident_type(vpd, NULL, 0); 997 } 998 EXPORT_SYMBOL(transport_set_vpd_ident_type); 999 1000 int transport_dump_vpd_ident( 1001 struct t10_vpd *vpd, 1002 unsigned char *p_buf, 1003 int p_buf_len) 1004 { 1005 unsigned char buf[VPD_TMP_BUF_SIZE]; 1006 int ret = 0; 1007 1008 memset(buf, 0, VPD_TMP_BUF_SIZE); 1009 1010 switch (vpd->device_identifier_code_set) { 1011 case 0x01: /* Binary */ 1012 snprintf(buf, sizeof(buf), 1013 "T10 VPD Binary Device Identifier: %s\n", 1014 &vpd->device_identifier[0]); 1015 break; 1016 case 0x02: /* ASCII */ 1017 snprintf(buf, sizeof(buf), 1018 "T10 VPD ASCII Device Identifier: %s\n", 1019 &vpd->device_identifier[0]); 1020 break; 1021 case 0x03: /* UTF-8 */ 1022 snprintf(buf, sizeof(buf), 1023 "T10 VPD UTF-8 Device Identifier: %s\n", 1024 &vpd->device_identifier[0]); 1025 break; 1026 default: 1027 sprintf(buf, "T10 VPD Device Identifier encoding unsupported:" 1028 " 0x%02x", vpd->device_identifier_code_set); 1029 ret = -EINVAL; 1030 break; 1031 } 1032 1033 if (p_buf) 1034 strncpy(p_buf, buf, p_buf_len); 1035 else 1036 pr_debug("%s", buf); 1037 1038 return ret; 1039 } 1040 1041 int 1042 transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83) 1043 { 1044 static const char hex_str[] = "0123456789abcdef"; 1045 int j = 0, i = 4; /* offset to start of the identifier */ 1046 1047 /* 1048 * The VPD Code Set (encoding) 1049 * 1050 * from spc3r23.pdf Section 7.6.3.1 Table 296 1051 */ 1052 vpd->device_identifier_code_set = (page_83[0] & 0x0f); 1053 switch (vpd->device_identifier_code_set) { 1054 case 0x01: /* Binary */ 1055 vpd->device_identifier[j++] = 1056 hex_str[vpd->device_identifier_type]; 1057 while (i < (4 + page_83[3])) { 1058 vpd->device_identifier[j++] = 1059 hex_str[(page_83[i] & 0xf0) >> 4]; 1060 vpd->device_identifier[j++] = 1061 hex_str[page_83[i] & 0x0f]; 1062 i++; 1063 } 1064 break; 1065 case 0x02: /* ASCII */ 1066 case 0x03: /* UTF-8 */ 1067 while (i < (4 + page_83[3])) 1068 vpd->device_identifier[j++] = page_83[i++]; 1069 break; 1070 default: 1071 break; 1072 } 1073 1074 return transport_dump_vpd_ident(vpd, NULL, 0); 1075 } 1076 EXPORT_SYMBOL(transport_set_vpd_ident); 1077 1078 static sense_reason_t 1079 target_check_max_data_sg_nents(struct se_cmd *cmd, struct se_device *dev, 1080 unsigned int size) 1081 { 1082 u32 mtl; 1083 1084 if (!cmd->se_tfo->max_data_sg_nents) 1085 return TCM_NO_SENSE; 1086 /* 1087 * Check if fabric enforced maximum SGL entries per I/O descriptor 1088 * exceeds se_cmd->data_length. If true, set SCF_UNDERFLOW_BIT + 1089 * residual_count and reduce original cmd->data_length to maximum 1090 * length based on single PAGE_SIZE entry scatter-lists. 1091 */ 1092 mtl = (cmd->se_tfo->max_data_sg_nents * PAGE_SIZE); 1093 if (cmd->data_length > mtl) { 1094 /* 1095 * If an existing CDB overflow is present, calculate new residual 1096 * based on CDB size minus fabric maximum transfer length. 1097 * 1098 * If an existing CDB underflow is present, calculate new residual 1099 * based on original cmd->data_length minus fabric maximum transfer 1100 * length. 1101 * 1102 * Otherwise, set the underflow residual based on cmd->data_length 1103 * minus fabric maximum transfer length. 
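		 *
		 * For example (illustrative numbers): with max_data_sg_nents = 32
		 * and a 4k PAGE_SIZE, mtl is 128k.  A 256k READ arriving with
		 * neither OVERFLOW nor UNDERFLOW set ends up with data_length =
		 * 128k, SCF_UNDERFLOW_BIT set and residual_count = 128k.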
1104 */ 1105 if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { 1106 cmd->residual_count = (size - mtl); 1107 } else if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { 1108 u32 orig_dl = size + cmd->residual_count; 1109 cmd->residual_count = (orig_dl - mtl); 1110 } else { 1111 cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; 1112 cmd->residual_count = (cmd->data_length - mtl); 1113 } 1114 cmd->data_length = mtl; 1115 /* 1116 * Reset sbc_check_prot() calculated protection payload 1117 * length based upon the new smaller MTL. 1118 */ 1119 if (cmd->prot_length) { 1120 u32 sectors = (mtl / dev->dev_attrib.block_size); 1121 cmd->prot_length = dev->prot_length * sectors; 1122 } 1123 } 1124 return TCM_NO_SENSE; 1125 } 1126 1127 sense_reason_t 1128 target_cmd_size_check(struct se_cmd *cmd, unsigned int size) 1129 { 1130 struct se_device *dev = cmd->se_dev; 1131 1132 if (cmd->unknown_data_length) { 1133 cmd->data_length = size; 1134 } else if (size != cmd->data_length) { 1135 pr_warn("TARGET_CORE[%s]: Expected Transfer Length:" 1136 " %u does not match SCSI CDB Length: %u for SAM Opcode:" 1137 " 0x%02x\n", cmd->se_tfo->get_fabric_name(), 1138 cmd->data_length, size, cmd->t_task_cdb[0]); 1139 1140 if (cmd->data_direction == DMA_TO_DEVICE && 1141 cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) { 1142 pr_err("Rejecting underflow/overflow WRITE data\n"); 1143 return TCM_INVALID_CDB_FIELD; 1144 } 1145 /* 1146 * Reject READ_* or WRITE_* with overflow/underflow for 1147 * type SCF_SCSI_DATA_CDB. 1148 */ 1149 if (dev->dev_attrib.block_size != 512) { 1150 pr_err("Failing OVERFLOW/UNDERFLOW for LBA op" 1151 " CDB on non 512-byte sector setup subsystem" 1152 " plugin: %s\n", dev->transport->name); 1153 /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */ 1154 return TCM_INVALID_CDB_FIELD; 1155 } 1156 /* 1157 * For the overflow case keep the existing fabric provided 1158 * ->data_length. Otherwise for the underflow case, reset 1159 * ->data_length to the smaller SCSI expected data transfer 1160 * length. 1161 */ 1162 if (size > cmd->data_length) { 1163 cmd->se_cmd_flags |= SCF_OVERFLOW_BIT; 1164 cmd->residual_count = (size - cmd->data_length); 1165 } else { 1166 cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; 1167 cmd->residual_count = (cmd->data_length - size); 1168 cmd->data_length = size; 1169 } 1170 } 1171 1172 return target_check_max_data_sg_nents(cmd, dev, size); 1173 1174 } 1175 1176 /* 1177 * Used by fabric modules containing a local struct se_cmd within their 1178 * fabric dependent per I/O descriptor. 1179 * 1180 * Preserves the value of @cmd->tag. 
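 *
 * A minimal sketch of the expected embedding, using an illustrative fabric
 * descriptor (my_cmd and my_fabric_ops are placeholder names, not part of
 * this API):
 *
 *	struct my_cmd {
 *		struct se_cmd se_cmd;
 *		unsigned char sense_buf[TRANSPORT_SENSE_BUFFER];
 *	};
 *
 *	struct my_cmd *cmd = ...;
 *
 *	cmd->se_cmd.tag = fabric_task_tag;
 *	transport_init_se_cmd(&cmd->se_cmd, &my_fabric_ops, se_sess,
 *			data_length, DMA_FROM_DEVICE, TCM_SIMPLE_TAG,
 *			&cmd->sense_buf[0]);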
1181 */ 1182 void transport_init_se_cmd( 1183 struct se_cmd *cmd, 1184 const struct target_core_fabric_ops *tfo, 1185 struct se_session *se_sess, 1186 u32 data_length, 1187 int data_direction, 1188 int task_attr, 1189 unsigned char *sense_buffer) 1190 { 1191 INIT_LIST_HEAD(&cmd->se_delayed_node); 1192 INIT_LIST_HEAD(&cmd->se_qf_node); 1193 INIT_LIST_HEAD(&cmd->se_cmd_list); 1194 INIT_LIST_HEAD(&cmd->state_list); 1195 init_completion(&cmd->t_transport_stop_comp); 1196 init_completion(&cmd->cmd_wait_comp); 1197 init_completion(&cmd->task_stop_comp); 1198 spin_lock_init(&cmd->t_state_lock); 1199 kref_init(&cmd->cmd_kref); 1200 cmd->transport_state = CMD_T_DEV_ACTIVE; 1201 1202 cmd->se_tfo = tfo; 1203 cmd->se_sess = se_sess; 1204 cmd->data_length = data_length; 1205 cmd->data_direction = data_direction; 1206 cmd->sam_task_attr = task_attr; 1207 cmd->sense_buffer = sense_buffer; 1208 1209 cmd->state_active = false; 1210 } 1211 EXPORT_SYMBOL(transport_init_se_cmd); 1212 1213 static sense_reason_t 1214 transport_check_alloc_task_attr(struct se_cmd *cmd) 1215 { 1216 struct se_device *dev = cmd->se_dev; 1217 1218 /* 1219 * Check if SAM Task Attribute emulation is enabled for this 1220 * struct se_device storage object 1221 */ 1222 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) 1223 return 0; 1224 1225 if (cmd->sam_task_attr == TCM_ACA_TAG) { 1226 pr_debug("SAM Task Attribute ACA" 1227 " emulation is not supported\n"); 1228 return TCM_INVALID_CDB_FIELD; 1229 } 1230 1231 return 0; 1232 } 1233 1234 sense_reason_t 1235 target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb) 1236 { 1237 struct se_device *dev = cmd->se_dev; 1238 sense_reason_t ret; 1239 1240 /* 1241 * Ensure that the received CDB is less than the max (252 + 8) bytes 1242 * for VARIABLE_LENGTH_CMD 1243 */ 1244 if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) { 1245 pr_err("Received SCSI CDB with command_size: %d that" 1246 " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", 1247 scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE); 1248 return TCM_INVALID_CDB_FIELD; 1249 } 1250 /* 1251 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE, 1252 * allocate the additional extended CDB buffer now.. Otherwise 1253 * setup the pointer from __t_task_cdb to t_task_cdb. 
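	 * (Only VARIABLE_LENGTH_CMD style CDBs are expected to exceed
	 * sizeof(__t_task_cdb) and take the kzalloc() path below.)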
	 */
	if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
		cmd->t_task_cdb = kzalloc(scsi_command_size(cdb),
					GFP_KERNEL);
		if (!cmd->t_task_cdb) {
			pr_err("Unable to allocate cmd->t_task_cdb"
				" %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
				scsi_command_size(cdb),
				(unsigned long)sizeof(cmd->__t_task_cdb));
			return TCM_OUT_OF_RESOURCES;
		}
	} else
		cmd->t_task_cdb = &cmd->__t_task_cdb[0];
	/*
	 * Copy the original CDB into cmd->t_task_cdb.
	 */
	memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));

	trace_target_sequencer_start(cmd);

	/*
	 * Check for an existing UNIT ATTENTION condition
	 */
	ret = target_scsi3_ua_check(cmd);
	if (ret)
		return ret;

	ret = target_alua_state_check(cmd);
	if (ret)
		return ret;

	ret = target_check_reservation(cmd);
	if (ret) {
		cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
		return ret;
	}

	ret = dev->transport->parse_cdb(cmd);
	if (ret == TCM_UNSUPPORTED_SCSI_OPCODE)
		pr_warn_ratelimited("%s/%s: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n",
				    cmd->se_tfo->get_fabric_name(),
				    cmd->se_sess->se_node_acl->initiatorname,
				    cmd->t_task_cdb[0]);
	if (ret)
		return ret;

	ret = transport_check_alloc_task_attr(cmd);
	if (ret)
		return ret;

	cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
	atomic_long_inc(&cmd->se_lun->lun_stats.cmd_pdus);
	return 0;
}
EXPORT_SYMBOL(target_setup_cmd_from_cdb);

/*
 * Used by fabric module frontends to queue tasks directly.
 * May only be used from process context.
 */
int transport_handle_cdb_direct(
	struct se_cmd *cmd)
{
	sense_reason_t ret;

	if (!cmd->se_lun) {
		dump_stack();
		pr_err("cmd->se_lun is NULL\n");
		return -EINVAL;
	}
	if (in_interrupt()) {
		dump_stack();
		pr_err("transport_generic_handle_cdb cannot be called"
				" from interrupt context\n");
		return -EINVAL;
	}
	/*
	 * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE to ensure that
	 * outstanding descriptors are handled correctly during shutdown via
	 * transport_wait_for_tasks()
	 *
	 * Also, we don't take cmd->t_state_lock here as we only expect
	 * this to be called for initial descriptor submission.
	 */
	cmd->t_state = TRANSPORT_NEW_CMD;
	cmd->transport_state |= CMD_T_ACTIVE;

	/*
	 * transport_generic_new_cmd() is already handling QUEUE_FULL,
	 * so follow TRANSPORT_NEW_CMD processing thread context usage
	 * and call transport_generic_request_failure() if necessary..
	 */
	ret = transport_generic_new_cmd(cmd);
	if (ret)
		transport_generic_request_failure(cmd, ret);
	return 0;
}
EXPORT_SYMBOL(transport_handle_cdb_direct);

sense_reason_t
transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
		u32 sgl_count, struct scatterlist *sgl_bidi, u32 sgl_bidi_count)
{
	if (!sgl || !sgl_count)
		return 0;

	/*
	 * Reject SCSI data overflow with map_mem_to_cmd() as incoming
	 * scatterlists already have been set to follow what the fabric
	 * passes for the original expected data transfer length.
	 */
	if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
		pr_warn("Rejecting SCSI DATA overflow for fabric using"
			" SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
		return TCM_INVALID_CDB_FIELD;
	}

	cmd->t_data_sg = sgl;
	cmd->t_data_nents = sgl_count;
	cmd->t_bidi_data_sg = sgl_bidi;
	cmd->t_bidi_data_nents = sgl_bidi_count;

	cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
	return 0;
}

/*
 * target_submit_cmd_map_sgls - lookup unpacked lun and submit uninitialized
 * se_cmd + use pre-allocated SGL memory.
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @cdb: pointer to SCSI CDB
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @data_length: fabric expected data transfer length
 * @task_attr: SAM task attribute
 * @data_dir: DMA data direction
 * @flags: flags for command submission from target_sc_flags_tables
 * @sgl: struct scatterlist memory for unidirectional mapping
 * @sgl_count: scatterlist count for unidirectional mapping
 * @sgl_bidi: struct scatterlist memory for bidirectional READ mapping
 * @sgl_bidi_count: scatterlist count for bidirectional READ mapping
 * @sgl_prot: struct scatterlist memory protection information
 * @sgl_prot_count: scatterlist count for protection information
 *
 * Task tags are supported if the caller has set @se_cmd->tag.
 *
 * Returns non-zero to signal active I/O shutdown failure.  All other
 * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
 * but still return zero here.
 *
 * This may only be called from process context, and also currently
 * assumes internal allocation of fabric payload buffer by target-core.
 */
int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *cdb, unsigned char *sense, u64 unpacked_lun,
		u32 data_length, int task_attr, int data_dir, int flags,
		struct scatterlist *sgl, u32 sgl_count,
		struct scatterlist *sgl_bidi, u32 sgl_bidi_count,
		struct scatterlist *sgl_prot, u32 sgl_prot_count)
{
	struct se_portal_group *se_tpg;
	sense_reason_t rc;
	int ret;

	se_tpg = se_sess->se_tpg;
	BUG_ON(!se_tpg);
	BUG_ON(se_cmd->se_tfo || se_cmd->se_sess);
	BUG_ON(in_interrupt());
	/*
	 * Initialize se_cmd for target operation.  From this point
	 * exceptions are handled by sending exception status via
	 * target_core_fabric_ops->queue_status() callback
	 */
	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
				data_length, data_dir, task_attr, sense);
	if (flags & TARGET_SCF_UNKNOWN_SIZE)
		se_cmd->unknown_data_length = 1;
	/*
	 * Obtain struct se_cmd->cmd_kref reference and add new cmd to
	 * se_sess->sess_cmd_list.  A second kref_get here is necessary
	 * for fabrics using TARGET_SCF_ACK_KREF that expect a second
	 * kref_put() to happen during fabric packet acknowledgement.
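	 * Fabrics passing TARGET_SCF_ACK_KREF are expected to drop that
	 * second reference themselves via target_put_sess_cmd() once the
	 * initiator has acknowledged the response.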
	 */
	ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
	if (ret)
		return ret;
	/*
	 * Signal bidirectional data payloads to target-core
	 */
	if (flags & TARGET_SCF_BIDI_OP)
		se_cmd->se_cmd_flags |= SCF_BIDI;
	/*
	 * Locate se_lun pointer and attach it to struct se_cmd
	 */
	rc = transport_lookup_cmd_lun(se_cmd, unpacked_lun);
	if (rc) {
		transport_send_check_condition_and_sense(se_cmd, rc, 0);
		target_put_sess_cmd(se_cmd);
		return 0;
	}

	rc = target_setup_cmd_from_cdb(se_cmd, cdb);
	if (rc != 0) {
		transport_generic_request_failure(se_cmd, rc);
		return 0;
	}

	/*
	 * Save pointers for SGLs containing protection information,
	 * if present.
	 */
	if (sgl_prot_count) {
		se_cmd->t_prot_sg = sgl_prot;
		se_cmd->t_prot_nents = sgl_prot_count;
		se_cmd->se_cmd_flags |= SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC;
	}

	/*
	 * When a non-zero sgl_count has been passed perform SGL passthrough
	 * mapping for pre-allocated fabric memory instead of having target
	 * core perform an internal SGL allocation..
	 */
	if (sgl_count != 0) {
		BUG_ON(!sgl);

		/*
		 * A work-around for tcm_loop as some userspace code via
		 * scsi-generic do not memset their associated read buffers,
		 * so go ahead and do that here for type non-data CDBs.  Also
		 * note that this is currently guaranteed to be a single SGL
		 * for this case by target core in target_setup_cmd_from_cdb()
		 * -> transport_generic_cmd_sequencer().
		 */
		if (!(se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) &&
		     se_cmd->data_direction == DMA_FROM_DEVICE) {
			unsigned char *buf = NULL;

			if (sgl)
				buf = kmap(sg_page(sgl)) + sgl->offset;

			if (buf) {
				memset(buf, 0, sgl->length);
				kunmap(sg_page(sgl));
			}
		}

		rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
				sgl_bidi, sgl_bidi_count);
		if (rc != 0) {
			transport_generic_request_failure(se_cmd, rc);
			return 0;
		}
	}

	/*
	 * Check if we need to delay processing because of ALUA
	 * Active/NonOptimized primary access state..
	 */
	core_alua_check_nonop_delay(se_cmd);

	transport_handle_cdb_direct(se_cmd);
	return 0;
}
EXPORT_SYMBOL(target_submit_cmd_map_sgls);

/*
 * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @cdb: pointer to SCSI CDB
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @data_length: fabric expected data transfer length
 * @task_attr: SAM task attribute
 * @data_dir: DMA data direction
 * @flags: flags for command submission from target_sc_flags_tables
 *
 * Task tags are supported if the caller has set @se_cmd->tag.
 *
 * Returns non-zero to signal active I/O shutdown failure.  All other
 * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
 * but still return zero here.
 *
 * This may only be called from process context, and also currently
 * assumes internal allocation of fabric payload buffer by target-core.
 *
 * It also assumes internal target core SGL memory allocation.
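 *
 * A minimal calling sketch from an illustrative fabric receive path
 * (my_cmd, cdb_buf, data_len and unpacked_lun are placeholder locals):
 *
 *	cmd->se_cmd.tag = fabric_task_tag;
 *	rc = target_submit_cmd(&cmd->se_cmd, se_sess, cdb_buf,
 *			&cmd->sense_buf[0], unpacked_lun, data_len,
 *			TCM_SIMPLE_TAG, DMA_TO_DEVICE, TARGET_SCF_ACK_KREF);
 *	if (rc)
 *		goto fail_fabric_io;
 *
 * A non-zero return means the command was never submitted; all other
 * failures are reported later as CHECK_CONDITION via ->queue_status().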
1544 */ 1545 int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess, 1546 unsigned char *cdb, unsigned char *sense, u64 unpacked_lun, 1547 u32 data_length, int task_attr, int data_dir, int flags) 1548 { 1549 return target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense, 1550 unpacked_lun, data_length, task_attr, data_dir, 1551 flags, NULL, 0, NULL, 0, NULL, 0); 1552 } 1553 EXPORT_SYMBOL(target_submit_cmd); 1554 1555 static void target_complete_tmr_failure(struct work_struct *work) 1556 { 1557 struct se_cmd *se_cmd = container_of(work, struct se_cmd, work); 1558 1559 se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST; 1560 se_cmd->se_tfo->queue_tm_rsp(se_cmd); 1561 1562 transport_cmd_check_stop_to_fabric(se_cmd); 1563 } 1564 1565 /** 1566 * target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd 1567 * for TMR CDBs 1568 * 1569 * @se_cmd: command descriptor to submit 1570 * @se_sess: associated se_sess for endpoint 1571 * @sense: pointer to SCSI sense buffer 1572 * @unpacked_lun: unpacked LUN to reference for struct se_lun 1573 * @fabric_context: fabric context for TMR req 1574 * @tm_type: Type of TM request 1575 * @gfp: gfp type for caller 1576 * @tag: referenced task tag for TMR_ABORT_TASK 1577 * @flags: submit cmd flags 1578 * 1579 * Callable from all contexts. 1580 **/ 1581 1582 int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess, 1583 unsigned char *sense, u64 unpacked_lun, 1584 void *fabric_tmr_ptr, unsigned char tm_type, 1585 gfp_t gfp, unsigned int tag, int flags) 1586 { 1587 struct se_portal_group *se_tpg; 1588 int ret; 1589 1590 se_tpg = se_sess->se_tpg; 1591 BUG_ON(!se_tpg); 1592 1593 transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 1594 0, DMA_NONE, TCM_SIMPLE_TAG, sense); 1595 /* 1596 * FIXME: Currently expect caller to handle se_cmd->se_tmr_req 1597 * allocation failure. 1598 */ 1599 ret = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, tm_type, gfp); 1600 if (ret < 0) 1601 return -ENOMEM; 1602 1603 if (tm_type == TMR_ABORT_TASK) 1604 se_cmd->se_tmr_req->ref_task_tag = tag; 1605 1606 /* See target_submit_cmd for commentary */ 1607 ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF); 1608 if (ret) { 1609 core_tmr_release_req(se_cmd->se_tmr_req); 1610 return ret; 1611 } 1612 1613 ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun); 1614 if (ret) { 1615 /* 1616 * For callback during failure handling, push this work off 1617 * to process context with TMR_LUN_DOES_NOT_EXIST status. 1618 */ 1619 INIT_WORK(&se_cmd->work, target_complete_tmr_failure); 1620 schedule_work(&se_cmd->work); 1621 return 0; 1622 } 1623 transport_generic_handle_tmr(se_cmd); 1624 return 0; 1625 } 1626 EXPORT_SYMBOL(target_submit_tmr); 1627 1628 /* 1629 * If the cmd is active, request it to be stopped and sleep until it 1630 * has completed. 
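 *
 * The caller must hold cmd->t_state_lock; the lock is dropped across the
 * wait and re-acquired before returning, as the __releases()/__acquires()
 * annotations below document.  A typical calling pattern (sketch):
 *
 *	spin_lock_irqsave(&cmd->t_state_lock, flags);
 *	was_active = target_stop_cmd(cmd, &flags);
 *	spin_unlock_irqrestore(&cmd->t_state_lock, flags);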
1631 */ 1632 bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags) 1633 __releases(&cmd->t_state_lock) 1634 __acquires(&cmd->t_state_lock) 1635 { 1636 bool was_active = false; 1637 1638 if (cmd->transport_state & CMD_T_BUSY) { 1639 cmd->transport_state |= CMD_T_REQUEST_STOP; 1640 spin_unlock_irqrestore(&cmd->t_state_lock, *flags); 1641 1642 pr_debug("cmd %p waiting to complete\n", cmd); 1643 wait_for_completion(&cmd->task_stop_comp); 1644 pr_debug("cmd %p stopped successfully\n", cmd); 1645 1646 spin_lock_irqsave(&cmd->t_state_lock, *flags); 1647 cmd->transport_state &= ~CMD_T_REQUEST_STOP; 1648 cmd->transport_state &= ~CMD_T_BUSY; 1649 was_active = true; 1650 } 1651 1652 return was_active; 1653 } 1654 1655 /* 1656 * Handle SAM-esque emulation for generic transport request failures. 1657 */ 1658 void transport_generic_request_failure(struct se_cmd *cmd, 1659 sense_reason_t sense_reason) 1660 { 1661 int ret = 0; 1662 1663 pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08llx" 1664 " CDB: 0x%02x\n", cmd, cmd->tag, cmd->t_task_cdb[0]); 1665 pr_debug("-----[ i_state: %d t_state: %d sense_reason: %d\n", 1666 cmd->se_tfo->get_cmd_state(cmd), 1667 cmd->t_state, sense_reason); 1668 pr_debug("-----[ CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n", 1669 (cmd->transport_state & CMD_T_ACTIVE) != 0, 1670 (cmd->transport_state & CMD_T_STOP) != 0, 1671 (cmd->transport_state & CMD_T_SENT) != 0); 1672 1673 /* 1674 * For SAM Task Attribute emulation for failed struct se_cmd 1675 */ 1676 transport_complete_task_attr(cmd); 1677 /* 1678 * Handle special case for COMPARE_AND_WRITE failure, where the 1679 * callback is expected to drop the per device ->caw_sem. 1680 */ 1681 if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) && 1682 cmd->transport_complete_callback) 1683 cmd->transport_complete_callback(cmd, false); 1684 1685 switch (sense_reason) { 1686 case TCM_NON_EXISTENT_LUN: 1687 case TCM_UNSUPPORTED_SCSI_OPCODE: 1688 case TCM_INVALID_CDB_FIELD: 1689 case TCM_INVALID_PARAMETER_LIST: 1690 case TCM_PARAMETER_LIST_LENGTH_ERROR: 1691 case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE: 1692 case TCM_UNKNOWN_MODE_PAGE: 1693 case TCM_WRITE_PROTECTED: 1694 case TCM_ADDRESS_OUT_OF_RANGE: 1695 case TCM_CHECK_CONDITION_ABORT_CMD: 1696 case TCM_CHECK_CONDITION_UNIT_ATTENTION: 1697 case TCM_CHECK_CONDITION_NOT_READY: 1698 case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED: 1699 case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED: 1700 case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED: 1701 break; 1702 case TCM_OUT_OF_RESOURCES: 1703 sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1704 break; 1705 case TCM_RESERVATION_CONFLICT: 1706 /* 1707 * No SENSE Data payload for this case, set SCSI Status 1708 * and queue the response to $FABRIC_MOD. 1709 * 1710 * Uses linux/include/scsi/scsi.h SAM status codes defs 1711 */ 1712 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; 1713 /* 1714 * For UA Interlock Code 11b, a RESERVATION CONFLICT will 1715 * establish a UNIT ATTENTION with PREVIOUS RESERVATION 1716 * CONFLICT STATUS. 
1717 * 1718 * See spc4r17, section 7.4.6 Control Mode Page, Table 349 1719 */ 1720 if (cmd->se_sess && 1721 cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl == 2) { 1722 target_ua_allocate_lun(cmd->se_sess->se_node_acl, 1723 cmd->orig_fe_lun, 0x2C, 1724 ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); 1725 } 1726 trace_target_cmd_complete(cmd); 1727 ret = cmd->se_tfo->queue_status(cmd); 1728 if (ret == -EAGAIN || ret == -ENOMEM) 1729 goto queue_full; 1730 goto check_stop; 1731 default: 1732 pr_err("Unknown transport error for CDB 0x%02x: %d\n", 1733 cmd->t_task_cdb[0], sense_reason); 1734 sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; 1735 break; 1736 } 1737 1738 ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0); 1739 if (ret == -EAGAIN || ret == -ENOMEM) 1740 goto queue_full; 1741 1742 check_stop: 1743 transport_lun_remove_cmd(cmd); 1744 transport_cmd_check_stop_to_fabric(cmd); 1745 return; 1746 1747 queue_full: 1748 cmd->t_state = TRANSPORT_COMPLETE_QF_OK; 1749 transport_handle_queue_full(cmd, cmd->se_dev); 1750 } 1751 EXPORT_SYMBOL(transport_generic_request_failure); 1752 1753 void __target_execute_cmd(struct se_cmd *cmd) 1754 { 1755 sense_reason_t ret; 1756 1757 if (cmd->execute_cmd) { 1758 ret = cmd->execute_cmd(cmd); 1759 if (ret) { 1760 spin_lock_irq(&cmd->t_state_lock); 1761 cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT); 1762 spin_unlock_irq(&cmd->t_state_lock); 1763 1764 transport_generic_request_failure(cmd, ret); 1765 } 1766 } 1767 } 1768 1769 static int target_write_prot_action(struct se_cmd *cmd) 1770 { 1771 u32 sectors; 1772 /* 1773 * Perform WRITE_INSERT of PI using software emulation when backend 1774 * device has PI enabled, if the transport has not already generated 1775 * PI using hardware WRITE_INSERT offload. 1776 */ 1777 switch (cmd->prot_op) { 1778 case TARGET_PROT_DOUT_INSERT: 1779 if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_INSERT)) 1780 sbc_dif_generate(cmd); 1781 break; 1782 case TARGET_PROT_DOUT_STRIP: 1783 if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_STRIP) 1784 break; 1785 1786 sectors = cmd->data_length >> ilog2(cmd->se_dev->dev_attrib.block_size); 1787 cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba, 1788 sectors, 0, cmd->t_prot_sg, 0); 1789 if (unlikely(cmd->pi_err)) { 1790 spin_lock_irq(&cmd->t_state_lock); 1791 cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT); 1792 spin_unlock_irq(&cmd->t_state_lock); 1793 transport_generic_request_failure(cmd, cmd->pi_err); 1794 return -1; 1795 } 1796 break; 1797 default: 1798 break; 1799 } 1800 1801 return 0; 1802 } 1803 1804 static bool target_handle_task_attr(struct se_cmd *cmd) 1805 { 1806 struct se_device *dev = cmd->se_dev; 1807 1808 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) 1809 return false; 1810 1811 /* 1812 * Check for the existence of HEAD_OF_QUEUE, and if true return 1 1813 * to allow the passed struct se_cmd list of tasks to the front of the list. 1814 */ 1815 switch (cmd->sam_task_attr) { 1816 case TCM_HEAD_TAG: 1817 pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x\n", 1818 cmd->t_task_cdb[0]); 1819 return false; 1820 case TCM_ORDERED_TAG: 1821 atomic_inc_mb(&dev->dev_ordered_sync); 1822 1823 pr_debug("Added ORDERED for CDB: 0x%02x to ordered list\n", 1824 cmd->t_task_cdb[0]); 1825 1826 /* 1827 * Execute an ORDERED command if no other older commands 1828 * exist that need to be completed first. 
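		 * (In this implementation that means dev->simple_cmds has
		 * already drained to zero; otherwise the command is parked
		 * on the delayed_cmd_list below.)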
		 */
		if (!atomic_read(&dev->simple_cmds))
			return false;
		break;
	default:
		/*
		 * For SIMPLE and UNTAGGED Task Attribute commands
		 */
		atomic_inc_mb(&dev->simple_cmds);
		break;
	}

	if (atomic_read(&dev->dev_ordered_sync) == 0)
		return false;

	spin_lock(&dev->delayed_cmd_lock);
	list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list);
	spin_unlock(&dev->delayed_cmd_lock);

	pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to delayed CMD list\n",
		cmd->t_task_cdb[0], cmd->sam_task_attr);
	return true;
}

void target_execute_cmd(struct se_cmd *cmd)
{
	/*
	 * If the received CDB has already been aborted stop processing it here.
	 */
	if (transport_check_aborted_status(cmd, 1))
		return;

	/*
	 * Determine if frontend context caller is requesting the stopping of
	 * this command for frontend exceptions.
	 */
	spin_lock_irq(&cmd->t_state_lock);
	if (cmd->transport_state & CMD_T_STOP) {
		pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
			__func__, __LINE__, cmd->tag);

		spin_unlock_irq(&cmd->t_state_lock);
		complete_all(&cmd->t_transport_stop_comp);
		return;
	}

	cmd->t_state = TRANSPORT_PROCESSING;
	cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT;
	spin_unlock_irq(&cmd->t_state_lock);

	if (target_write_prot_action(cmd))
		return;

	if (target_handle_task_attr(cmd)) {
		spin_lock_irq(&cmd->t_state_lock);
		cmd->transport_state &= ~(CMD_T_BUSY | CMD_T_SENT);
		spin_unlock_irq(&cmd->t_state_lock);
		return;
	}

	__target_execute_cmd(cmd);
}
EXPORT_SYMBOL(target_execute_cmd);

/*
 * Process all commands up to the last received ORDERED task attribute which
 * requires another blocking boundary
 */
static void target_restart_delayed_cmds(struct se_device *dev)
{
	for (;;) {
		struct se_cmd *cmd;

		spin_lock(&dev->delayed_cmd_lock);
		if (list_empty(&dev->delayed_cmd_list)) {
			spin_unlock(&dev->delayed_cmd_lock);
			break;
		}

		cmd = list_entry(dev->delayed_cmd_list.next,
				 struct se_cmd, se_delayed_node);
		list_del(&cmd->se_delayed_node);
		spin_unlock(&dev->delayed_cmd_lock);

		__target_execute_cmd(cmd);

		if (cmd->sam_task_attr == TCM_ORDERED_TAG)
			break;
	}
}

/*
 * Called from I/O completion to determine which dormant/delayed
 * and ordered cmds need to have their tasks added to the execution queue.
 */
static void transport_complete_task_attr(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
		return;

	if (cmd->sam_task_attr == TCM_SIMPLE_TAG) {
		atomic_dec_mb(&dev->simple_cmds);
		dev->dev_cur_ordered_id++;
		pr_debug("Incremented dev->dev_cur_ordered_id: %u for SIMPLE\n",
			 dev->dev_cur_ordered_id);
	} else if (cmd->sam_task_attr == TCM_HEAD_TAG) {
		dev->dev_cur_ordered_id++;
		pr_debug("Incremented dev_cur_ordered_id: %u for HEAD_OF_QUEUE\n",
			 dev->dev_cur_ordered_id);
	} else if (cmd->sam_task_attr == TCM_ORDERED_TAG) {
		atomic_dec_mb(&dev->dev_ordered_sync);

		dev->dev_cur_ordered_id++;
		pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n",
			 dev->dev_cur_ordered_id);
	}

	target_restart_delayed_cmds(dev);
}

static void transport_complete_qf(struct se_cmd *cmd)
{
	int ret = 0;

	transport_complete_task_attr(cmd);

	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
		trace_target_cmd_complete(cmd);
		ret = cmd->se_tfo->queue_status(cmd);
		goto out;
	}

	switch (cmd->data_direction) {
	case DMA_FROM_DEVICE:
		trace_target_cmd_complete(cmd);
		ret = cmd->se_tfo->queue_data_in(cmd);
		break;
	case DMA_TO_DEVICE:
		if (cmd->se_cmd_flags & SCF_BIDI) {
			ret = cmd->se_tfo->queue_data_in(cmd);
			break;
		}
		/* Fall through for DMA_TO_DEVICE */
	case DMA_NONE:
		trace_target_cmd_complete(cmd);
		ret = cmd->se_tfo->queue_status(cmd);
		break;
	default:
		break;
	}

out:
	if (ret < 0) {
		transport_handle_queue_full(cmd, cmd->se_dev);
		return;
	}
	transport_lun_remove_cmd(cmd);
	transport_cmd_check_stop_to_fabric(cmd);
}

static void transport_handle_queue_full(
	struct se_cmd *cmd,
	struct se_device *dev)
{
	spin_lock_irq(&dev->qf_cmd_lock);
	list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
	atomic_inc_mb(&dev->dev_qf_count);
	spin_unlock_irq(&cmd->se_dev->qf_cmd_lock);

	schedule_work(&cmd->se_dev->qf_work_queue);
}

static bool target_read_prot_action(struct se_cmd *cmd)
{
	switch (cmd->prot_op) {
	case TARGET_PROT_DIN_STRIP:
		if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP)) {
			u32 sectors = cmd->data_length >>
				      ilog2(cmd->se_dev->dev_attrib.block_size);

			cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba,
						     sectors, 0, cmd->t_prot_sg,
						     0);
			if (cmd->pi_err)
				return true;
		}
		break;
	case TARGET_PROT_DIN_INSERT:
		if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_INSERT)
			break;

		sbc_dif_generate(cmd);
		break;
	default:
		break;
	}

	return false;
}

static void target_complete_ok_work(struct work_struct *work)
{
	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
	int ret;

	/*
	 * Check if we need to move delayed/dormant tasks from cmds on the
	 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
	 * Attribute.
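	 * transport_complete_task_attr() below drops the counters taken at
	 * submission time and re-issues anything parked on the delayed list.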
	 */
	transport_complete_task_attr(cmd);

	/*
	 * Check to schedule QUEUE_FULL work, or execute an existing
	 * cmd->transport_qf_callback()
	 */
	if (atomic_read(&cmd->se_dev->dev_qf_count) != 0)
		schedule_work(&cmd->se_dev->qf_work_queue);

	/*
	 * Check if we need to send a sense buffer from
	 * the struct se_cmd in question.
	 */
	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
		WARN_ON(!cmd->scsi_status);
		ret = transport_send_check_condition_and_sense(
					cmd, 0, 1);
		if (ret == -EAGAIN || ret == -ENOMEM)
			goto queue_full;

		transport_lun_remove_cmd(cmd);
		transport_cmd_check_stop_to_fabric(cmd);
		return;
	}
	/*
	 * Check for a callback, used amongst other things by
	 * XDWRITE_READ_10 and COMPARE_AND_WRITE emulation.
	 */
	if (cmd->transport_complete_callback) {
		sense_reason_t rc;

		rc = cmd->transport_complete_callback(cmd, true);
		if (!rc && !(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE_POST)) {
			if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
			    !cmd->data_length)
				goto queue_rsp;

			return;
		} else if (rc) {
			ret = transport_send_check_condition_and_sense(cmd,
						rc, 0);
			if (ret == -EAGAIN || ret == -ENOMEM)
				goto queue_full;

			transport_lun_remove_cmd(cmd);
			transport_cmd_check_stop_to_fabric(cmd);
			return;
		}
	}

queue_rsp:
	switch (cmd->data_direction) {
	case DMA_FROM_DEVICE:
		atomic_long_add(cmd->data_length,
				&cmd->se_lun->lun_stats.tx_data_octets);
		/*
		 * Perform READ_STRIP of PI using software emulation when
		 * backend had PI enabled, if the transport will not be
		 * performing hardware READ_STRIP offload.
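		 * A software verify failure is reported to the initiator as
		 * a CHECK CONDITION using the sense reason stored in
		 * cmd->pi_err by target_read_prot_action().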
		 */
		if (target_read_prot_action(cmd)) {
			ret = transport_send_check_condition_and_sense(cmd,
						cmd->pi_err, 0);
			if (ret == -EAGAIN || ret == -ENOMEM)
				goto queue_full;

			transport_lun_remove_cmd(cmd);
			transport_cmd_check_stop_to_fabric(cmd);
			return;
		}

		trace_target_cmd_complete(cmd);
		ret = cmd->se_tfo->queue_data_in(cmd);
		if (ret == -EAGAIN || ret == -ENOMEM)
			goto queue_full;
		break;
	case DMA_TO_DEVICE:
		atomic_long_add(cmd->data_length,
				&cmd->se_lun->lun_stats.rx_data_octets);
		/*
		 * Check if we need to send READ payload for BIDI-COMMAND
		 */
		if (cmd->se_cmd_flags & SCF_BIDI) {
			atomic_long_add(cmd->data_length,
					&cmd->se_lun->lun_stats.tx_data_octets);
			ret = cmd->se_tfo->queue_data_in(cmd);
			if (ret == -EAGAIN || ret == -ENOMEM)
				goto queue_full;
			break;
		}
		/* Fall through for DMA_TO_DEVICE */
	case DMA_NONE:
		trace_target_cmd_complete(cmd);
		ret = cmd->se_tfo->queue_status(cmd);
		if (ret == -EAGAIN || ret == -ENOMEM)
			goto queue_full;
		break;
	default:
		break;
	}

	transport_lun_remove_cmd(cmd);
	transport_cmd_check_stop_to_fabric(cmd);
	return;

queue_full:
	pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
		 " data_direction: %d\n", cmd, cmd->data_direction);
	cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
	transport_handle_queue_full(cmd, cmd->se_dev);
}

static inline void transport_free_sgl(struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	int count;

	for_each_sg(sgl, sg, nents, count)
		__free_page(sg_page(sg));

	kfree(sgl);
}

static inline void transport_reset_sgl_orig(struct se_cmd *cmd)
{
	/*
	 * Check for saved t_data_sg that may be used for COMPARE_AND_WRITE
	 * emulation, and free + reset pointers if necessary.
	 */
	if (!cmd->t_data_sg_orig)
		return;

	kfree(cmd->t_data_sg);
	cmd->t_data_sg = cmd->t_data_sg_orig;
	cmd->t_data_sg_orig = NULL;
	cmd->t_data_nents = cmd->t_data_nents_orig;
	cmd->t_data_nents_orig = 0;
}

static inline void transport_free_pages(struct se_cmd *cmd)
{
	if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) {
		transport_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents);
		cmd->t_prot_sg = NULL;
		cmd->t_prot_nents = 0;
	}

	if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) {
		/*
		 * Release special case READ buffer payload required for
		 * SG_TO_MEM_NOALLOC to function with COMPARE_AND_WRITE
		 */
		if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) {
			transport_free_sgl(cmd->t_bidi_data_sg,
					   cmd->t_bidi_data_nents);
			cmd->t_bidi_data_sg = NULL;
			cmd->t_bidi_data_nents = 0;
		}
		transport_reset_sgl_orig(cmd);
		return;
	}
	transport_reset_sgl_orig(cmd);

	transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
	cmd->t_data_sg = NULL;
	cmd->t_data_nents = 0;

	transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
	cmd->t_bidi_data_sg = NULL;
	cmd->t_bidi_data_nents = 0;
}

/**
 * transport_release_cmd - free a command
 * @cmd: command to free
 *
 * This routine unconditionally frees a command, and reference counting
 * or list removal must be done in the caller.
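 *
 * The se_cmd descriptor itself is only released once its cmd_kref
 * reference count drops to zero via target_put_sess_cmd().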
 */
static int transport_release_cmd(struct se_cmd *cmd)
{
	BUG_ON(!cmd->se_tfo);

	if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
		core_tmr_release_req(cmd->se_tmr_req);
	if (cmd->t_task_cdb != cmd->__t_task_cdb)
		kfree(cmd->t_task_cdb);
	/*
	 * If this cmd has been setup with target_get_sess_cmd(), drop
	 * the kref and call ->release_cmd() in kref callback.
	 */
	return target_put_sess_cmd(cmd);
}

/**
 * transport_put_cmd - release a reference to a command
 * @cmd: command to release
 *
 * This routine releases our reference to the command and frees it if possible.
 */
static int transport_put_cmd(struct se_cmd *cmd)
{
	transport_free_pages(cmd);
	return transport_release_cmd(cmd);
}

void *transport_kmap_data_sg(struct se_cmd *cmd)
{
	struct scatterlist *sg = cmd->t_data_sg;
	struct page **pages;
	int i;

	/*
	 * We need to take into account a possible offset here for fabrics like
	 * tcm_loop, which may be using a contig buffer from the SCSI midlayer
	 * for control CDBs passed as SGLs via transport_generic_map_mem_to_cmd()
	 */
	if (!cmd->t_data_nents)
		return NULL;

	BUG_ON(!sg);
	if (cmd->t_data_nents == 1)
		return kmap(sg_page(sg)) + sg->offset;

	/* >1 page. use vmap */
	pages = kmalloc(sizeof(*pages) * cmd->t_data_nents, GFP_KERNEL);
	if (!pages)
		return NULL;

	/* convert sg[] to pages[] */
	for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) {
		pages[i] = sg_page(sg);
	}

	cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL);
	kfree(pages);
	if (!cmd->t_data_vmap)
		return NULL;

	return cmd->t_data_vmap + cmd->t_data_sg[0].offset;
}
EXPORT_SYMBOL(transport_kmap_data_sg);

void transport_kunmap_data_sg(struct se_cmd *cmd)
{
	if (!cmd->t_data_nents) {
		return;
	} else if (cmd->t_data_nents == 1) {
		kunmap(sg_page(cmd->t_data_sg));
		return;
	}

	vunmap(cmd->t_data_vmap);
	cmd->t_data_vmap = NULL;
}
EXPORT_SYMBOL(transport_kunmap_data_sg);

int
target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents, u32 length,
		 bool zero_page)
{
	struct scatterlist *sg;
	struct page *page;
	gfp_t zero_flag = (zero_page) ? __GFP_ZERO : 0;
	unsigned int nent;
	int i = 0;

	nent = DIV_ROUND_UP(length, PAGE_SIZE);
	sg = kmalloc(sizeof(struct scatterlist) * nent, GFP_KERNEL);
	if (!sg)
		return -ENOMEM;

	sg_init_table(sg, nent);

	while (length) {
		u32 page_len = min_t(u32, length, PAGE_SIZE);
		page = alloc_page(GFP_KERNEL | zero_flag);
		if (!page)
			goto out;

		sg_set_page(&sg[i], page, page_len, 0);
		length -= page_len;
		i++;
	}
	*sgl = sg;
	*nents = nent;
	return 0;

out:
	while (i > 0) {
		i--;
		__free_page(sg_page(&sg[i]));
	}
	kfree(sg);
	return -ENOMEM;
}

/*
 * Allocate any required resources to execute the command.  For writes we
 * might not have the payload yet, so notify the fabric via a call to
 * ->write_pending instead.  Otherwise place it on the execution queue.
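 *
 * Protection information scatterlists are allocated here as well whenever
 * cmd->prot_op requests protection and the fabric has not supplied its own
 * buffers.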
 */
sense_reason_t
transport_generic_new_cmd(struct se_cmd *cmd)
{
	int ret = 0;
	bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB);

	if (cmd->prot_op != TARGET_PROT_NORMAL &&
	    !(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) {
		ret = target_alloc_sgl(&cmd->t_prot_sg, &cmd->t_prot_nents,
				       cmd->prot_length, true);
		if (ret < 0)
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	/*
	 * Determine if the TCM fabric module has already allocated physical
	 * memory, and is directly calling transport_generic_map_mem_to_cmd()
	 * beforehand.
	 */
	if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
	    cmd->data_length) {

		if ((cmd->se_cmd_flags & SCF_BIDI) ||
		    (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) {
			u32 bidi_length;

			if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)
				bidi_length = cmd->t_task_nolb *
					      cmd->se_dev->dev_attrib.block_size;
			else
				bidi_length = cmd->data_length;

			ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
					       &cmd->t_bidi_data_nents,
					       bidi_length, zero_flag);
			if (ret < 0)
				return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		}

		ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents,
				       cmd->data_length, zero_flag);
		if (ret < 0)
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	} else if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
		    cmd->data_length) {
		/*
		 * Special case for COMPARE_AND_WRITE with fabrics
		 * using SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC.
		 */
		u32 caw_length = cmd->t_task_nolb *
				 cmd->se_dev->dev_attrib.block_size;

		ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
				       &cmd->t_bidi_data_nents,
				       caw_length, zero_flag);
		if (ret < 0)
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}
	/*
	 * If this command is not a write we can execute it right here,
	 * for write buffers we need to notify the fabric driver first
	 * and let it call back once the write buffers are ready.
	 */
	target_add_to_state_list(cmd);
	if (cmd->data_direction != DMA_TO_DEVICE || cmd->data_length == 0) {
		target_execute_cmd(cmd);
		return 0;
	}
	transport_cmd_check_stop(cmd, false, true);

	ret = cmd->se_tfo->write_pending(cmd);
	if (ret == -EAGAIN || ret == -ENOMEM)
		goto queue_full;

	/* fabric drivers should only return -EAGAIN or -ENOMEM as error */
	WARN_ON(ret);

	return (!ret) ? 0 : TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

queue_full:
	pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", cmd);
	cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
	transport_handle_queue_full(cmd, cmd->se_dev);
	return 0;
}
EXPORT_SYMBOL(transport_generic_new_cmd);

static void transport_write_pending_qf(struct se_cmd *cmd)
{
	int ret;

	ret = cmd->se_tfo->write_pending(cmd);
	if (ret == -EAGAIN || ret == -ENOMEM) {
		pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n",
			 cmd);
		transport_handle_queue_full(cmd, cmd->se_dev);
	}
}

int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
{
	unsigned long flags;
	int ret = 0;

	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
		if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
			transport_wait_for_tasks(cmd);

		ret = transport_release_cmd(cmd);
	} else {
		if (wait_for_tasks)
			transport_wait_for_tasks(cmd);
		/*
		 * Handle WRITE failure case where transport_generic_new_cmd()
		 * has already added se_cmd to state_list, but fabric has
		 * failed command before I/O submission.
		 */
		if (cmd->state_active) {
			spin_lock_irqsave(&cmd->t_state_lock, flags);
			target_remove_from_state_list(cmd);
			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		}

		if (cmd->se_lun)
			transport_lun_remove_cmd(cmd);

		ret = transport_put_cmd(cmd);
	}
	return ret;
}
EXPORT_SYMBOL(transport_generic_free_cmd);

/* target_get_sess_cmd - Add command to active ->sess_cmd_list
 * @se_cmd:	command descriptor to add
 * @ack_kref:	Signal that fabric will perform an ack target_put_sess_cmd()
 */
int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
{
	struct se_session *se_sess = se_cmd->se_sess;
	unsigned long flags;
	int ret = 0;

	/*
	 * Add a second kref if the fabric caller is expecting to handle
	 * fabric acknowledgement that requires two target_put_sess_cmd()
	 * invocations before se_cmd descriptor release.
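	 *
	 * Illustrative lifetime under this convention (a sketch only, not
	 * taken from a specific fabric driver; the core-side put normally
	 * happens via transport_generic_free_cmd()):
	 *
	 *	target_get_sess_cmd(se_cmd, true);	-- takes the extra ack ref
	 *	...hand the command to the target core...
	 *	target_put_sess_cmd(se_cmd);		-- fabric acknowledgement
	 *	target_put_sess_cmd(se_cmd);		-- final put, ->release_cmd()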
	 */
	if (ack_kref)
		kref_get(&se_cmd->cmd_kref);

	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
	if (se_sess->sess_tearing_down) {
		ret = -ESHUTDOWN;
		goto out;
	}
	list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
out:
	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);

	if (ret && ack_kref)
		target_put_sess_cmd(se_cmd);

	return ret;
}
EXPORT_SYMBOL(target_get_sess_cmd);

static void target_release_cmd_kref(struct kref *kref)
		__releases(&se_cmd->se_sess->sess_cmd_lock)
{
	struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
	struct se_session *se_sess = se_cmd->se_sess;

	if (list_empty(&se_cmd->se_cmd_list)) {
		spin_unlock(&se_sess->sess_cmd_lock);
		se_cmd->se_tfo->release_cmd(se_cmd);
		return;
	}
	if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
		spin_unlock(&se_sess->sess_cmd_lock);
		complete(&se_cmd->cmd_wait_comp);
		return;
	}
	list_del(&se_cmd->se_cmd_list);
	spin_unlock(&se_sess->sess_cmd_lock);

	se_cmd->se_tfo->release_cmd(se_cmd);
}

/* target_put_sess_cmd - Check for active I/O shutdown via kref_put
 * @se_cmd:	command descriptor to drop
 */
int target_put_sess_cmd(struct se_cmd *se_cmd)
{
	struct se_session *se_sess = se_cmd->se_sess;

	if (!se_sess) {
		se_cmd->se_tfo->release_cmd(se_cmd);
		return 1;
	}
	return kref_put_spinlock_irqsave(&se_cmd->cmd_kref,
					 target_release_cmd_kref,
					 &se_sess->sess_cmd_lock);
}
EXPORT_SYMBOL(target_put_sess_cmd);

/* target_sess_cmd_list_set_waiting - Flag all commands in
 *         sess_cmd_list to complete cmd_wait_comp.  Set
 *         sess_tearing_down so no more commands are queued.
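 *         Commands already on the list are moved to sess_wait_list and
 *         are later reaped by target_wait_for_sess_cmds().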
 * @se_sess:	session to flag
 */
void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
{
	struct se_cmd *se_cmd;
	unsigned long flags;

	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
	if (se_sess->sess_tearing_down) {
		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
		return;
	}
	se_sess->sess_tearing_down = 1;
	list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);

	list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list)
		se_cmd->cmd_wait_set = 1;

	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
}
EXPORT_SYMBOL(target_sess_cmd_list_set_waiting);

/* target_wait_for_sess_cmds - Wait for outstanding descriptors
 * @se_sess:	session to wait for active I/O
 */
void target_wait_for_sess_cmds(struct se_session *se_sess)
{
	struct se_cmd *se_cmd, *tmp_cmd;
	unsigned long flags;

	list_for_each_entry_safe(se_cmd, tmp_cmd,
			&se_sess->sess_wait_list, se_cmd_list) {
		list_del(&se_cmd->se_cmd_list);

		pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
			" %d\n", se_cmd, se_cmd->t_state,
			se_cmd->se_tfo->get_cmd_state(se_cmd));

		wait_for_completion(&se_cmd->cmd_wait_comp);
		pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
			" fabric state: %d\n", se_cmd, se_cmd->t_state,
			se_cmd->se_tfo->get_cmd_state(se_cmd));

		se_cmd->se_tfo->release_cmd(se_cmd);
	}

	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
	WARN_ON(!list_empty(&se_sess->sess_cmd_list));
	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
}
EXPORT_SYMBOL(target_wait_for_sess_cmds);

void transport_clear_lun_ref(struct se_lun *lun)
{
	percpu_ref_kill(&lun->lun_ref);
	wait_for_completion(&lun->lun_ref_comp);
}

/**
 * transport_wait_for_tasks - wait for completion to occur
 * @cmd:	command to wait
 *
 * Called from frontend fabric context to wait for storage engine
 * to pause and/or release frontend generated struct se_cmd.
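 *
 * Return: true if CMD_T_STOP was set and the caller waited on
 * t_transport_stop_comp, false if the command was never accepted by the
 * core or is no longer active.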
 */
bool transport_wait_for_tasks(struct se_cmd *cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) &&
	    !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return false;
	}

	if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
	    !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return false;
	}

	if (!(cmd->transport_state & CMD_T_ACTIVE)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return false;
	}

	cmd->transport_state |= CMD_T_STOP;

	pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08llx i_state: %d, t_state: %d, CMD_T_STOP\n",
		 cmd, cmd->tag, cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);

	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	wait_for_completion(&cmd->t_transport_stop_comp);

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);

	pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->t_transport_stop_comp) for ITT: 0x%08llx\n",
		 cmd->tag);

	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	return true;
}
EXPORT_SYMBOL(transport_wait_for_tasks);

struct sense_info {
	u8 key;
	u8 asc;
	u8 ascq;
	bool add_sector_info;
};

static const struct sense_info sense_info_table[] = {
	[TCM_NO_SENSE] = {
		.key = NOT_READY
	},
	[TCM_NON_EXISTENT_LUN] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x25 /* LOGICAL UNIT NOT SUPPORTED */
	},
	[TCM_UNSUPPORTED_SCSI_OPCODE] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x20, /* INVALID COMMAND OPERATION CODE */
	},
	[TCM_SECTOR_COUNT_TOO_MANY] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x20, /* INVALID COMMAND OPERATION CODE */
	},
	[TCM_UNKNOWN_MODE_PAGE] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x24, /* INVALID FIELD IN CDB */
	},
	[TCM_CHECK_CONDITION_ABORT_CMD] = {
		.key = ABORTED_COMMAND,
		.asc = 0x29, /* BUS DEVICE RESET FUNCTION OCCURRED */
		.ascq = 0x03,
	},
	[TCM_INCORRECT_AMOUNT_OF_DATA] = {
		.key = ABORTED_COMMAND,
		.asc = 0x0c, /* WRITE ERROR */
		.ascq = 0x0d, /* NOT ENOUGH UNSOLICITED DATA */
	},
	[TCM_INVALID_CDB_FIELD] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x24, /* INVALID FIELD IN CDB */
	},
	[TCM_INVALID_PARAMETER_LIST] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x26, /* INVALID FIELD IN PARAMETER LIST */
	},
	[TCM_PARAMETER_LIST_LENGTH_ERROR] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x1a, /* PARAMETER LIST LENGTH ERROR */
	},
	[TCM_UNEXPECTED_UNSOLICITED_DATA] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x0c, /* WRITE ERROR */
		.ascq = 0x0c, /* UNEXPECTED_UNSOLICITED_DATA */
	},
	[TCM_SERVICE_CRC_ERROR] = {
		.key = ABORTED_COMMAND,
		.asc = 0x47, /* PROTOCOL SERVICE CRC ERROR */
		.ascq = 0x05, /* N/A */
	},
	[TCM_SNACK_REJECTED] = {
		.key = ABORTED_COMMAND,
		.asc = 0x11, /* READ ERROR */
		.ascq = 0x13, /* FAILED RETRANSMISSION REQUEST */
	},
	[TCM_WRITE_PROTECTED] = {
		.key = DATA_PROTECT,
		.asc = 0x27, /* WRITE PROTECTED */
	},
	[TCM_ADDRESS_OUT_OF_RANGE] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x21, /* LOGICAL BLOCK ADDRESS OUT OF RANGE */
	},
	[TCM_CHECK_CONDITION_UNIT_ATTENTION] = {
		.key = UNIT_ATTENTION,
	},
	[TCM_CHECK_CONDITION_NOT_READY] = {
		.key = NOT_READY,
	},
	[TCM_MISCOMPARE_VERIFY] = {
		.key = MISCOMPARE,
		.asc = 0x1d, /* MISCOMPARE DURING VERIFY OPERATION */
		.ascq = 0x00,
	},
	[TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED] = {
		.key = ABORTED_COMMAND,
		.asc = 0x10,
		.ascq = 0x01, /* LOGICAL BLOCK GUARD CHECK FAILED */
		.add_sector_info = true,
	},
	[TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED] = {
		.key = ABORTED_COMMAND,
		.asc = 0x10,
		.ascq = 0x02, /* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */
		.add_sector_info = true,
	},
	[TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED] = {
		.key = ABORTED_COMMAND,
		.asc = 0x10,
		.ascq = 0x03, /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */
		.add_sector_info = true,
	},
	[TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE] = {
		/*
		 * Returning ILLEGAL REQUEST would cause immediate IO errors on
		 * Solaris initiators.  Returning NOT READY instead means the
		 * operations will be retried a finite number of times and we
		 * can survive intermittent errors.
		 */
		.key = NOT_READY,
		.asc = 0x08, /* LOGICAL UNIT COMMUNICATION FAILURE */
	},
};

static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason)
{
	const struct sense_info *si;
	u8 *buffer = cmd->sense_buffer;
	int r = (__force int)reason;
	u8 asc, ascq;
	bool desc_format = target_sense_desc_format(cmd->se_dev);

	if (r < ARRAY_SIZE(sense_info_table) && sense_info_table[r].key)
		si = &sense_info_table[r];
	else
		si = &sense_info_table[(__force int)
				       TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE];

	if (reason == TCM_CHECK_CONDITION_UNIT_ATTENTION) {
		core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
		WARN_ON_ONCE(asc == 0);
	} else if (si->asc == 0) {
		WARN_ON_ONCE(cmd->scsi_asc == 0);
		asc = cmd->scsi_asc;
		ascq = cmd->scsi_ascq;
	} else {
		asc = si->asc;
		ascq = si->ascq;
	}

	scsi_build_sense_buffer(desc_format, buffer, si->key, asc, ascq);
	if (si->add_sector_info)
		return scsi_set_sense_information(buffer,
						  cmd->scsi_sense_length,
						  cmd->bad_sector);

	return 0;
}

int
transport_send_check_condition_and_sense(struct se_cmd *cmd,
		sense_reason_t reason, int from_transport)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return 0;
	}
	cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	if (!from_transport) {
		int rc;

		cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
		cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
		cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
		rc = translate_sense_reason(cmd, reason);
		if (rc)
			return rc;
	}

	trace_target_cmd_complete(cmd);
	return cmd->se_tfo->queue_status(cmd);
}
EXPORT_SYMBOL(transport_send_check_condition_and_sense);

int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
{
	if (!(cmd->transport_state & CMD_T_ABORTED))
		return 0;

	/*
	 * If cmd has been aborted but either no status is to be sent or it has
	 * already been sent, just return
	 */
	if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS))
		return 1;

	pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB: 0x%02x ITT: 0x%08llx\n",
		 cmd->t_task_cdb[0], cmd->tag);

	cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS;
	cmd->scsi_status = SAM_STAT_TASK_ABORTED;
	trace_target_cmd_complete(cmd);
	cmd->se_tfo->queue_status(cmd);

	return 1;
}
EXPORT_SYMBOL(transport_check_aborted_status);

void transport_send_task_abort(struct se_cmd *cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	/*
	 * If there are still expected incoming fabric WRITEs, we wait
	 * until they have completed before sending a TASK_ABORTED
	 * response.  This response with TASK_ABORTED status will be
	 * queued back to fabric module by transport_check_aborted_status().
	 */
	if (cmd->data_direction == DMA_TO_DEVICE) {
		if (cmd->se_tfo->write_pending_status(cmd) != 0) {
			cmd->transport_state |= CMD_T_ABORTED;
			cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
			return;
		}
	}
	cmd->scsi_status = SAM_STAT_TASK_ABORTED;

	transport_lun_remove_cmd(cmd);

	pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x, ITT: 0x%08llx\n",
		 cmd->t_task_cdb[0], cmd->tag);

	trace_target_cmd_complete(cmd);
	cmd->se_tfo->queue_status(cmd);
}

static void target_tmr_work(struct work_struct *work)
{
	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
	struct se_device *dev = cmd->se_dev;
	struct se_tmr_req *tmr = cmd->se_tmr_req;
	int ret;

	switch (tmr->function) {
	case TMR_ABORT_TASK:
		core_tmr_abort_task(dev, tmr, cmd->se_sess);
		break;
	case TMR_ABORT_TASK_SET:
	case TMR_CLEAR_ACA:
	case TMR_CLEAR_TASK_SET:
		tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
		break;
	case TMR_LUN_RESET:
		ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
		tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
					 TMR_FUNCTION_REJECTED;
		if (tmr->response == TMR_FUNCTION_COMPLETE) {
			target_ua_allocate_lun(cmd->se_sess->se_node_acl,
					cmd->orig_fe_lun, 0x29,
					ASCQ_29H_BUS_DEVICE_RESET_FUNCTION_OCCURRED);
		}
		break;
	case TMR_TARGET_WARM_RESET:
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	case TMR_TARGET_COLD_RESET:
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	default:
		pr_err("Unknown TMR function: 0x%02x.\n",
		       tmr->function);
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	}

	cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
	cmd->se_tfo->queue_tm_rsp(cmd);

	transport_cmd_check_stop_to_fabric(cmd);
}

int transport_generic_handle_tmr(
	struct se_cmd *cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	cmd->transport_state |= CMD_T_ACTIVE;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	INIT_WORK(&cmd->work, target_tmr_work);
	queue_work(cmd->se_dev->tmr_wq, &cmd->work);
	return 0;
}
EXPORT_SYMBOL(transport_generic_handle_tmr);

bool
target_check_wce(struct se_device *dev)
{
	bool wce = false;

	if (dev->transport->get_write_cache)
		wce = dev->transport->get_write_cache(dev);
	else if (dev->dev_attrib.emulate_write_cache > 0)
		wce = true;

	return wce;
}

bool
target_check_fua(struct se_device *dev)
{
	return target_check_wce(dev) && dev->dev_attrib.emulate_fua_write > 0;
}
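
/*
 * target_check_wce() and target_check_fua() above are small policy helpers:
 * a write cache is reported when the backend exposes one (either via its
 * ->get_write_cache() callback or the emulate_write_cache attribute), and
 * FUA writes are only honoured when a write cache is present and
 * emulate_fua_write is set.
 */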