1 /******************************************************************************* 2 * Filename: target_core_transport.c 3 * 4 * This file contains the Generic Target Engine Core. 5 * 6 * (c) Copyright 2002-2013 Datera, Inc. 7 * 8 * Nicholas A. Bellinger <nab@kernel.org> 9 * 10 * This program is free software; you can redistribute it and/or modify 11 * it under the terms of the GNU General Public License as published by 12 * the Free Software Foundation; either version 2 of the License, or 13 * (at your option) any later version. 14 * 15 * This program is distributed in the hope that it will be useful, 16 * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 * GNU General Public License for more details. 19 * 20 * You should have received a copy of the GNU General Public License 21 * along with this program; if not, write to the Free Software 22 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 23 * 24 ******************************************************************************/ 25 26 #include <linux/net.h> 27 #include <linux/delay.h> 28 #include <linux/string.h> 29 #include <linux/timer.h> 30 #include <linux/slab.h> 31 #include <linux/spinlock.h> 32 #include <linux/kthread.h> 33 #include <linux/in.h> 34 #include <linux/cdrom.h> 35 #include <linux/module.h> 36 #include <linux/ratelimit.h> 37 #include <linux/vmalloc.h> 38 #include <asm/unaligned.h> 39 #include <net/sock.h> 40 #include <net/tcp.h> 41 #include <scsi/scsi_proto.h> 42 #include <scsi/scsi_common.h> 43 44 #include <target/target_core_base.h> 45 #include <target/target_core_backend.h> 46 #include <target/target_core_fabric.h> 47 48 #include "target_core_internal.h" 49 #include "target_core_alua.h" 50 #include "target_core_pr.h" 51 #include "target_core_ua.h" 52 53 #define CREATE_TRACE_POINTS 54 #include <trace/events/target.h> 55 56 static struct workqueue_struct *target_completion_wq; 57 static struct kmem_cache *se_sess_cache; 58 struct kmem_cache *se_ua_cache; 59 struct kmem_cache *t10_pr_reg_cache; 60 struct kmem_cache *t10_alua_lu_gp_cache; 61 struct kmem_cache *t10_alua_lu_gp_mem_cache; 62 struct kmem_cache *t10_alua_tg_pt_gp_cache; 63 struct kmem_cache *t10_alua_lba_map_cache; 64 struct kmem_cache *t10_alua_lba_map_mem_cache; 65 66 static void transport_complete_task_attr(struct se_cmd *cmd); 67 static void transport_handle_queue_full(struct se_cmd *cmd, 68 struct se_device *dev); 69 static int transport_put_cmd(struct se_cmd *cmd); 70 static void target_complete_ok_work(struct work_struct *work); 71 72 int init_se_kmem_caches(void) 73 { 74 se_sess_cache = kmem_cache_create("se_sess_cache", 75 sizeof(struct se_session), __alignof__(struct se_session), 76 0, NULL); 77 if (!se_sess_cache) { 78 pr_err("kmem_cache_create() for struct se_session" 79 " failed\n"); 80 goto out; 81 } 82 se_ua_cache = kmem_cache_create("se_ua_cache", 83 sizeof(struct se_ua), __alignof__(struct se_ua), 84 0, NULL); 85 if (!se_ua_cache) { 86 pr_err("kmem_cache_create() for struct se_ua failed\n"); 87 goto out_free_sess_cache; 88 } 89 t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache", 90 sizeof(struct t10_pr_registration), 91 __alignof__(struct t10_pr_registration), 0, NULL); 92 if (!t10_pr_reg_cache) { 93 pr_err("kmem_cache_create() for struct t10_pr_registration" 94 " failed\n"); 95 goto out_free_ua_cache; 96 } 97 t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache", 98 sizeof(struct t10_alua_lu_gp), __alignof__(struct 
t10_alua_lu_gp), 99 0, NULL); 100 if (!t10_alua_lu_gp_cache) { 101 pr_err("kmem_cache_create() for t10_alua_lu_gp_cache" 102 " failed\n"); 103 goto out_free_pr_reg_cache; 104 } 105 t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache", 106 sizeof(struct t10_alua_lu_gp_member), 107 __alignof__(struct t10_alua_lu_gp_member), 0, NULL); 108 if (!t10_alua_lu_gp_mem_cache) { 109 pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_" 110 "cache failed\n"); 111 goto out_free_lu_gp_cache; 112 } 113 t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache", 114 sizeof(struct t10_alua_tg_pt_gp), 115 __alignof__(struct t10_alua_tg_pt_gp), 0, NULL); 116 if (!t10_alua_tg_pt_gp_cache) { 117 pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_" 118 "cache failed\n"); 119 goto out_free_lu_gp_mem_cache; 120 } 121 t10_alua_lba_map_cache = kmem_cache_create( 122 "t10_alua_lba_map_cache", 123 sizeof(struct t10_alua_lba_map), 124 __alignof__(struct t10_alua_lba_map), 0, NULL); 125 if (!t10_alua_lba_map_cache) { 126 pr_err("kmem_cache_create() for t10_alua_lba_map_" 127 "cache failed\n"); 128 goto out_free_tg_pt_gp_cache; 129 } 130 t10_alua_lba_map_mem_cache = kmem_cache_create( 131 "t10_alua_lba_map_mem_cache", 132 sizeof(struct t10_alua_lba_map_member), 133 __alignof__(struct t10_alua_lba_map_member), 0, NULL); 134 if (!t10_alua_lba_map_mem_cache) { 135 pr_err("kmem_cache_create() for t10_alua_lba_map_mem_" 136 "cache failed\n"); 137 goto out_free_lba_map_cache; 138 } 139 140 target_completion_wq = alloc_workqueue("target_completion", 141 WQ_MEM_RECLAIM, 0); 142 if (!target_completion_wq) 143 goto out_free_lba_map_mem_cache; 144 145 return 0; 146 147 out_free_lba_map_mem_cache: 148 kmem_cache_destroy(t10_alua_lba_map_mem_cache); 149 out_free_lba_map_cache: 150 kmem_cache_destroy(t10_alua_lba_map_cache); 151 out_free_tg_pt_gp_cache: 152 kmem_cache_destroy(t10_alua_tg_pt_gp_cache); 153 out_free_lu_gp_mem_cache: 154 kmem_cache_destroy(t10_alua_lu_gp_mem_cache); 155 out_free_lu_gp_cache: 156 kmem_cache_destroy(t10_alua_lu_gp_cache); 157 out_free_pr_reg_cache: 158 kmem_cache_destroy(t10_pr_reg_cache); 159 out_free_ua_cache: 160 kmem_cache_destroy(se_ua_cache); 161 out_free_sess_cache: 162 kmem_cache_destroy(se_sess_cache); 163 out: 164 return -ENOMEM; 165 } 166 167 void release_se_kmem_caches(void) 168 { 169 destroy_workqueue(target_completion_wq); 170 kmem_cache_destroy(se_sess_cache); 171 kmem_cache_destroy(se_ua_cache); 172 kmem_cache_destroy(t10_pr_reg_cache); 173 kmem_cache_destroy(t10_alua_lu_gp_cache); 174 kmem_cache_destroy(t10_alua_lu_gp_mem_cache); 175 kmem_cache_destroy(t10_alua_tg_pt_gp_cache); 176 kmem_cache_destroy(t10_alua_lba_map_cache); 177 kmem_cache_destroy(t10_alua_lba_map_mem_cache); 178 } 179 180 /* This code ensures unique mib indexes are handed out. 
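 *
 * Each scsi_index_t type below has its own counter, zero-initialized and
 * bumped under scsi_mib_index_lock, so e.g. the first caller for a given
 * type receives 1, the next receives 2, and counters of different types
 * never affect one another.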
*/ 181 static DEFINE_SPINLOCK(scsi_mib_index_lock); 182 static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX]; 183 184 /* 185 * Allocate a new row index for the entry type specified 186 */ 187 u32 scsi_get_new_index(scsi_index_t type) 188 { 189 u32 new_index; 190 191 BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)); 192 193 spin_lock(&scsi_mib_index_lock); 194 new_index = ++scsi_mib_index[type]; 195 spin_unlock(&scsi_mib_index_lock); 196 197 return new_index; 198 } 199 200 void transport_subsystem_check_init(void) 201 { 202 int ret; 203 static int sub_api_initialized; 204 205 if (sub_api_initialized) 206 return; 207 208 ret = request_module("target_core_iblock"); 209 if (ret != 0) 210 pr_err("Unable to load target_core_iblock\n"); 211 212 ret = request_module("target_core_file"); 213 if (ret != 0) 214 pr_err("Unable to load target_core_file\n"); 215 216 ret = request_module("target_core_pscsi"); 217 if (ret != 0) 218 pr_err("Unable to load target_core_pscsi\n"); 219 220 ret = request_module("target_core_user"); 221 if (ret != 0) 222 pr_err("Unable to load target_core_user\n"); 223 224 sub_api_initialized = 1; 225 } 226 227 struct se_session *transport_init_session(enum target_prot_op sup_prot_ops) 228 { 229 struct se_session *se_sess; 230 231 se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL); 232 if (!se_sess) { 233 pr_err("Unable to allocate struct se_session from" 234 " se_sess_cache\n"); 235 return ERR_PTR(-ENOMEM); 236 } 237 INIT_LIST_HEAD(&se_sess->sess_list); 238 INIT_LIST_HEAD(&se_sess->sess_acl_list); 239 INIT_LIST_HEAD(&se_sess->sess_cmd_list); 240 INIT_LIST_HEAD(&se_sess->sess_wait_list); 241 spin_lock_init(&se_sess->sess_cmd_lock); 242 kref_init(&se_sess->sess_kref); 243 se_sess->sup_prot_ops = sup_prot_ops; 244 245 return se_sess; 246 } 247 EXPORT_SYMBOL(transport_init_session); 248 249 int transport_alloc_session_tags(struct se_session *se_sess, 250 unsigned int tag_num, unsigned int tag_size) 251 { 252 int rc; 253 254 se_sess->sess_cmd_map = kzalloc(tag_num * tag_size, 255 GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT); 256 if (!se_sess->sess_cmd_map) { 257 se_sess->sess_cmd_map = vzalloc(tag_num * tag_size); 258 if (!se_sess->sess_cmd_map) { 259 pr_err("Unable to allocate se_sess->sess_cmd_map\n"); 260 return -ENOMEM; 261 } 262 } 263 264 rc = percpu_ida_init(&se_sess->sess_tag_pool, tag_num); 265 if (rc < 0) { 266 pr_err("Unable to init se_sess->sess_tag_pool," 267 " tag_num: %u\n", tag_num); 268 kvfree(se_sess->sess_cmd_map); 269 se_sess->sess_cmd_map = NULL; 270 return -ENOMEM; 271 } 272 273 return 0; 274 } 275 EXPORT_SYMBOL(transport_alloc_session_tags); 276 277 struct se_session *transport_init_session_tags(unsigned int tag_num, 278 unsigned int tag_size, 279 enum target_prot_op sup_prot_ops) 280 { 281 struct se_session *se_sess; 282 int rc; 283 284 se_sess = transport_init_session(sup_prot_ops); 285 if (IS_ERR(se_sess)) 286 return se_sess; 287 288 rc = transport_alloc_session_tags(se_sess, tag_num, tag_size); 289 if (rc < 0) { 290 transport_free_session(se_sess); 291 return ERR_PTR(-ENOMEM); 292 } 293 294 return se_sess; 295 } 296 EXPORT_SYMBOL(transport_init_session_tags); 297 298 /* 299 * Called with spin_lock_irqsave(&struct se_portal_group->session_lock called. 
 */
void __transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
	unsigned char buf[PR_REG_ISID_LEN];

	se_sess->se_tpg = se_tpg;
	se_sess->fabric_sess_ptr = fabric_sess_ptr;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate active se_sessions.
	 *
	 * Only set for struct se_session's that will actually be moving I/O.
	 * eg: *NOT* discovery sessions.
	 */
	if (se_nacl) {
		/*
		 * Determine if fabric allows for T10-PI feature bits exposed to
		 * initiators for device backends with !dev->dev_attrib.pi_prot_type.
		 *
		 * If so, then always save prot_type on a per se_node_acl node
		 * basis and re-instate the previous sess_prot_type to avoid
		 * disabling PI from below any previously initiator side
		 * registered LUNs.
		 */
		if (se_nacl->saved_prot_type)
			se_sess->sess_prot_type = se_nacl->saved_prot_type;
		else if (tfo->tpg_check_prot_fabric_only)
			se_sess->sess_prot_type = se_nacl->saved_prot_type =
					tfo->tpg_check_prot_fabric_only(se_tpg);
		/*
		 * If the fabric module supports an ISID based TransportID,
		 * save this value in binary from the fabric I_T Nexus now.
		 */
		if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
			memset(&buf[0], 0, PR_REG_ISID_LEN);
			se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
					&buf[0], PR_REG_ISID_LEN);
			se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
		}
		kref_get(&se_nacl->acl_kref);

		spin_lock_irq(&se_nacl->nacl_sess_lock);
		/*
		 * The se_nacl->nacl_sess pointer will be set to the
		 * last active I_T Nexus for each struct se_node_acl.
350 */ 351 se_nacl->nacl_sess = se_sess; 352 353 list_add_tail(&se_sess->sess_acl_list, 354 &se_nacl->acl_sess_list); 355 spin_unlock_irq(&se_nacl->nacl_sess_lock); 356 } 357 list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list); 358 359 pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n", 360 se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr); 361 } 362 EXPORT_SYMBOL(__transport_register_session); 363 364 void transport_register_session( 365 struct se_portal_group *se_tpg, 366 struct se_node_acl *se_nacl, 367 struct se_session *se_sess, 368 void *fabric_sess_ptr) 369 { 370 unsigned long flags; 371 372 spin_lock_irqsave(&se_tpg->session_lock, flags); 373 __transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr); 374 spin_unlock_irqrestore(&se_tpg->session_lock, flags); 375 } 376 EXPORT_SYMBOL(transport_register_session); 377 378 static void target_release_session(struct kref *kref) 379 { 380 struct se_session *se_sess = container_of(kref, 381 struct se_session, sess_kref); 382 struct se_portal_group *se_tpg = se_sess->se_tpg; 383 384 se_tpg->se_tpg_tfo->close_session(se_sess); 385 } 386 387 void target_get_session(struct se_session *se_sess) 388 { 389 kref_get(&se_sess->sess_kref); 390 } 391 EXPORT_SYMBOL(target_get_session); 392 393 void target_put_session(struct se_session *se_sess) 394 { 395 kref_put(&se_sess->sess_kref, target_release_session); 396 } 397 EXPORT_SYMBOL(target_put_session); 398 399 ssize_t target_show_dynamic_sessions(struct se_portal_group *se_tpg, char *page) 400 { 401 struct se_session *se_sess; 402 ssize_t len = 0; 403 404 spin_lock_bh(&se_tpg->session_lock); 405 list_for_each_entry(se_sess, &se_tpg->tpg_sess_list, sess_list) { 406 if (!se_sess->se_node_acl) 407 continue; 408 if (!se_sess->se_node_acl->dynamic_node_acl) 409 continue; 410 if (strlen(se_sess->se_node_acl->initiatorname) + 1 + len > PAGE_SIZE) 411 break; 412 413 len += snprintf(page + len, PAGE_SIZE - len, "%s\n", 414 se_sess->se_node_acl->initiatorname); 415 len += 1; /* Include NULL terminator */ 416 } 417 spin_unlock_bh(&se_tpg->session_lock); 418 419 return len; 420 } 421 EXPORT_SYMBOL(target_show_dynamic_sessions); 422 423 static void target_complete_nacl(struct kref *kref) 424 { 425 struct se_node_acl *nacl = container_of(kref, 426 struct se_node_acl, acl_kref); 427 428 complete(&nacl->acl_free_comp); 429 } 430 431 void target_put_nacl(struct se_node_acl *nacl) 432 { 433 kref_put(&nacl->acl_kref, target_complete_nacl); 434 } 435 436 void transport_deregister_session_configfs(struct se_session *se_sess) 437 { 438 struct se_node_acl *se_nacl; 439 unsigned long flags; 440 /* 441 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session 442 */ 443 se_nacl = se_sess->se_node_acl; 444 if (se_nacl) { 445 spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags); 446 if (se_nacl->acl_stop == 0) 447 list_del(&se_sess->sess_acl_list); 448 /* 449 * If the session list is empty, then clear the pointer. 450 * Otherwise, set the struct se_session pointer from the tail 451 * element of the per struct se_node_acl active session list. 
452 */ 453 if (list_empty(&se_nacl->acl_sess_list)) 454 se_nacl->nacl_sess = NULL; 455 else { 456 se_nacl->nacl_sess = container_of( 457 se_nacl->acl_sess_list.prev, 458 struct se_session, sess_acl_list); 459 } 460 spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags); 461 } 462 } 463 EXPORT_SYMBOL(transport_deregister_session_configfs); 464 465 void transport_free_session(struct se_session *se_sess) 466 { 467 if (se_sess->sess_cmd_map) { 468 percpu_ida_destroy(&se_sess->sess_tag_pool); 469 kvfree(se_sess->sess_cmd_map); 470 } 471 kmem_cache_free(se_sess_cache, se_sess); 472 } 473 EXPORT_SYMBOL(transport_free_session); 474 475 void transport_deregister_session(struct se_session *se_sess) 476 { 477 struct se_portal_group *se_tpg = se_sess->se_tpg; 478 const struct target_core_fabric_ops *se_tfo; 479 struct se_node_acl *se_nacl; 480 unsigned long flags; 481 bool comp_nacl = true, drop_nacl = false; 482 483 if (!se_tpg) { 484 transport_free_session(se_sess); 485 return; 486 } 487 se_tfo = se_tpg->se_tpg_tfo; 488 489 spin_lock_irqsave(&se_tpg->session_lock, flags); 490 list_del(&se_sess->sess_list); 491 se_sess->se_tpg = NULL; 492 se_sess->fabric_sess_ptr = NULL; 493 spin_unlock_irqrestore(&se_tpg->session_lock, flags); 494 495 /* 496 * Determine if we need to do extra work for this initiator node's 497 * struct se_node_acl if it had been previously dynamically generated. 498 */ 499 se_nacl = se_sess->se_node_acl; 500 501 mutex_lock(&se_tpg->acl_node_mutex); 502 if (se_nacl && se_nacl->dynamic_node_acl) { 503 if (!se_tfo->tpg_check_demo_mode_cache(se_tpg)) { 504 list_del(&se_nacl->acl_list); 505 se_tpg->num_node_acls--; 506 drop_nacl = true; 507 } 508 } 509 mutex_unlock(&se_tpg->acl_node_mutex); 510 511 if (drop_nacl) { 512 core_tpg_wait_for_nacl_pr_ref(se_nacl); 513 core_free_device_list_for_node(se_nacl, se_tpg); 514 kfree(se_nacl); 515 comp_nacl = false; 516 } 517 pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n", 518 se_tpg->se_tpg_tfo->get_fabric_name()); 519 /* 520 * If last kref is dropping now for an explicit NodeACL, awake sleeping 521 * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group 522 * removal context. 523 */ 524 if (se_nacl && comp_nacl) 525 target_put_nacl(se_nacl); 526 527 transport_free_session(se_sess); 528 } 529 EXPORT_SYMBOL(transport_deregister_session); 530 531 /* 532 * Called with cmd->t_state_lock held. 533 */ 534 static void target_remove_from_state_list(struct se_cmd *cmd) 535 { 536 struct se_device *dev = cmd->se_dev; 537 unsigned long flags; 538 539 if (!dev) 540 return; 541 542 if (cmd->transport_state & CMD_T_BUSY) 543 return; 544 545 spin_lock_irqsave(&dev->execute_task_lock, flags); 546 if (cmd->state_active) { 547 list_del(&cmd->state_list); 548 cmd->state_active = false; 549 } 550 spin_unlock_irqrestore(&dev->execute_task_lock, flags); 551 } 552 553 static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists, 554 bool write_pending) 555 { 556 unsigned long flags; 557 558 spin_lock_irqsave(&cmd->t_state_lock, flags); 559 if (write_pending) 560 cmd->t_state = TRANSPORT_WRITE_PENDING; 561 562 if (remove_from_lists) { 563 target_remove_from_state_list(cmd); 564 565 /* 566 * Clear struct se_cmd->se_lun before the handoff to FE. 567 */ 568 cmd->se_lun = NULL; 569 } 570 571 /* 572 * Determine if frontend context caller is requesting the stopping of 573 * this command for frontend exceptions. 
574 */ 575 if (cmd->transport_state & CMD_T_STOP) { 576 pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n", 577 __func__, __LINE__, cmd->tag); 578 579 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 580 581 complete_all(&cmd->t_transport_stop_comp); 582 return 1; 583 } 584 585 cmd->transport_state &= ~CMD_T_ACTIVE; 586 if (remove_from_lists) { 587 /* 588 * Some fabric modules like tcm_loop can release 589 * their internally allocated I/O reference now and 590 * struct se_cmd now. 591 * 592 * Fabric modules are expected to return '1' here if the 593 * se_cmd being passed is released at this point, 594 * or zero if not being released. 595 */ 596 if (cmd->se_tfo->check_stop_free != NULL) { 597 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 598 return cmd->se_tfo->check_stop_free(cmd); 599 } 600 } 601 602 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 603 return 0; 604 } 605 606 static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd) 607 { 608 return transport_cmd_check_stop(cmd, true, false); 609 } 610 611 static void transport_lun_remove_cmd(struct se_cmd *cmd) 612 { 613 struct se_lun *lun = cmd->se_lun; 614 615 if (!lun) 616 return; 617 618 if (cmpxchg(&cmd->lun_ref_active, true, false)) 619 percpu_ref_put(&lun->lun_ref); 620 } 621 622 void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) 623 { 624 if (cmd->se_cmd_flags & SCF_SE_LUN_CMD) 625 transport_lun_remove_cmd(cmd); 626 /* 627 * Allow the fabric driver to unmap any resources before 628 * releasing the descriptor via TFO->release_cmd() 629 */ 630 if (remove) 631 cmd->se_tfo->aborted_task(cmd); 632 633 if (transport_cmd_check_stop_to_fabric(cmd)) 634 return; 635 if (remove) 636 transport_put_cmd(cmd); 637 } 638 639 static void target_complete_failure_work(struct work_struct *work) 640 { 641 struct se_cmd *cmd = container_of(work, struct se_cmd, work); 642 643 transport_generic_request_failure(cmd, 644 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE); 645 } 646 647 /* 648 * Used when asking transport to copy Sense Data from the underlying 649 * Linux/SCSI struct scsi_cmnd 650 */ 651 static unsigned char *transport_get_sense_buffer(struct se_cmd *cmd) 652 { 653 struct se_device *dev = cmd->se_dev; 654 655 WARN_ON(!cmd->se_lun); 656 657 if (!dev) 658 return NULL; 659 660 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) 661 return NULL; 662 663 cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER; 664 665 pr_debug("HBA_[%u]_PLUG[%s]: Requesting sense for SAM STATUS: 0x%02x\n", 666 dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status); 667 return cmd->sense_buffer; 668 } 669 670 void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) 671 { 672 struct se_device *dev = cmd->se_dev; 673 int success = scsi_status == GOOD; 674 unsigned long flags; 675 676 cmd->scsi_status = scsi_status; 677 678 679 spin_lock_irqsave(&cmd->t_state_lock, flags); 680 cmd->transport_state &= ~CMD_T_BUSY; 681 682 if (dev && dev->transport->transport_complete) { 683 dev->transport->transport_complete(cmd, 684 cmd->t_data_sg, 685 transport_get_sense_buffer(cmd)); 686 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) 687 success = 1; 688 } 689 690 /* 691 * See if we are waiting to complete for an exception condition. 692 */ 693 if (cmd->transport_state & CMD_T_REQUEST_STOP) { 694 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 695 complete(&cmd->task_stop_comp); 696 return; 697 } 698 699 /* 700 * Check for case where an explicit ABORT_TASK has been received 701 * and transport_wait_for_tasks() will be waiting for completion.. 
702 */ 703 if (cmd->transport_state & CMD_T_ABORTED && 704 cmd->transport_state & CMD_T_STOP) { 705 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 706 complete_all(&cmd->t_transport_stop_comp); 707 return; 708 } else if (!success) { 709 INIT_WORK(&cmd->work, target_complete_failure_work); 710 } else { 711 INIT_WORK(&cmd->work, target_complete_ok_work); 712 } 713 714 cmd->t_state = TRANSPORT_COMPLETE; 715 cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE); 716 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 717 718 queue_work(target_completion_wq, &cmd->work); 719 } 720 EXPORT_SYMBOL(target_complete_cmd); 721 722 void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length) 723 { 724 if (scsi_status == SAM_STAT_GOOD && length < cmd->data_length) { 725 if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { 726 cmd->residual_count += cmd->data_length - length; 727 } else { 728 cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; 729 cmd->residual_count = cmd->data_length - length; 730 } 731 732 cmd->data_length = length; 733 } 734 735 target_complete_cmd(cmd, scsi_status); 736 } 737 EXPORT_SYMBOL(target_complete_cmd_with_length); 738 739 static void target_add_to_state_list(struct se_cmd *cmd) 740 { 741 struct se_device *dev = cmd->se_dev; 742 unsigned long flags; 743 744 spin_lock_irqsave(&dev->execute_task_lock, flags); 745 if (!cmd->state_active) { 746 list_add_tail(&cmd->state_list, &dev->state_list); 747 cmd->state_active = true; 748 } 749 spin_unlock_irqrestore(&dev->execute_task_lock, flags); 750 } 751 752 /* 753 * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status 754 */ 755 static void transport_write_pending_qf(struct se_cmd *cmd); 756 static void transport_complete_qf(struct se_cmd *cmd); 757 758 void target_qf_do_work(struct work_struct *work) 759 { 760 struct se_device *dev = container_of(work, struct se_device, 761 qf_work_queue); 762 LIST_HEAD(qf_cmd_list); 763 struct se_cmd *cmd, *cmd_tmp; 764 765 spin_lock_irq(&dev->qf_cmd_lock); 766 list_splice_init(&dev->qf_cmd_list, &qf_cmd_list); 767 spin_unlock_irq(&dev->qf_cmd_lock); 768 769 list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) { 770 list_del(&cmd->se_qf_node); 771 atomic_dec_mb(&dev->dev_qf_count); 772 773 pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue" 774 " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd, 775 (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" : 776 (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? 
"WRITE_PENDING" 777 : "UNKNOWN"); 778 779 if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) 780 transport_write_pending_qf(cmd); 781 else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) 782 transport_complete_qf(cmd); 783 } 784 } 785 786 unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd) 787 { 788 switch (cmd->data_direction) { 789 case DMA_NONE: 790 return "NONE"; 791 case DMA_FROM_DEVICE: 792 return "READ"; 793 case DMA_TO_DEVICE: 794 return "WRITE"; 795 case DMA_BIDIRECTIONAL: 796 return "BIDI"; 797 default: 798 break; 799 } 800 801 return "UNKNOWN"; 802 } 803 804 void transport_dump_dev_state( 805 struct se_device *dev, 806 char *b, 807 int *bl) 808 { 809 *bl += sprintf(b + *bl, "Status: "); 810 if (dev->export_count) 811 *bl += sprintf(b + *bl, "ACTIVATED"); 812 else 813 *bl += sprintf(b + *bl, "DEACTIVATED"); 814 815 *bl += sprintf(b + *bl, " Max Queue Depth: %d", dev->queue_depth); 816 *bl += sprintf(b + *bl, " SectorSize: %u HwMaxSectors: %u\n", 817 dev->dev_attrib.block_size, 818 dev->dev_attrib.hw_max_sectors); 819 *bl += sprintf(b + *bl, " "); 820 } 821 822 void transport_dump_vpd_proto_id( 823 struct t10_vpd *vpd, 824 unsigned char *p_buf, 825 int p_buf_len) 826 { 827 unsigned char buf[VPD_TMP_BUF_SIZE]; 828 int len; 829 830 memset(buf, 0, VPD_TMP_BUF_SIZE); 831 len = sprintf(buf, "T10 VPD Protocol Identifier: "); 832 833 switch (vpd->protocol_identifier) { 834 case 0x00: 835 sprintf(buf+len, "Fibre Channel\n"); 836 break; 837 case 0x10: 838 sprintf(buf+len, "Parallel SCSI\n"); 839 break; 840 case 0x20: 841 sprintf(buf+len, "SSA\n"); 842 break; 843 case 0x30: 844 sprintf(buf+len, "IEEE 1394\n"); 845 break; 846 case 0x40: 847 sprintf(buf+len, "SCSI Remote Direct Memory Access" 848 " Protocol\n"); 849 break; 850 case 0x50: 851 sprintf(buf+len, "Internet SCSI (iSCSI)\n"); 852 break; 853 case 0x60: 854 sprintf(buf+len, "SAS Serial SCSI Protocol\n"); 855 break; 856 case 0x70: 857 sprintf(buf+len, "Automation/Drive Interface Transport" 858 " Protocol\n"); 859 break; 860 case 0x80: 861 sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n"); 862 break; 863 default: 864 sprintf(buf+len, "Unknown 0x%02x\n", 865 vpd->protocol_identifier); 866 break; 867 } 868 869 if (p_buf) 870 strncpy(p_buf, buf, p_buf_len); 871 else 872 pr_debug("%s", buf); 873 } 874 875 void 876 transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83) 877 { 878 /* 879 * Check if the Protocol Identifier Valid (PIV) bit is set.. 
880 * 881 * from spc3r23.pdf section 7.5.1 882 */ 883 if (page_83[1] & 0x80) { 884 vpd->protocol_identifier = (page_83[0] & 0xf0); 885 vpd->protocol_identifier_set = 1; 886 transport_dump_vpd_proto_id(vpd, NULL, 0); 887 } 888 } 889 EXPORT_SYMBOL(transport_set_vpd_proto_id); 890 891 int transport_dump_vpd_assoc( 892 struct t10_vpd *vpd, 893 unsigned char *p_buf, 894 int p_buf_len) 895 { 896 unsigned char buf[VPD_TMP_BUF_SIZE]; 897 int ret = 0; 898 int len; 899 900 memset(buf, 0, VPD_TMP_BUF_SIZE); 901 len = sprintf(buf, "T10 VPD Identifier Association: "); 902 903 switch (vpd->association) { 904 case 0x00: 905 sprintf(buf+len, "addressed logical unit\n"); 906 break; 907 case 0x10: 908 sprintf(buf+len, "target port\n"); 909 break; 910 case 0x20: 911 sprintf(buf+len, "SCSI target device\n"); 912 break; 913 default: 914 sprintf(buf+len, "Unknown 0x%02x\n", vpd->association); 915 ret = -EINVAL; 916 break; 917 } 918 919 if (p_buf) 920 strncpy(p_buf, buf, p_buf_len); 921 else 922 pr_debug("%s", buf); 923 924 return ret; 925 } 926 927 int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83) 928 { 929 /* 930 * The VPD identification association.. 931 * 932 * from spc3r23.pdf Section 7.6.3.1 Table 297 933 */ 934 vpd->association = (page_83[1] & 0x30); 935 return transport_dump_vpd_assoc(vpd, NULL, 0); 936 } 937 EXPORT_SYMBOL(transport_set_vpd_assoc); 938 939 int transport_dump_vpd_ident_type( 940 struct t10_vpd *vpd, 941 unsigned char *p_buf, 942 int p_buf_len) 943 { 944 unsigned char buf[VPD_TMP_BUF_SIZE]; 945 int ret = 0; 946 int len; 947 948 memset(buf, 0, VPD_TMP_BUF_SIZE); 949 len = sprintf(buf, "T10 VPD Identifier Type: "); 950 951 switch (vpd->device_identifier_type) { 952 case 0x00: 953 sprintf(buf+len, "Vendor specific\n"); 954 break; 955 case 0x01: 956 sprintf(buf+len, "T10 Vendor ID based\n"); 957 break; 958 case 0x02: 959 sprintf(buf+len, "EUI-64 based\n"); 960 break; 961 case 0x03: 962 sprintf(buf+len, "NAA\n"); 963 break; 964 case 0x04: 965 sprintf(buf+len, "Relative target port identifier\n"); 966 break; 967 case 0x08: 968 sprintf(buf+len, "SCSI name string\n"); 969 break; 970 default: 971 sprintf(buf+len, "Unsupported: 0x%02x\n", 972 vpd->device_identifier_type); 973 ret = -EINVAL; 974 break; 975 } 976 977 if (p_buf) { 978 if (p_buf_len < strlen(buf)+1) 979 return -EINVAL; 980 strncpy(p_buf, buf, p_buf_len); 981 } else { 982 pr_debug("%s", buf); 983 } 984 985 return ret; 986 } 987 988 int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83) 989 { 990 /* 991 * The VPD identifier type.. 
992 * 993 * from spc3r23.pdf Section 7.6.3.1 Table 298 994 */ 995 vpd->device_identifier_type = (page_83[1] & 0x0f); 996 return transport_dump_vpd_ident_type(vpd, NULL, 0); 997 } 998 EXPORT_SYMBOL(transport_set_vpd_ident_type); 999 1000 int transport_dump_vpd_ident( 1001 struct t10_vpd *vpd, 1002 unsigned char *p_buf, 1003 int p_buf_len) 1004 { 1005 unsigned char buf[VPD_TMP_BUF_SIZE]; 1006 int ret = 0; 1007 1008 memset(buf, 0, VPD_TMP_BUF_SIZE); 1009 1010 switch (vpd->device_identifier_code_set) { 1011 case 0x01: /* Binary */ 1012 snprintf(buf, sizeof(buf), 1013 "T10 VPD Binary Device Identifier: %s\n", 1014 &vpd->device_identifier[0]); 1015 break; 1016 case 0x02: /* ASCII */ 1017 snprintf(buf, sizeof(buf), 1018 "T10 VPD ASCII Device Identifier: %s\n", 1019 &vpd->device_identifier[0]); 1020 break; 1021 case 0x03: /* UTF-8 */ 1022 snprintf(buf, sizeof(buf), 1023 "T10 VPD UTF-8 Device Identifier: %s\n", 1024 &vpd->device_identifier[0]); 1025 break; 1026 default: 1027 sprintf(buf, "T10 VPD Device Identifier encoding unsupported:" 1028 " 0x%02x", vpd->device_identifier_code_set); 1029 ret = -EINVAL; 1030 break; 1031 } 1032 1033 if (p_buf) 1034 strncpy(p_buf, buf, p_buf_len); 1035 else 1036 pr_debug("%s", buf); 1037 1038 return ret; 1039 } 1040 1041 int 1042 transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83) 1043 { 1044 static const char hex_str[] = "0123456789abcdef"; 1045 int j = 0, i = 4; /* offset to start of the identifier */ 1046 1047 /* 1048 * The VPD Code Set (encoding) 1049 * 1050 * from spc3r23.pdf Section 7.6.3.1 Table 296 1051 */ 1052 vpd->device_identifier_code_set = (page_83[0] & 0x0f); 1053 switch (vpd->device_identifier_code_set) { 1054 case 0x01: /* Binary */ 1055 vpd->device_identifier[j++] = 1056 hex_str[vpd->device_identifier_type]; 1057 while (i < (4 + page_83[3])) { 1058 vpd->device_identifier[j++] = 1059 hex_str[(page_83[i] & 0xf0) >> 4]; 1060 vpd->device_identifier[j++] = 1061 hex_str[page_83[i] & 0x0f]; 1062 i++; 1063 } 1064 break; 1065 case 0x02: /* ASCII */ 1066 case 0x03: /* UTF-8 */ 1067 while (i < (4 + page_83[3])) 1068 vpd->device_identifier[j++] = page_83[i++]; 1069 break; 1070 default: 1071 break; 1072 } 1073 1074 return transport_dump_vpd_ident(vpd, NULL, 0); 1075 } 1076 EXPORT_SYMBOL(transport_set_vpd_ident); 1077 1078 static sense_reason_t 1079 target_check_max_data_sg_nents(struct se_cmd *cmd, struct se_device *dev, 1080 unsigned int size) 1081 { 1082 u32 mtl; 1083 1084 if (!cmd->se_tfo->max_data_sg_nents) 1085 return TCM_NO_SENSE; 1086 /* 1087 * Check if fabric enforced maximum SGL entries per I/O descriptor 1088 * exceeds se_cmd->data_length. If true, set SCF_UNDERFLOW_BIT + 1089 * residual_count and reduce original cmd->data_length to maximum 1090 * length based on single PAGE_SIZE entry scatter-lists. 1091 */ 1092 mtl = (cmd->se_tfo->max_data_sg_nents * PAGE_SIZE); 1093 if (cmd->data_length > mtl) { 1094 /* 1095 * If an existing CDB overflow is present, calculate new residual 1096 * based on CDB size minus fabric maximum transfer length. 1097 * 1098 * If an existing CDB underflow is present, calculate new residual 1099 * based on original cmd->data_length minus fabric maximum transfer 1100 * length. 1101 * 1102 * Otherwise, set the underflow residual based on cmd->data_length 1103 * minus fabric maximum transfer length. 
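		 *
		 * Worked example (assuming PAGE_SIZE = 4096): a fabric that
		 * reports max_data_sg_nents = 32 gives mtl = 131072 bytes, so
		 * a READ with cmd->data_length = 262144 and no prior
		 * OVERFLOW/UNDERFLOW bit set is truncated to data_length =
		 * 131072 with SCF_UNDERFLOW_BIT set and residual_count =
		 * 131072.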
1104 */ 1105 if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { 1106 cmd->residual_count = (size - mtl); 1107 } else if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { 1108 u32 orig_dl = size + cmd->residual_count; 1109 cmd->residual_count = (orig_dl - mtl); 1110 } else { 1111 cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; 1112 cmd->residual_count = (cmd->data_length - mtl); 1113 } 1114 cmd->data_length = mtl; 1115 /* 1116 * Reset sbc_check_prot() calculated protection payload 1117 * length based upon the new smaller MTL. 1118 */ 1119 if (cmd->prot_length) { 1120 u32 sectors = (mtl / dev->dev_attrib.block_size); 1121 cmd->prot_length = dev->prot_length * sectors; 1122 } 1123 } 1124 return TCM_NO_SENSE; 1125 } 1126 1127 sense_reason_t 1128 target_cmd_size_check(struct se_cmd *cmd, unsigned int size) 1129 { 1130 struct se_device *dev = cmd->se_dev; 1131 1132 if (cmd->unknown_data_length) { 1133 cmd->data_length = size; 1134 } else if (size != cmd->data_length) { 1135 pr_warn("TARGET_CORE[%s]: Expected Transfer Length:" 1136 " %u does not match SCSI CDB Length: %u for SAM Opcode:" 1137 " 0x%02x\n", cmd->se_tfo->get_fabric_name(), 1138 cmd->data_length, size, cmd->t_task_cdb[0]); 1139 1140 if (cmd->data_direction == DMA_TO_DEVICE && 1141 cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) { 1142 pr_err("Rejecting underflow/overflow WRITE data\n"); 1143 return TCM_INVALID_CDB_FIELD; 1144 } 1145 /* 1146 * Reject READ_* or WRITE_* with overflow/underflow for 1147 * type SCF_SCSI_DATA_CDB. 1148 */ 1149 if (dev->dev_attrib.block_size != 512) { 1150 pr_err("Failing OVERFLOW/UNDERFLOW for LBA op" 1151 " CDB on non 512-byte sector setup subsystem" 1152 " plugin: %s\n", dev->transport->name); 1153 /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */ 1154 return TCM_INVALID_CDB_FIELD; 1155 } 1156 /* 1157 * For the overflow case keep the existing fabric provided 1158 * ->data_length. Otherwise for the underflow case, reset 1159 * ->data_length to the smaller SCSI expected data transfer 1160 * length. 1161 */ 1162 if (size > cmd->data_length) { 1163 cmd->se_cmd_flags |= SCF_OVERFLOW_BIT; 1164 cmd->residual_count = (size - cmd->data_length); 1165 } else { 1166 cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; 1167 cmd->residual_count = (cmd->data_length - size); 1168 cmd->data_length = size; 1169 } 1170 } 1171 1172 return target_check_max_data_sg_nents(cmd, dev, size); 1173 1174 } 1175 1176 /* 1177 * Used by fabric modules containing a local struct se_cmd within their 1178 * fabric dependent per I/O descriptor. 1179 * 1180 * Preserves the value of @cmd->tag. 
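 *
 * A minimal sketch of the expected embedding (the tcm_foo_* names are
 * purely illustrative):
 *
 *	struct tcm_foo_cmd {
 *		struct foo_hw_request	hw_req;
 *		struct se_cmd		se_cmd;	/* initialized below */
 *	};
 *
 * so the fabric can later recover its own descriptor with
 * container_of(se_cmd, struct tcm_foo_cmd, se_cmd).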
1181 */ 1182 void transport_init_se_cmd( 1183 struct se_cmd *cmd, 1184 const struct target_core_fabric_ops *tfo, 1185 struct se_session *se_sess, 1186 u32 data_length, 1187 int data_direction, 1188 int task_attr, 1189 unsigned char *sense_buffer) 1190 { 1191 INIT_LIST_HEAD(&cmd->se_delayed_node); 1192 INIT_LIST_HEAD(&cmd->se_qf_node); 1193 INIT_LIST_HEAD(&cmd->se_cmd_list); 1194 INIT_LIST_HEAD(&cmd->state_list); 1195 init_completion(&cmd->t_transport_stop_comp); 1196 init_completion(&cmd->cmd_wait_comp); 1197 init_completion(&cmd->task_stop_comp); 1198 spin_lock_init(&cmd->t_state_lock); 1199 kref_init(&cmd->cmd_kref); 1200 cmd->transport_state = CMD_T_DEV_ACTIVE; 1201 1202 cmd->se_tfo = tfo; 1203 cmd->se_sess = se_sess; 1204 cmd->data_length = data_length; 1205 cmd->data_direction = data_direction; 1206 cmd->sam_task_attr = task_attr; 1207 cmd->sense_buffer = sense_buffer; 1208 1209 cmd->state_active = false; 1210 } 1211 EXPORT_SYMBOL(transport_init_se_cmd); 1212 1213 static sense_reason_t 1214 transport_check_alloc_task_attr(struct se_cmd *cmd) 1215 { 1216 struct se_device *dev = cmd->se_dev; 1217 1218 /* 1219 * Check if SAM Task Attribute emulation is enabled for this 1220 * struct se_device storage object 1221 */ 1222 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) 1223 return 0; 1224 1225 if (cmd->sam_task_attr == TCM_ACA_TAG) { 1226 pr_debug("SAM Task Attribute ACA" 1227 " emulation is not supported\n"); 1228 return TCM_INVALID_CDB_FIELD; 1229 } 1230 1231 return 0; 1232 } 1233 1234 sense_reason_t 1235 target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb) 1236 { 1237 struct se_device *dev = cmd->se_dev; 1238 sense_reason_t ret; 1239 1240 /* 1241 * Ensure that the received CDB is less than the max (252 + 8) bytes 1242 * for VARIABLE_LENGTH_CMD 1243 */ 1244 if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) { 1245 pr_err("Received SCSI CDB with command_size: %d that" 1246 " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", 1247 scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE); 1248 return TCM_INVALID_CDB_FIELD; 1249 } 1250 /* 1251 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE, 1252 * allocate the additional extended CDB buffer now.. Otherwise 1253 * setup the pointer from __t_task_cdb to t_task_cdb. 
1254 */ 1255 if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) { 1256 cmd->t_task_cdb = kzalloc(scsi_command_size(cdb), 1257 GFP_KERNEL); 1258 if (!cmd->t_task_cdb) { 1259 pr_err("Unable to allocate cmd->t_task_cdb" 1260 " %u > sizeof(cmd->__t_task_cdb): %lu ops\n", 1261 scsi_command_size(cdb), 1262 (unsigned long)sizeof(cmd->__t_task_cdb)); 1263 return TCM_OUT_OF_RESOURCES; 1264 } 1265 } else 1266 cmd->t_task_cdb = &cmd->__t_task_cdb[0]; 1267 /* 1268 * Copy the original CDB into cmd-> 1269 */ 1270 memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb)); 1271 1272 trace_target_sequencer_start(cmd); 1273 1274 /* 1275 * Check for an existing UNIT ATTENTION condition 1276 */ 1277 ret = target_scsi3_ua_check(cmd); 1278 if (ret) 1279 return ret; 1280 1281 ret = target_alua_state_check(cmd); 1282 if (ret) 1283 return ret; 1284 1285 ret = target_check_reservation(cmd); 1286 if (ret) { 1287 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; 1288 return ret; 1289 } 1290 1291 ret = dev->transport->parse_cdb(cmd); 1292 if (ret == TCM_UNSUPPORTED_SCSI_OPCODE) 1293 pr_warn_ratelimited("%s/%s: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n", 1294 cmd->se_tfo->get_fabric_name(), 1295 cmd->se_sess->se_node_acl->initiatorname, 1296 cmd->t_task_cdb[0]); 1297 if (ret) 1298 return ret; 1299 1300 ret = transport_check_alloc_task_attr(cmd); 1301 if (ret) 1302 return ret; 1303 1304 cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE; 1305 atomic_long_inc(&cmd->se_lun->lun_stats.cmd_pdus); 1306 return 0; 1307 } 1308 EXPORT_SYMBOL(target_setup_cmd_from_cdb); 1309 1310 /* 1311 * Used by fabric module frontends to queue tasks directly. 1312 * Many only be used from process context only 1313 */ 1314 int transport_handle_cdb_direct( 1315 struct se_cmd *cmd) 1316 { 1317 sense_reason_t ret; 1318 1319 if (!cmd->se_lun) { 1320 dump_stack(); 1321 pr_err("cmd->se_lun is NULL\n"); 1322 return -EINVAL; 1323 } 1324 if (in_interrupt()) { 1325 dump_stack(); 1326 pr_err("transport_generic_handle_cdb cannot be called" 1327 " from interrupt context\n"); 1328 return -EINVAL; 1329 } 1330 /* 1331 * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE to ensure that 1332 * outstanding descriptors are handled correctly during shutdown via 1333 * transport_wait_for_tasks() 1334 * 1335 * Also, we don't take cmd->t_state_lock here as we only expect 1336 * this to be called for initial descriptor submission. 1337 */ 1338 cmd->t_state = TRANSPORT_NEW_CMD; 1339 cmd->transport_state |= CMD_T_ACTIVE; 1340 1341 /* 1342 * transport_generic_new_cmd() is already handling QUEUE_FULL, 1343 * so follow TRANSPORT_NEW_CMD processing thread context usage 1344 * and call transport_generic_request_failure() if necessary.. 1345 */ 1346 ret = transport_generic_new_cmd(cmd); 1347 if (ret) 1348 transport_generic_request_failure(cmd, ret); 1349 return 0; 1350 } 1351 EXPORT_SYMBOL(transport_handle_cdb_direct); 1352 1353 sense_reason_t 1354 transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl, 1355 u32 sgl_count, struct scatterlist *sgl_bidi, u32 sgl_bidi_count) 1356 { 1357 if (!sgl || !sgl_count) 1358 return 0; 1359 1360 /* 1361 * Reject SCSI data overflow with map_mem_to_cmd() as incoming 1362 * scatterlists already have been set to follow what the fabric 1363 * passes for the original expected data transfer length. 
 */
	if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
		pr_warn("Rejecting SCSI DATA overflow for fabric using"
			" SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
		return TCM_INVALID_CDB_FIELD;
	}

	cmd->t_data_sg = sgl;
	cmd->t_data_nents = sgl_count;
	cmd->t_bidi_data_sg = sgl_bidi;
	cmd->t_bidi_data_nents = sgl_bidi_count;

	cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
	return 0;
}

/*
 * target_submit_cmd_map_sgls - lookup unpacked lun and submit uninitialized
 *			 se_cmd + use pre-allocated SGL memory.
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @cdb: pointer to SCSI CDB
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @data_length: fabric expected data transfer length
 * @task_attr: SAM task attribute
 * @data_dir: DMA data direction
 * @flags: flags for command submission from target_sc_flags_tables
 * @sgl: struct scatterlist memory for unidirectional mapping
 * @sgl_count: scatterlist count for unidirectional mapping
 * @sgl_bidi: struct scatterlist memory for bidirectional READ mapping
 * @sgl_bidi_count: scatterlist count for bidirectional READ mapping
 * @sgl_prot: struct scatterlist memory protection information
 * @sgl_prot_count: scatterlist count for protection information
 *
 * Task tags are supported if the caller has set @se_cmd->tag.
 *
 * Returns non zero to signal active I/O shutdown failure.  All other
 * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
 * but still return zero here.
 *
 * This may only be called from process context, and also currently
 * assumes internal allocation of fabric payload buffer by target-core.
 */
int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *cdb, unsigned char *sense, u64 unpacked_lun,
		u32 data_length, int task_attr, int data_dir, int flags,
		struct scatterlist *sgl, u32 sgl_count,
		struct scatterlist *sgl_bidi, u32 sgl_bidi_count,
		struct scatterlist *sgl_prot, u32 sgl_prot_count)
{
	struct se_portal_group *se_tpg;
	sense_reason_t rc;
	int ret;

	se_tpg = se_sess->se_tpg;
	BUG_ON(!se_tpg);
	BUG_ON(se_cmd->se_tfo || se_cmd->se_sess);
	BUG_ON(in_interrupt());
	/*
	 * Initialize se_cmd for target operation.  From this point
	 * exceptions are handled by sending exception status via
	 * target_core_fabric_ops->queue_status() callback
	 */
	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
				data_length, data_dir, task_attr, sense);
	if (flags & TARGET_SCF_UNKNOWN_SIZE)
		se_cmd->unknown_data_length = 1;
	/*
	 * Obtain struct se_cmd->cmd_kref reference and add new cmd to
	 * se_sess->sess_cmd_list.  A second kref_get here is necessary
	 * for fabrics using TARGET_SCF_ACK_KREF that expect a second
	 * kref_put() to happen during fabric packet acknowledgement.
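	 *
	 * In practice a fabric that passes TARGET_SCF_ACK_KREF is expected
	 * to drop that extra reference itself, typically via
	 * target_put_sess_cmd() from its response acknowledgement path.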
1438 */ 1439 ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF); 1440 if (ret) 1441 return ret; 1442 /* 1443 * Signal bidirectional data payloads to target-core 1444 */ 1445 if (flags & TARGET_SCF_BIDI_OP) 1446 se_cmd->se_cmd_flags |= SCF_BIDI; 1447 /* 1448 * Locate se_lun pointer and attach it to struct se_cmd 1449 */ 1450 rc = transport_lookup_cmd_lun(se_cmd, unpacked_lun); 1451 if (rc) { 1452 transport_send_check_condition_and_sense(se_cmd, rc, 0); 1453 target_put_sess_cmd(se_cmd); 1454 return 0; 1455 } 1456 1457 rc = target_setup_cmd_from_cdb(se_cmd, cdb); 1458 if (rc != 0) { 1459 transport_generic_request_failure(se_cmd, rc); 1460 return 0; 1461 } 1462 1463 /* 1464 * Save pointers for SGLs containing protection information, 1465 * if present. 1466 */ 1467 if (sgl_prot_count) { 1468 se_cmd->t_prot_sg = sgl_prot; 1469 se_cmd->t_prot_nents = sgl_prot_count; 1470 se_cmd->se_cmd_flags |= SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC; 1471 } 1472 1473 /* 1474 * When a non zero sgl_count has been passed perform SGL passthrough 1475 * mapping for pre-allocated fabric memory instead of having target 1476 * core perform an internal SGL allocation.. 1477 */ 1478 if (sgl_count != 0) { 1479 BUG_ON(!sgl); 1480 1481 /* 1482 * A work-around for tcm_loop as some userspace code via 1483 * scsi-generic do not memset their associated read buffers, 1484 * so go ahead and do that here for type non-data CDBs. Also 1485 * note that this is currently guaranteed to be a single SGL 1486 * for this case by target core in target_setup_cmd_from_cdb() 1487 * -> transport_generic_cmd_sequencer(). 1488 */ 1489 if (!(se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && 1490 se_cmd->data_direction == DMA_FROM_DEVICE) { 1491 unsigned char *buf = NULL; 1492 1493 if (sgl) 1494 buf = kmap(sg_page(sgl)) + sgl->offset; 1495 1496 if (buf) { 1497 memset(buf, 0, sgl->length); 1498 kunmap(sg_page(sgl)); 1499 } 1500 } 1501 1502 rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count, 1503 sgl_bidi, sgl_bidi_count); 1504 if (rc != 0) { 1505 transport_generic_request_failure(se_cmd, rc); 1506 return 0; 1507 } 1508 } 1509 1510 /* 1511 * Check if we need to delay processing because of ALUA 1512 * Active/NonOptimized primary access state.. 1513 */ 1514 core_alua_check_nonop_delay(se_cmd); 1515 1516 transport_handle_cdb_direct(se_cmd); 1517 return 0; 1518 } 1519 EXPORT_SYMBOL(target_submit_cmd_map_sgls); 1520 1521 /* 1522 * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd 1523 * 1524 * @se_cmd: command descriptor to submit 1525 * @se_sess: associated se_sess for endpoint 1526 * @cdb: pointer to SCSI CDB 1527 * @sense: pointer to SCSI sense buffer 1528 * @unpacked_lun: unpacked LUN to reference for struct se_lun 1529 * @data_length: fabric expected data transfer length 1530 * @task_addr: SAM task attribute 1531 * @data_dir: DMA data direction 1532 * @flags: flags for command submission from target_sc_flags_tables 1533 * 1534 * Task tags are supported if the caller has set @se_cmd->tag. 1535 * 1536 * Returns non zero to signal active I/O shutdown failure. All other 1537 * setup exceptions will be returned as a SCSI CHECK_CONDITION response, 1538 * but still return zero here. 1539 * 1540 * This may only be called from process context, and also currently 1541 * assumes internal allocation of fabric payload buffer by target-core. 1542 * 1543 * It also assumes interal target core SGL memory allocation. 
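 *
 * A typical call from a hypothetical fabric driver's submission path
 * (the tcm_foo_* names are illustrative only) would look like:
 *
 *	rc = target_submit_cmd(&foo_cmd->se_cmd, foo_conn->se_sess,
 *			       foo_cmd->cdb, foo_cmd->sense_buf,
 *			       foo_cmd->unpacked_lun, foo_cmd->data_len,
 *			       TCM_SIMPLE_TAG, foo_cmd->dma_dir,
 *			       TARGET_SCF_ACK_KREF);
 *	if (rc)
 *		return rc;	/* active session shutdown in progress */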
 */
int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *cdb, unsigned char *sense, u64 unpacked_lun,
		u32 data_length, int task_attr, int data_dir, int flags)
{
	return target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense,
			unpacked_lun, data_length, task_attr, data_dir,
			flags, NULL, 0, NULL, 0, NULL, 0);
}
EXPORT_SYMBOL(target_submit_cmd);

static void target_complete_tmr_failure(struct work_struct *work)
{
	struct se_cmd *se_cmd = container_of(work, struct se_cmd, work);

	se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
	se_cmd->se_tfo->queue_tm_rsp(se_cmd);

	transport_cmd_check_stop_to_fabric(se_cmd);
}

/**
 * target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd
 *                     for TMR CDBs
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @fabric_tmr_ptr: fabric context for TMR req
 * @tm_type: Type of TM request
 * @gfp: gfp type for caller
 * @tag: referenced task tag for TMR_ABORT_TASK
 * @flags: submit cmd flags
 *
 * Callable from all contexts.
 **/
int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *sense, u64 unpacked_lun,
		void *fabric_tmr_ptr, unsigned char tm_type,
		gfp_t gfp, unsigned int tag, int flags)
{
	struct se_portal_group *se_tpg;
	int ret;

	se_tpg = se_sess->se_tpg;
	BUG_ON(!se_tpg);

	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
			      0, DMA_NONE, TCM_SIMPLE_TAG, sense);
	/*
	 * FIXME: Currently expect caller to handle se_cmd->se_tmr_req
	 * allocation failure.
	 */
	ret = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, tm_type, gfp);
	if (ret < 0)
		return -ENOMEM;

	if (tm_type == TMR_ABORT_TASK)
		se_cmd->se_tmr_req->ref_task_tag = tag;

	/* See target_submit_cmd for commentary */
	ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
	if (ret) {
		core_tmr_release_req(se_cmd->se_tmr_req);
		return ret;
	}

	ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun);
	if (ret) {
		/*
		 * For callback during failure handling, push this work off
		 * to process context with TMR_LUN_DOES_NOT_EXIST status.
		 */
		INIT_WORK(&se_cmd->work, target_complete_tmr_failure);
		schedule_work(&se_cmd->work);
		return 0;
	}
	transport_generic_handle_tmr(se_cmd);
	return 0;
}
EXPORT_SYMBOL(target_submit_tmr);

/*
 * If the cmd is active, request it to be stopped and sleep until it
 * has completed.
1631 */ 1632 bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags) 1633 __releases(&cmd->t_state_lock) 1634 __acquires(&cmd->t_state_lock) 1635 { 1636 bool was_active = false; 1637 1638 if (cmd->transport_state & CMD_T_BUSY) { 1639 cmd->transport_state |= CMD_T_REQUEST_STOP; 1640 spin_unlock_irqrestore(&cmd->t_state_lock, *flags); 1641 1642 pr_debug("cmd %p waiting to complete\n", cmd); 1643 wait_for_completion(&cmd->task_stop_comp); 1644 pr_debug("cmd %p stopped successfully\n", cmd); 1645 1646 spin_lock_irqsave(&cmd->t_state_lock, *flags); 1647 cmd->transport_state &= ~CMD_T_REQUEST_STOP; 1648 cmd->transport_state &= ~CMD_T_BUSY; 1649 was_active = true; 1650 } 1651 1652 return was_active; 1653 } 1654 1655 /* 1656 * Handle SAM-esque emulation for generic transport request failures. 1657 */ 1658 void transport_generic_request_failure(struct se_cmd *cmd, 1659 sense_reason_t sense_reason) 1660 { 1661 int ret = 0, post_ret = 0; 1662 1663 pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08llx" 1664 " CDB: 0x%02x\n", cmd, cmd->tag, cmd->t_task_cdb[0]); 1665 pr_debug("-----[ i_state: %d t_state: %d sense_reason: %d\n", 1666 cmd->se_tfo->get_cmd_state(cmd), 1667 cmd->t_state, sense_reason); 1668 pr_debug("-----[ CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n", 1669 (cmd->transport_state & CMD_T_ACTIVE) != 0, 1670 (cmd->transport_state & CMD_T_STOP) != 0, 1671 (cmd->transport_state & CMD_T_SENT) != 0); 1672 1673 /* 1674 * For SAM Task Attribute emulation for failed struct se_cmd 1675 */ 1676 transport_complete_task_attr(cmd); 1677 /* 1678 * Handle special case for COMPARE_AND_WRITE failure, where the 1679 * callback is expected to drop the per device ->caw_sem. 1680 */ 1681 if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) && 1682 cmd->transport_complete_callback) 1683 cmd->transport_complete_callback(cmd, false, &post_ret); 1684 1685 switch (sense_reason) { 1686 case TCM_NON_EXISTENT_LUN: 1687 case TCM_UNSUPPORTED_SCSI_OPCODE: 1688 case TCM_INVALID_CDB_FIELD: 1689 case TCM_INVALID_PARAMETER_LIST: 1690 case TCM_PARAMETER_LIST_LENGTH_ERROR: 1691 case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE: 1692 case TCM_UNKNOWN_MODE_PAGE: 1693 case TCM_WRITE_PROTECTED: 1694 case TCM_ADDRESS_OUT_OF_RANGE: 1695 case TCM_CHECK_CONDITION_ABORT_CMD: 1696 case TCM_CHECK_CONDITION_UNIT_ATTENTION: 1697 case TCM_CHECK_CONDITION_NOT_READY: 1698 case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED: 1699 case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED: 1700 case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED: 1701 break; 1702 case TCM_OUT_OF_RESOURCES: 1703 sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1704 break; 1705 case TCM_RESERVATION_CONFLICT: 1706 /* 1707 * No SENSE Data payload for this case, set SCSI Status 1708 * and queue the response to $FABRIC_MOD. 1709 * 1710 * Uses linux/include/scsi/scsi.h SAM status codes defs 1711 */ 1712 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; 1713 /* 1714 * For UA Interlock Code 11b, a RESERVATION CONFLICT will 1715 * establish a UNIT ATTENTION with PREVIOUS RESERVATION 1716 * CONFLICT STATUS. 
1717 * 1718 * See spc4r17, section 7.4.6 Control Mode Page, Table 349 1719 */ 1720 if (cmd->se_sess && 1721 cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl == 2) { 1722 target_ua_allocate_lun(cmd->se_sess->se_node_acl, 1723 cmd->orig_fe_lun, 0x2C, 1724 ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); 1725 } 1726 trace_target_cmd_complete(cmd); 1727 ret = cmd->se_tfo->queue_status(cmd); 1728 if (ret == -EAGAIN || ret == -ENOMEM) 1729 goto queue_full; 1730 goto check_stop; 1731 default: 1732 pr_err("Unknown transport error for CDB 0x%02x: %d\n", 1733 cmd->t_task_cdb[0], sense_reason); 1734 sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; 1735 break; 1736 } 1737 1738 ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0); 1739 if (ret == -EAGAIN || ret == -ENOMEM) 1740 goto queue_full; 1741 1742 check_stop: 1743 transport_lun_remove_cmd(cmd); 1744 transport_cmd_check_stop_to_fabric(cmd); 1745 return; 1746 1747 queue_full: 1748 cmd->t_state = TRANSPORT_COMPLETE_QF_OK; 1749 transport_handle_queue_full(cmd, cmd->se_dev); 1750 } 1751 EXPORT_SYMBOL(transport_generic_request_failure); 1752 1753 void __target_execute_cmd(struct se_cmd *cmd) 1754 { 1755 sense_reason_t ret; 1756 1757 if (cmd->execute_cmd) { 1758 ret = cmd->execute_cmd(cmd); 1759 if (ret) { 1760 spin_lock_irq(&cmd->t_state_lock); 1761 cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT); 1762 spin_unlock_irq(&cmd->t_state_lock); 1763 1764 transport_generic_request_failure(cmd, ret); 1765 } 1766 } 1767 } 1768 1769 static int target_write_prot_action(struct se_cmd *cmd) 1770 { 1771 u32 sectors; 1772 /* 1773 * Perform WRITE_INSERT of PI using software emulation when backend 1774 * device has PI enabled, if the transport has not already generated 1775 * PI using hardware WRITE_INSERT offload. 1776 */ 1777 switch (cmd->prot_op) { 1778 case TARGET_PROT_DOUT_INSERT: 1779 if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_INSERT)) 1780 sbc_dif_generate(cmd); 1781 break; 1782 case TARGET_PROT_DOUT_STRIP: 1783 if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_STRIP) 1784 break; 1785 1786 sectors = cmd->data_length >> ilog2(cmd->se_dev->dev_attrib.block_size); 1787 cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba, 1788 sectors, 0, cmd->t_prot_sg, 0); 1789 if (unlikely(cmd->pi_err)) { 1790 spin_lock_irq(&cmd->t_state_lock); 1791 cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT); 1792 spin_unlock_irq(&cmd->t_state_lock); 1793 transport_generic_request_failure(cmd, cmd->pi_err); 1794 return -1; 1795 } 1796 break; 1797 default: 1798 break; 1799 } 1800 1801 return 0; 1802 } 1803 1804 static bool target_handle_task_attr(struct se_cmd *cmd) 1805 { 1806 struct se_device *dev = cmd->se_dev; 1807 1808 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) 1809 return false; 1810 1811 /* 1812 * Check for the existence of HEAD_OF_QUEUE, and if true return 1 1813 * to allow the passed struct se_cmd list of tasks to the front of the list. 1814 */ 1815 switch (cmd->sam_task_attr) { 1816 case TCM_HEAD_TAG: 1817 pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x\n", 1818 cmd->t_task_cdb[0]); 1819 return false; 1820 case TCM_ORDERED_TAG: 1821 atomic_inc_mb(&dev->dev_ordered_sync); 1822 1823 pr_debug("Added ORDERED for CDB: 0x%02x to ordered list\n", 1824 cmd->t_task_cdb[0]); 1825 1826 /* 1827 * Execute an ORDERED command if no other older commands 1828 * exist that need to be completed first. 
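		 *
		 * For example, if dev->simple_cmds is zero when the ORDERED
		 * command arrives it may run immediately; otherwise it is
		 * parked on dev->delayed_cmd_list below and re-dispatched by
		 * target_restart_delayed_cmds() once the older SIMPLE
		 * commands have completed.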
 */
		if (!atomic_read(&dev->simple_cmds))
			return false;
		break;
	default:
		/*
		 * For SIMPLE and UNTAGGED Task Attribute commands
		 */
		atomic_inc_mb(&dev->simple_cmds);
		break;
	}

	if (atomic_read(&dev->dev_ordered_sync) == 0)
		return false;

	spin_lock(&dev->delayed_cmd_lock);
	list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list);
	spin_unlock(&dev->delayed_cmd_lock);

	pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to delayed CMD list\n",
		cmd->t_task_cdb[0], cmd->sam_task_attr);
	return true;
}

void target_execute_cmd(struct se_cmd *cmd)
{
	/*
	 * If the received CDB has already been aborted stop processing it here.
	 */
	if (transport_check_aborted_status(cmd, 1))
		return;

	/*
	 * Determine if frontend context caller is requesting the stopping of
	 * this command for frontend exceptions.
	 */
	spin_lock_irq(&cmd->t_state_lock);
	if (cmd->transport_state & CMD_T_STOP) {
		pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
			__func__, __LINE__, cmd->tag);

		spin_unlock_irq(&cmd->t_state_lock);
		complete_all(&cmd->t_transport_stop_comp);
		return;
	}

	cmd->t_state = TRANSPORT_PROCESSING;
	cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT;
	spin_unlock_irq(&cmd->t_state_lock);

	if (target_write_prot_action(cmd))
		return;

	if (target_handle_task_attr(cmd)) {
		spin_lock_irq(&cmd->t_state_lock);
		cmd->transport_state &= ~(CMD_T_BUSY | CMD_T_SENT);
		spin_unlock_irq(&cmd->t_state_lock);
		return;
	}

	__target_execute_cmd(cmd);
}
EXPORT_SYMBOL(target_execute_cmd);

/*
 * Process all commands up to the last received ORDERED task attribute which
 * requires another blocking boundary
 */
static void target_restart_delayed_cmds(struct se_device *dev)
{
	for (;;) {
		struct se_cmd *cmd;

		spin_lock(&dev->delayed_cmd_lock);
		if (list_empty(&dev->delayed_cmd_list)) {
			spin_unlock(&dev->delayed_cmd_lock);
			break;
		}

		cmd = list_entry(dev->delayed_cmd_list.next,
				 struct se_cmd, se_delayed_node);
		list_del(&cmd->se_delayed_node);
		spin_unlock(&dev->delayed_cmd_lock);

		__target_execute_cmd(cmd);

		if (cmd->sam_task_attr == TCM_ORDERED_TAG)
			break;
	}
}

/*
 * Called from I/O completion to determine which dormant/delayed
 * and ordered cmds need to have their tasks added to the execution queue.
1923 */ 1924 static void transport_complete_task_attr(struct se_cmd *cmd) 1925 { 1926 struct se_device *dev = cmd->se_dev; 1927 1928 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) 1929 return; 1930 1931 if (cmd->sam_task_attr == TCM_SIMPLE_TAG) { 1932 atomic_dec_mb(&dev->simple_cmds); 1933 dev->dev_cur_ordered_id++; 1934 pr_debug("Incremented dev->dev_cur_ordered_id: %u for SIMPLE\n", 1935 dev->dev_cur_ordered_id); 1936 } else if (cmd->sam_task_attr == TCM_HEAD_TAG) { 1937 dev->dev_cur_ordered_id++; 1938 pr_debug("Incremented dev_cur_ordered_id: %u for HEAD_OF_QUEUE\n", 1939 dev->dev_cur_ordered_id); 1940 } else if (cmd->sam_task_attr == TCM_ORDERED_TAG) { 1941 atomic_dec_mb(&dev->dev_ordered_sync); 1942 1943 dev->dev_cur_ordered_id++; 1944 pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n", 1945 dev->dev_cur_ordered_id); 1946 } 1947 1948 target_restart_delayed_cmds(dev); 1949 } 1950 1951 static void transport_complete_qf(struct se_cmd *cmd) 1952 { 1953 int ret = 0; 1954 1955 transport_complete_task_attr(cmd); 1956 1957 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { 1958 trace_target_cmd_complete(cmd); 1959 ret = cmd->se_tfo->queue_status(cmd); 1960 goto out; 1961 } 1962 1963 switch (cmd->data_direction) { 1964 case DMA_FROM_DEVICE: 1965 trace_target_cmd_complete(cmd); 1966 ret = cmd->se_tfo->queue_data_in(cmd); 1967 break; 1968 case DMA_TO_DEVICE: 1969 if (cmd->se_cmd_flags & SCF_BIDI) { 1970 ret = cmd->se_tfo->queue_data_in(cmd); 1971 break; 1972 } 1973 /* Fall through for DMA_TO_DEVICE */ 1974 case DMA_NONE: 1975 trace_target_cmd_complete(cmd); 1976 ret = cmd->se_tfo->queue_status(cmd); 1977 break; 1978 default: 1979 break; 1980 } 1981 1982 out: 1983 if (ret < 0) { 1984 transport_handle_queue_full(cmd, cmd->se_dev); 1985 return; 1986 } 1987 transport_lun_remove_cmd(cmd); 1988 transport_cmd_check_stop_to_fabric(cmd); 1989 } 1990 1991 static void transport_handle_queue_full( 1992 struct se_cmd *cmd, 1993 struct se_device *dev) 1994 { 1995 spin_lock_irq(&dev->qf_cmd_lock); 1996 list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list); 1997 atomic_inc_mb(&dev->dev_qf_count); 1998 spin_unlock_irq(&cmd->se_dev->qf_cmd_lock); 1999 2000 schedule_work(&cmd->se_dev->qf_work_queue); 2001 } 2002 2003 static bool target_read_prot_action(struct se_cmd *cmd) 2004 { 2005 switch (cmd->prot_op) { 2006 case TARGET_PROT_DIN_STRIP: 2007 if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP)) { 2008 u32 sectors = cmd->data_length >> 2009 ilog2(cmd->se_dev->dev_attrib.block_size); 2010 2011 cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba, 2012 sectors, 0, cmd->t_prot_sg, 2013 0); 2014 if (cmd->pi_err) 2015 return true; 2016 } 2017 break; 2018 case TARGET_PROT_DIN_INSERT: 2019 if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_INSERT) 2020 break; 2021 2022 sbc_dif_generate(cmd); 2023 break; 2024 default: 2025 break; 2026 } 2027 2028 return false; 2029 } 2030 2031 static void target_complete_ok_work(struct work_struct *work) 2032 { 2033 struct se_cmd *cmd = container_of(work, struct se_cmd, work); 2034 int ret; 2035 2036 /* 2037 * Check if we need to move delayed/dormant tasks from cmds on the 2038 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task 2039 * Attribute. 
2040 */ 2041 transport_complete_task_attr(cmd); 2042 2043 /* 2044 * Check to schedule QUEUE_FULL work, or execute an existing 2045 * cmd->transport_qf_callback() 2046 */ 2047 if (atomic_read(&cmd->se_dev->dev_qf_count) != 0) 2048 schedule_work(&cmd->se_dev->qf_work_queue); 2049 2050 /* 2051 * Check if we need to send a sense buffer from 2052 * the struct se_cmd in question. 2053 */ 2054 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { 2055 WARN_ON(!cmd->scsi_status); 2056 ret = transport_send_check_condition_and_sense( 2057 cmd, 0, 1); 2058 if (ret == -EAGAIN || ret == -ENOMEM) 2059 goto queue_full; 2060 2061 transport_lun_remove_cmd(cmd); 2062 transport_cmd_check_stop_to_fabric(cmd); 2063 return; 2064 } 2065 /* 2066 * Check for a callback, used by amongst other things 2067 * XDWRITE_READ_10 and COMPARE_AND_WRITE emulation. 2068 */ 2069 if (cmd->transport_complete_callback) { 2070 sense_reason_t rc; 2071 bool caw = (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE); 2072 bool zero_dl = !(cmd->data_length); 2073 int post_ret = 0; 2074 2075 rc = cmd->transport_complete_callback(cmd, true, &post_ret); 2076 if (!rc && !post_ret) { 2077 if (caw && zero_dl) 2078 goto queue_rsp; 2079 2080 return; 2081 } else if (rc) { 2082 ret = transport_send_check_condition_and_sense(cmd, 2083 rc, 0); 2084 if (ret == -EAGAIN || ret == -ENOMEM) 2085 goto queue_full; 2086 2087 transport_lun_remove_cmd(cmd); 2088 transport_cmd_check_stop_to_fabric(cmd); 2089 return; 2090 } 2091 } 2092 2093 queue_rsp: 2094 switch (cmd->data_direction) { 2095 case DMA_FROM_DEVICE: 2096 atomic_long_add(cmd->data_length, 2097 &cmd->se_lun->lun_stats.tx_data_octets); 2098 /* 2099 * Perform READ_STRIP of PI using software emulation when 2100 * backend had PI enabled, if the transport will not be 2101 * performing hardware READ_STRIP offload. 
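 *
 * Sketch of the decision (illustrative, mirrors target_read_prot_action()
 * above): the software path via sbc_dif_verify() only runs when the fabric
 * does not advertise hardware DIN_STRIP support, roughly:
 *
 *	if (cmd->prot_op == TARGET_PROT_DIN_STRIP &&
 *	    !(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP))
 *		cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba,
 *					     sectors, 0, cmd->t_prot_sg, 0);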
2102 */ 2103 if (target_read_prot_action(cmd)) { 2104 ret = transport_send_check_condition_and_sense(cmd, 2105 cmd->pi_err, 0); 2106 if (ret == -EAGAIN || ret == -ENOMEM) 2107 goto queue_full; 2108 2109 transport_lun_remove_cmd(cmd); 2110 transport_cmd_check_stop_to_fabric(cmd); 2111 return; 2112 } 2113 2114 trace_target_cmd_complete(cmd); 2115 ret = cmd->se_tfo->queue_data_in(cmd); 2116 if (ret == -EAGAIN || ret == -ENOMEM) 2117 goto queue_full; 2118 break; 2119 case DMA_TO_DEVICE: 2120 atomic_long_add(cmd->data_length, 2121 &cmd->se_lun->lun_stats.rx_data_octets); 2122 /* 2123 * Check if we need to send READ payload for BIDI-COMMAND 2124 */ 2125 if (cmd->se_cmd_flags & SCF_BIDI) { 2126 atomic_long_add(cmd->data_length, 2127 &cmd->se_lun->lun_stats.tx_data_octets); 2128 ret = cmd->se_tfo->queue_data_in(cmd); 2129 if (ret == -EAGAIN || ret == -ENOMEM) 2130 goto queue_full; 2131 break; 2132 } 2133 /* Fall through for DMA_TO_DEVICE */ 2134 case DMA_NONE: 2135 trace_target_cmd_complete(cmd); 2136 ret = cmd->se_tfo->queue_status(cmd); 2137 if (ret == -EAGAIN || ret == -ENOMEM) 2138 goto queue_full; 2139 break; 2140 default: 2141 break; 2142 } 2143 2144 transport_lun_remove_cmd(cmd); 2145 transport_cmd_check_stop_to_fabric(cmd); 2146 return; 2147 2148 queue_full: 2149 pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p," 2150 " data_direction: %d\n", cmd, cmd->data_direction); 2151 cmd->t_state = TRANSPORT_COMPLETE_QF_OK; 2152 transport_handle_queue_full(cmd, cmd->se_dev); 2153 } 2154 2155 static inline void transport_free_sgl(struct scatterlist *sgl, int nents) 2156 { 2157 struct scatterlist *sg; 2158 int count; 2159 2160 for_each_sg(sgl, sg, nents, count) 2161 __free_page(sg_page(sg)); 2162 2163 kfree(sgl); 2164 } 2165 2166 static inline void transport_reset_sgl_orig(struct se_cmd *cmd) 2167 { 2168 /* 2169 * Check for saved t_data_sg that may be used for COMPARE_AND_WRITE 2170 * emulation, and free + reset pointers if necessary.. 2171 */ 2172 if (!cmd->t_data_sg_orig) 2173 return; 2174 2175 kfree(cmd->t_data_sg); 2176 cmd->t_data_sg = cmd->t_data_sg_orig; 2177 cmd->t_data_sg_orig = NULL; 2178 cmd->t_data_nents = cmd->t_data_nents_orig; 2179 cmd->t_data_nents_orig = 0; 2180 } 2181 2182 static inline void transport_free_pages(struct se_cmd *cmd) 2183 { 2184 if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) { 2185 transport_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents); 2186 cmd->t_prot_sg = NULL; 2187 cmd->t_prot_nents = 0; 2188 } 2189 2190 if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) { 2191 /* 2192 * Release special case READ buffer payload required for 2193 * SG_TO_MEM_NOALLOC to function with COMPARE_AND_WRITE 2194 */ 2195 if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) { 2196 transport_free_sgl(cmd->t_bidi_data_sg, 2197 cmd->t_bidi_data_nents); 2198 cmd->t_bidi_data_sg = NULL; 2199 cmd->t_bidi_data_nents = 0; 2200 } 2201 transport_reset_sgl_orig(cmd); 2202 return; 2203 } 2204 transport_reset_sgl_orig(cmd); 2205 2206 transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents); 2207 cmd->t_data_sg = NULL; 2208 cmd->t_data_nents = 0; 2209 2210 transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents); 2211 cmd->t_bidi_data_sg = NULL; 2212 cmd->t_bidi_data_nents = 0; 2213 } 2214 2215 /** 2216 * transport_release_cmd - free a command 2217 * @cmd: command to free 2218 * 2219 * This routine unconditionally frees a command, and reference counting 2220 * or list removal must be done in the caller. 
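 *
 * For orientation (added note, not part of the original comment): the usual
 * teardown chain is transport_generic_free_cmd() -> transport_put_cmd() ->
 * transport_free_pages() + transport_release_cmd(), with the fabric
 * ->release_cmd() callback ultimately invoked from the cmd_kref release path
 * in target_release_cmd_kref().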
2221 */ 2222 static int transport_release_cmd(struct se_cmd *cmd) 2223 { 2224 BUG_ON(!cmd->se_tfo); 2225 2226 if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) 2227 core_tmr_release_req(cmd->se_tmr_req); 2228 if (cmd->t_task_cdb != cmd->__t_task_cdb) 2229 kfree(cmd->t_task_cdb); 2230 /* 2231 * If this cmd has been setup with target_get_sess_cmd(), drop 2232 * the kref and call ->release_cmd() in kref callback. 2233 */ 2234 return target_put_sess_cmd(cmd); 2235 } 2236 2237 /** 2238 * transport_put_cmd - release a reference to a command 2239 * @cmd: command to release 2240 * 2241 * This routine releases our reference to the command and frees it if possible. 2242 */ 2243 static int transport_put_cmd(struct se_cmd *cmd) 2244 { 2245 transport_free_pages(cmd); 2246 return transport_release_cmd(cmd); 2247 } 2248 2249 void *transport_kmap_data_sg(struct se_cmd *cmd) 2250 { 2251 struct scatterlist *sg = cmd->t_data_sg; 2252 struct page **pages; 2253 int i; 2254 2255 /* 2256 * We need to take into account a possible offset here for fabrics like 2257 * tcm_loop who may be using a contig buffer from the SCSI midlayer for 2258 * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd() 2259 */ 2260 if (!cmd->t_data_nents) 2261 return NULL; 2262 2263 BUG_ON(!sg); 2264 if (cmd->t_data_nents == 1) 2265 return kmap(sg_page(sg)) + sg->offset; 2266 2267 /* >1 page. use vmap */ 2268 pages = kmalloc(sizeof(*pages) * cmd->t_data_nents, GFP_KERNEL); 2269 if (!pages) 2270 return NULL; 2271 2272 /* convert sg[] to pages[] */ 2273 for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) { 2274 pages[i] = sg_page(sg); 2275 } 2276 2277 cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL); 2278 kfree(pages); 2279 if (!cmd->t_data_vmap) 2280 return NULL; 2281 2282 return cmd->t_data_vmap + cmd->t_data_sg[0].offset; 2283 } 2284 EXPORT_SYMBOL(transport_kmap_data_sg); 2285 2286 void transport_kunmap_data_sg(struct se_cmd *cmd) 2287 { 2288 if (!cmd->t_data_nents) { 2289 return; 2290 } else if (cmd->t_data_nents == 1) { 2291 kunmap(sg_page(cmd->t_data_sg)); 2292 return; 2293 } 2294 2295 vunmap(cmd->t_data_vmap); 2296 cmd->t_data_vmap = NULL; 2297 } 2298 EXPORT_SYMBOL(transport_kunmap_data_sg); 2299 2300 int 2301 target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents, u32 length, 2302 bool zero_page) 2303 { 2304 struct scatterlist *sg; 2305 struct page *page; 2306 gfp_t zero_flag = (zero_page) ? __GFP_ZERO : 0; 2307 unsigned int nent; 2308 int i = 0; 2309 2310 nent = DIV_ROUND_UP(length, PAGE_SIZE); 2311 sg = kmalloc(sizeof(struct scatterlist) * nent, GFP_KERNEL); 2312 if (!sg) 2313 return -ENOMEM; 2314 2315 sg_init_table(sg, nent); 2316 2317 while (length) { 2318 u32 page_len = min_t(u32, length, PAGE_SIZE); 2319 page = alloc_page(GFP_KERNEL | zero_flag); 2320 if (!page) 2321 goto out; 2322 2323 sg_set_page(&sg[i], page, page_len, 0); 2324 length -= page_len; 2325 i++; 2326 } 2327 *sgl = sg; 2328 *nents = nent; 2329 return 0; 2330 2331 out: 2332 while (i > 0) { 2333 i--; 2334 __free_page(sg_page(&sg[i])); 2335 } 2336 kfree(sg); 2337 return -ENOMEM; 2338 } 2339 2340 /* 2341 * Allocate any required resources to execute the command. For writes we 2342 * might not have the payload yet, so notify the fabric via a call to 2343 * ->write_pending instead. Otherwise place it on the execution queue. 
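 *
 * Illustrative fabric-side sketch (hypothetical driver code, not part of this
 * file): once transport_generic_new_cmd() has invoked ->write_pending(), the
 * fabric fetches the WRITE payload into cmd->t_data_sg and then hands the
 * command back to the core, roughly:
 *
 *	static int demo_fabric_write_pending(struct se_cmd *se_cmd)
 *	{
 *		demo_fabric_start_dataout(se_cmd);	(hypothetical transfer)
 *		return 0;
 *	}
 *
 *	static void demo_fabric_dataout_done(struct se_cmd *se_cmd)
 *	{
 *		target_execute_cmd(se_cmd);		(resume execution here)
 *	}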
2344 */ 2345 sense_reason_t 2346 transport_generic_new_cmd(struct se_cmd *cmd) 2347 { 2348 int ret = 0; 2349 bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB); 2350 2351 if (cmd->prot_op != TARGET_PROT_NORMAL && 2352 !(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) { 2353 ret = target_alloc_sgl(&cmd->t_prot_sg, &cmd->t_prot_nents, 2354 cmd->prot_length, true); 2355 if (ret < 0) 2356 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2357 } 2358 2359 /* 2360 * Determine if the TCM fabric module has already allocated physical 2361 * memory, and is directly calling transport_generic_map_mem_to_cmd() 2362 * beforehand. 2363 */ 2364 if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) && 2365 cmd->data_length) { 2366 2367 if ((cmd->se_cmd_flags & SCF_BIDI) || 2368 (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) { 2369 u32 bidi_length; 2370 2371 if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) 2372 bidi_length = cmd->t_task_nolb * 2373 cmd->se_dev->dev_attrib.block_size; 2374 else 2375 bidi_length = cmd->data_length; 2376 2377 ret = target_alloc_sgl(&cmd->t_bidi_data_sg, 2378 &cmd->t_bidi_data_nents, 2379 bidi_length, zero_flag); 2380 if (ret < 0) 2381 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2382 } 2383 2384 ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents, 2385 cmd->data_length, zero_flag); 2386 if (ret < 0) 2387 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2388 } else if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) && 2389 cmd->data_length) { 2390 /* 2391 * Special case for COMPARE_AND_WRITE with fabrics 2392 * using SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC. 2393 */ 2394 u32 caw_length = cmd->t_task_nolb * 2395 cmd->se_dev->dev_attrib.block_size; 2396 2397 ret = target_alloc_sgl(&cmd->t_bidi_data_sg, 2398 &cmd->t_bidi_data_nents, 2399 caw_length, zero_flag); 2400 if (ret < 0) 2401 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2402 } 2403 /* 2404 * If this command is not a write we can execute it right here, 2405 * for write buffers we need to notify the fabric driver first 2406 * and let it call back once the write buffers are ready. 2407 */ 2408 target_add_to_state_list(cmd); 2409 if (cmd->data_direction != DMA_TO_DEVICE || cmd->data_length == 0) { 2410 target_execute_cmd(cmd); 2411 return 0; 2412 } 2413 transport_cmd_check_stop(cmd, false, true); 2414 2415 ret = cmd->se_tfo->write_pending(cmd); 2416 if (ret == -EAGAIN || ret == -ENOMEM) 2417 goto queue_full; 2418 2419 /* fabric drivers should only return -EAGAIN or -ENOMEM as error */ 2420 WARN_ON(ret); 2421 2422 return (!ret) ?
0 : TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2423 2424 queue_full: 2425 pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", cmd); 2426 cmd->t_state = TRANSPORT_COMPLETE_QF_WP; 2427 transport_handle_queue_full(cmd, cmd->se_dev); 2428 return 0; 2429 } 2430 EXPORT_SYMBOL(transport_generic_new_cmd); 2431 2432 static void transport_write_pending_qf(struct se_cmd *cmd) 2433 { 2434 int ret; 2435 2436 ret = cmd->se_tfo->write_pending(cmd); 2437 if (ret == -EAGAIN || ret == -ENOMEM) { 2438 pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", 2439 cmd); 2440 transport_handle_queue_full(cmd, cmd->se_dev); 2441 } 2442 } 2443 2444 int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) 2445 { 2446 unsigned long flags; 2447 int ret = 0; 2448 2449 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) { 2450 if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) 2451 transport_wait_for_tasks(cmd); 2452 2453 ret = transport_release_cmd(cmd); 2454 } else { 2455 if (wait_for_tasks) 2456 transport_wait_for_tasks(cmd); 2457 /* 2458 * Handle WRITE failure case where transport_generic_new_cmd() 2459 * has already added se_cmd to state_list, but fabric has 2460 * failed command before I/O submission. 2461 */ 2462 if (cmd->state_active) { 2463 spin_lock_irqsave(&cmd->t_state_lock, flags); 2464 target_remove_from_state_list(cmd); 2465 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2466 } 2467 2468 if (cmd->se_lun) 2469 transport_lun_remove_cmd(cmd); 2470 2471 ret = transport_put_cmd(cmd); 2472 } 2473 return ret; 2474 } 2475 EXPORT_SYMBOL(transport_generic_free_cmd); 2476 2477 /* target_get_sess_cmd - Add command to active ->sess_cmd_list 2478 * @se_cmd: command descriptor to add 2479 * @ack_kref: Signal that fabric will perform an ack via target_put_sess_cmd() 2480 */ 2481 int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref) 2482 { 2483 struct se_session *se_sess = se_cmd->se_sess; 2484 unsigned long flags; 2485 int ret = 0; 2486 2487 /* 2488 * Add a second kref if the fabric caller is expecting to handle 2489 * fabric acknowledgement that requires two target_put_sess_cmd() 2490 * invocations before se_cmd descriptor release.
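 *
 * Usage sketch (illustrative, hypothetical fabric code): a driver passing
 * ack_kref = true owns two references and balances them with two
 * target_put_sess_cmd() calls, e.g.:
 *
 *	ret = target_get_sess_cmd(se_cmd, true);
 *	if (ret < 0)
 *		return ret;			(session is tearing down)
 *
 *	target_put_sess_cmd(se_cmd);		(command processing complete)
 *	target_put_sess_cmd(se_cmd);		(fabric-level acknowledgement)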
2491 */ 2492 if (ack_kref) 2493 kref_get(&se_cmd->cmd_kref); 2494 2495 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 2496 if (se_sess->sess_tearing_down) { 2497 ret = -ESHUTDOWN; 2498 goto out; 2499 } 2500 list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list); 2501 out: 2502 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2503 2504 if (ret && ack_kref) 2505 target_put_sess_cmd(se_cmd); 2506 2507 return ret; 2508 } 2509 EXPORT_SYMBOL(target_get_sess_cmd); 2510 2511 static void target_release_cmd_kref(struct kref *kref) 2512 { 2513 struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref); 2514 struct se_session *se_sess = se_cmd->se_sess; 2515 unsigned long flags; 2516 2517 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 2518 if (list_empty(&se_cmd->se_cmd_list)) { 2519 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2520 se_cmd->se_tfo->release_cmd(se_cmd); 2521 return; 2522 } 2523 if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) { 2524 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2525 complete(&se_cmd->cmd_wait_comp); 2526 return; 2527 } 2528 list_del(&se_cmd->se_cmd_list); 2529 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2530 2531 se_cmd->se_tfo->release_cmd(se_cmd); 2532 } 2533 2534 /* target_put_sess_cmd - Check for active I/O shutdown via kref_put 2535 * @se_cmd: command descriptor to drop 2536 */ 2537 int target_put_sess_cmd(struct se_cmd *se_cmd) 2538 { 2539 struct se_session *se_sess = se_cmd->se_sess; 2540 2541 if (!se_sess) { 2542 se_cmd->se_tfo->release_cmd(se_cmd); 2543 return 1; 2544 } 2545 return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref); 2546 } 2547 EXPORT_SYMBOL(target_put_sess_cmd); 2548 2549 /* target_sess_cmd_list_set_waiting - Flag all commands in 2550 * sess_cmd_list to complete cmd_wait_comp. Set 2551 * sess_tearing_down so no more commands are queued. 
2552 * @se_sess: session to flag 2553 */ 2554 void target_sess_cmd_list_set_waiting(struct se_session *se_sess) 2555 { 2556 struct se_cmd *se_cmd; 2557 unsigned long flags; 2558 2559 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 2560 if (se_sess->sess_tearing_down) { 2561 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2562 return; 2563 } 2564 se_sess->sess_tearing_down = 1; 2565 list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list); 2566 2567 list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list) 2568 se_cmd->cmd_wait_set = 1; 2569 2570 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2571 } 2572 EXPORT_SYMBOL(target_sess_cmd_list_set_waiting); 2573 2574 /* target_wait_for_sess_cmds - Wait for outstanding descriptors 2575 * @se_sess: session to wait for active I/O 2576 */ 2577 void target_wait_for_sess_cmds(struct se_session *se_sess) 2578 { 2579 struct se_cmd *se_cmd, *tmp_cmd; 2580 unsigned long flags; 2581 2582 list_for_each_entry_safe(se_cmd, tmp_cmd, 2583 &se_sess->sess_wait_list, se_cmd_list) { 2584 list_del(&se_cmd->se_cmd_list); 2585 2586 pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:" 2587 " %d\n", se_cmd, se_cmd->t_state, 2588 se_cmd->se_tfo->get_cmd_state(se_cmd)); 2589 2590 wait_for_completion(&se_cmd->cmd_wait_comp); 2591 pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d" 2592 " fabric state: %d\n", se_cmd, se_cmd->t_state, 2593 se_cmd->se_tfo->get_cmd_state(se_cmd)); 2594 2595 se_cmd->se_tfo->release_cmd(se_cmd); 2596 } 2597 2598 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 2599 WARN_ON(!list_empty(&se_sess->sess_cmd_list)); 2600 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2601 2602 } 2603 EXPORT_SYMBOL(target_wait_for_sess_cmds); 2604 2605 void transport_clear_lun_ref(struct se_lun *lun) 2606 { 2607 percpu_ref_kill(&lun->lun_ref); 2608 wait_for_completion(&lun->lun_ref_comp); 2609 } 2610 2611 /** 2612 * transport_wait_for_tasks - wait for completion to occur 2613 * @cmd: command to wait 2614 * 2615 * Called from frontend fabric context to wait for storage engine 2616 * to pause and/or release frontend generated struct se_cmd. 
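 *
 * Typical usage (illustrative, not part of the original comment): fabric
 * drivers rarely call this directly; transport_generic_free_cmd(cmd, 1) above
 * invokes it when wait_for_tasks is set. It returns false if the command never
 * became CMD_T_ACTIVE, and true once the CMD_T_STOP handshake has completed.
 * The session-wide counterpart is target_sess_cmd_list_set_waiting() followed
 * by target_wait_for_sess_cmds().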
2617 */ 2618 bool transport_wait_for_tasks(struct se_cmd *cmd) 2619 { 2620 unsigned long flags; 2621 2622 spin_lock_irqsave(&cmd->t_state_lock, flags); 2623 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && 2624 !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) { 2625 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2626 return false; 2627 } 2628 2629 if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && 2630 !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) { 2631 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2632 return false; 2633 } 2634 2635 if (!(cmd->transport_state & CMD_T_ACTIVE)) { 2636 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2637 return false; 2638 } 2639 2640 cmd->transport_state |= CMD_T_STOP; 2641 2642 pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08llx i_state: %d, t_state: %d, CMD_T_STOP\n", 2643 cmd, cmd->tag, cmd->se_tfo->get_cmd_state(cmd), cmd->t_state); 2644 2645 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2646 2647 wait_for_completion(&cmd->t_transport_stop_comp); 2648 2649 spin_lock_irqsave(&cmd->t_state_lock, flags); 2650 cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP); 2651 2652 pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->t_transport_stop_comp) for ITT: 0x%08llx\n", 2653 cmd->tag); 2654 2655 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2656 2657 return true; 2658 } 2659 EXPORT_SYMBOL(transport_wait_for_tasks); 2660 2661 struct sense_info { 2662 u8 key; 2663 u8 asc; 2664 u8 ascq; 2665 bool add_sector_info; 2666 }; 2667 2668 static const struct sense_info sense_info_table[] = { 2669 [TCM_NO_SENSE] = { 2670 .key = NOT_READY 2671 }, 2672 [TCM_NON_EXISTENT_LUN] = { 2673 .key = ILLEGAL_REQUEST, 2674 .asc = 0x25 /* LOGICAL UNIT NOT SUPPORTED */ 2675 }, 2676 [TCM_UNSUPPORTED_SCSI_OPCODE] = { 2677 .key = ILLEGAL_REQUEST, 2678 .asc = 0x20, /* INVALID COMMAND OPERATION CODE */ 2679 }, 2680 [TCM_SECTOR_COUNT_TOO_MANY] = { 2681 .key = ILLEGAL_REQUEST, 2682 .asc = 0x20, /* INVALID COMMAND OPERATION CODE */ 2683 }, 2684 [TCM_UNKNOWN_MODE_PAGE] = { 2685 .key = ILLEGAL_REQUEST, 2686 .asc = 0x24, /* INVALID FIELD IN CDB */ 2687 }, 2688 [TCM_CHECK_CONDITION_ABORT_CMD] = { 2689 .key = ABORTED_COMMAND, 2690 .asc = 0x29, /* BUS DEVICE RESET FUNCTION OCCURRED */ 2691 .ascq = 0x03, 2692 }, 2693 [TCM_INCORRECT_AMOUNT_OF_DATA] = { 2694 .key = ABORTED_COMMAND, 2695 .asc = 0x0c, /* WRITE ERROR */ 2696 .ascq = 0x0d, /* NOT ENOUGH UNSOLICITED DATA */ 2697 }, 2698 [TCM_INVALID_CDB_FIELD] = { 2699 .key = ILLEGAL_REQUEST, 2700 .asc = 0x24, /* INVALID FIELD IN CDB */ 2701 }, 2702 [TCM_INVALID_PARAMETER_LIST] = { 2703 .key = ILLEGAL_REQUEST, 2704 .asc = 0x26, /* INVALID FIELD IN PARAMETER LIST */ 2705 }, 2706 [TCM_PARAMETER_LIST_LENGTH_ERROR] = { 2707 .key = ILLEGAL_REQUEST, 2708 .asc = 0x1a, /* PARAMETER LIST LENGTH ERROR */ 2709 }, 2710 [TCM_UNEXPECTED_UNSOLICITED_DATA] = { 2711 .key = ILLEGAL_REQUEST, 2712 .asc = 0x0c, /* WRITE ERROR */ 2713 .ascq = 0x0c, /* UNEXPECTED_UNSOLICITED_DATA */ 2714 }, 2715 [TCM_SERVICE_CRC_ERROR] = { 2716 .key = ABORTED_COMMAND, 2717 .asc = 0x47, /* PROTOCOL SERVICE CRC ERROR */ 2718 .ascq = 0x05, /* N/A */ 2719 }, 2720 [TCM_SNACK_REJECTED] = { 2721 .key = ABORTED_COMMAND, 2722 .asc = 0x11, /* READ ERROR */ 2723 .ascq = 0x13, /* FAILED RETRANSMISSION REQUEST */ 2724 }, 2725 [TCM_WRITE_PROTECTED] = { 2726 .key = DATA_PROTECT, 2727 .asc = 0x27, /* WRITE PROTECTED */ 2728 }, 2729 [TCM_ADDRESS_OUT_OF_RANGE] = { 2730 .key = ILLEGAL_REQUEST, 2731 .asc = 0x21, /* LOGICAL BLOCK ADDRESS OUT OF RANGE */ 2732 }, 2733 
[TCM_CHECK_CONDITION_UNIT_ATTENTION] = { 2734 .key = UNIT_ATTENTION, 2735 }, 2736 [TCM_CHECK_CONDITION_NOT_READY] = { 2737 .key = NOT_READY, 2738 }, 2739 [TCM_MISCOMPARE_VERIFY] = { 2740 .key = MISCOMPARE, 2741 .asc = 0x1d, /* MISCOMPARE DURING VERIFY OPERATION */ 2742 .ascq = 0x00, 2743 }, 2744 [TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED] = { 2745 .key = ABORTED_COMMAND, 2746 .asc = 0x10, 2747 .ascq = 0x01, /* LOGICAL BLOCK GUARD CHECK FAILED */ 2748 .add_sector_info = true, 2749 }, 2750 [TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED] = { 2751 .key = ABORTED_COMMAND, 2752 .asc = 0x10, 2753 .ascq = 0x02, /* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */ 2754 .add_sector_info = true, 2755 }, 2756 [TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED] = { 2757 .key = ABORTED_COMMAND, 2758 .asc = 0x10, 2759 .ascq = 0x03, /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */ 2760 .add_sector_info = true, 2761 }, 2762 [TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE] = { 2763 /* 2764 * Returning ILLEGAL REQUEST would cause immediate IO errors on 2765 * Solaris initiators. Returning NOT READY instead means the 2766 * operations will be retried a finite number of times and we 2767 * can survive intermittent errors. 2768 */ 2769 .key = NOT_READY, 2770 .asc = 0x08, /* LOGICAL UNIT COMMUNICATION FAILURE */ 2771 }, 2772 }; 2773 2774 static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason) 2775 { 2776 const struct sense_info *si; 2777 u8 *buffer = cmd->sense_buffer; 2778 int r = (__force int)reason; 2779 u8 asc, ascq; 2780 bool desc_format = target_sense_desc_format(cmd->se_dev); 2781 2782 if (r < ARRAY_SIZE(sense_info_table) && sense_info_table[r].key) 2783 si = &sense_info_table[r]; 2784 else 2785 si = &sense_info_table[(__force int) 2786 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE]; 2787 2788 if (reason == TCM_CHECK_CONDITION_UNIT_ATTENTION) { 2789 core_scsi3_ua_for_check_condition(cmd, &asc, &ascq); 2790 WARN_ON_ONCE(asc == 0); 2791 } else if (si->asc == 0) { 2792 WARN_ON_ONCE(cmd->scsi_asc == 0); 2793 asc = cmd->scsi_asc; 2794 ascq = cmd->scsi_ascq; 2795 } else { 2796 asc = si->asc; 2797 ascq = si->ascq; 2798 } 2799 2800 scsi_build_sense_buffer(desc_format, buffer, si->key, asc, ascq); 2801 if (si->add_sector_info) 2802 return scsi_set_sense_information(buffer, 2803 cmd->scsi_sense_length, 2804 cmd->bad_sector); 2805 2806 return 0; 2807 } 2808 2809 int 2810 transport_send_check_condition_and_sense(struct se_cmd *cmd, 2811 sense_reason_t reason, int from_transport) 2812 { 2813 unsigned long flags; 2814 2815 spin_lock_irqsave(&cmd->t_state_lock, flags); 2816 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { 2817 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2818 return 0; 2819 } 2820 cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION; 2821 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2822 2823 if (!from_transport) { 2824 int rc; 2825 2826 cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE; 2827 cmd->scsi_status = SAM_STAT_CHECK_CONDITION; 2828 cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER; 2829 rc = translate_sense_reason(cmd, reason); 2830 if (rc) 2831 return rc; 2832 } 2833 2834 trace_target_cmd_complete(cmd); 2835 return cmd->se_tfo->queue_status(cmd); 2836 } 2837 EXPORT_SYMBOL(transport_send_check_condition_and_sense); 2838 2839 int transport_check_aborted_status(struct se_cmd *cmd, int send_status) 2840 { 2841 if (!(cmd->transport_state & CMD_T_ABORTED)) 2842 return 0; 2843 2844 /* 2845 * If cmd has been aborted but either no status is to be sent or it has 2846 * already been sent, just return 2847 */ 2848 if 
(!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS)) 2849 return 1; 2850 2851 pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB: 0x%02x ITT: 0x%08llx\n", 2852 cmd->t_task_cdb[0], cmd->tag); 2853 2854 cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS; 2855 cmd->scsi_status = SAM_STAT_TASK_ABORTED; 2856 trace_target_cmd_complete(cmd); 2857 cmd->se_tfo->queue_status(cmd); 2858 2859 return 1; 2860 } 2861 EXPORT_SYMBOL(transport_check_aborted_status); 2862 2863 void transport_send_task_abort(struct se_cmd *cmd) 2864 { 2865 unsigned long flags; 2866 2867 spin_lock_irqsave(&cmd->t_state_lock, flags); 2868 if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION)) { 2869 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2870 return; 2871 } 2872 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2873 2874 /* 2875 * If there are still expected incoming fabric WRITEs, we wait 2876 * until they have completed before sending a TASK_ABORTED 2877 * response. This response with TASK_ABORTED status will be 2878 * queued back to fabric module by transport_check_aborted_status(). 2879 */ 2880 if (cmd->data_direction == DMA_TO_DEVICE) { 2881 if (cmd->se_tfo->write_pending_status(cmd) != 0) { 2882 cmd->transport_state |= CMD_T_ABORTED; 2883 cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS; 2884 return; 2885 } 2886 } 2887 cmd->scsi_status = SAM_STAT_TASK_ABORTED; 2888 2889 transport_lun_remove_cmd(cmd); 2890 2891 pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x, ITT: 0x%08llx\n", 2892 cmd->t_task_cdb[0], cmd->tag); 2893 2894 trace_target_cmd_complete(cmd); 2895 cmd->se_tfo->queue_status(cmd); 2896 } 2897 2898 static void target_tmr_work(struct work_struct *work) 2899 { 2900 struct se_cmd *cmd = container_of(work, struct se_cmd, work); 2901 struct se_device *dev = cmd->se_dev; 2902 struct se_tmr_req *tmr = cmd->se_tmr_req; 2903 int ret; 2904 2905 switch (tmr->function) { 2906 case TMR_ABORT_TASK: 2907 core_tmr_abort_task(dev, tmr, cmd->se_sess); 2908 break; 2909 case TMR_ABORT_TASK_SET: 2910 case TMR_CLEAR_ACA: 2911 case TMR_CLEAR_TASK_SET: 2912 tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED; 2913 break; 2914 case TMR_LUN_RESET: 2915 ret = core_tmr_lun_reset(dev, tmr, NULL, NULL); 2916 tmr->response = (!ret) ?
TMR_FUNCTION_COMPLETE : 2917 TMR_FUNCTION_REJECTED; 2918 if (tmr->response == TMR_FUNCTION_COMPLETE) { 2919 target_ua_allocate_lun(cmd->se_sess->se_node_acl, 2920 cmd->orig_fe_lun, 0x29, 2921 ASCQ_29H_BUS_DEVICE_RESET_FUNCTION_OCCURRED); 2922 } 2923 break; 2924 case TMR_TARGET_WARM_RESET: 2925 tmr->response = TMR_FUNCTION_REJECTED; 2926 break; 2927 case TMR_TARGET_COLD_RESET: 2928 tmr->response = TMR_FUNCTION_REJECTED; 2929 break; 2930 default: 2931 pr_err("Unknown TMR function: 0x%02x.\n", 2932 tmr->function); 2933 tmr->response = TMR_FUNCTION_REJECTED; 2934 break; 2935 } 2936 2937 cmd->t_state = TRANSPORT_ISTATE_PROCESSING; 2938 cmd->se_tfo->queue_tm_rsp(cmd); 2939 2940 transport_cmd_check_stop_to_fabric(cmd); 2941 } 2942 2943 int transport_generic_handle_tmr( 2944 struct se_cmd *cmd) 2945 { 2946 unsigned long flags; 2947 2948 spin_lock_irqsave(&cmd->t_state_lock, flags); 2949 cmd->transport_state |= CMD_T_ACTIVE; 2950 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2951 2952 INIT_WORK(&cmd->work, target_tmr_work); 2953 queue_work(cmd->se_dev->tmr_wq, &cmd->work); 2954 return 0; 2955 } 2956 EXPORT_SYMBOL(transport_generic_handle_tmr); 2957 2958 bool 2959 target_check_wce(struct se_device *dev) 2960 { 2961 bool wce = false; 2962 2963 if (dev->transport->get_write_cache) 2964 wce = dev->transport->get_write_cache(dev); 2965 else if (dev->dev_attrib.emulate_write_cache > 0) 2966 wce = true; 2967 2968 return wce; 2969 } 2970 2971 bool 2972 target_check_fua(struct se_device *dev) 2973 { 2974 return target_check_wce(dev) && dev->dev_attrib.emulate_fua_write > 0; 2975 } 2976
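/*
 * Illustrative backend sketch (hypothetical code, not part of this file):
 * deciding whether a WRITE carrying the FUA bit must reach stable media.
 * "fua_requested" is assumed to come from the parsed CDB.
 *
 *	bool need_stable_write = fua_requested && target_check_fua(dev);
 *
 * When target_check_wce() reports false the device behaves as write-through,
 * so neither FUA handling nor an explicit cache flush is required.
 */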