/*******************************************************************************
 * Filename: target_core_transport.c
 *
 * This file contains the Generic Target Engine Core.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/cdrom.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <linux/vmalloc.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_common.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

#define CREATE_TRACE_POINTS
#include <trace/events/target.h>

static struct workqueue_struct *target_completion_wq;
static struct kmem_cache *se_sess_cache;
struct kmem_cache *se_ua_cache;
struct kmem_cache *t10_pr_reg_cache;
struct kmem_cache *t10_alua_lu_gp_cache;
struct kmem_cache *t10_alua_lu_gp_mem_cache;
struct kmem_cache *t10_alua_tg_pt_gp_cache;
struct kmem_cache *t10_alua_lba_map_cache;
struct kmem_cache *t10_alua_lba_map_mem_cache;

static void transport_complete_task_attr(struct se_cmd *cmd);
static void transport_handle_queue_full(struct se_cmd *cmd,
		struct se_device *dev);
static int transport_put_cmd(struct se_cmd *cmd);
static void target_complete_ok_work(struct work_struct *work);

int init_se_kmem_caches(void)
{
	se_sess_cache = kmem_cache_create("se_sess_cache",
			sizeof(struct se_session), __alignof__(struct se_session),
			0, NULL);
	if (!se_sess_cache) {
		pr_err("kmem_cache_create() for struct se_session"
				" failed\n");
		goto out;
	}
	se_ua_cache = kmem_cache_create("se_ua_cache",
			sizeof(struct se_ua), __alignof__(struct se_ua),
			0, NULL);
	if (!se_ua_cache) {
		pr_err("kmem_cache_create() for struct se_ua failed\n");
		goto out_free_sess_cache;
	}
	t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
			sizeof(struct t10_pr_registration),
			__alignof__(struct t10_pr_registration), 0, NULL);
	if (!t10_pr_reg_cache) {
		pr_err("kmem_cache_create() for struct t10_pr_registration"
				" failed\n");
		goto out_free_ua_cache;
	}
	t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
			sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
			0, NULL);
	if (!t10_alua_lu_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_cache"
				" failed\n");
		goto out_free_pr_reg_cache;
	}
	t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
			sizeof(struct t10_alua_lu_gp_member),
			__alignof__(struct t10_alua_lu_gp_member), 0, NULL);
	if (!t10_alua_lu_gp_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_"
				"cache failed\n");
		goto out_free_lu_gp_cache;
	}
	t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
			sizeof(struct t10_alua_tg_pt_gp),
			__alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
	if (!t10_alua_tg_pt_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
				"cache failed\n");
		goto out_free_lu_gp_mem_cache;
	}
	t10_alua_lba_map_cache = kmem_cache_create(
			"t10_alua_lba_map_cache",
			sizeof(struct t10_alua_lba_map),
			__alignof__(struct t10_alua_lba_map), 0, NULL);
	if (!t10_alua_lba_map_cache) {
		pr_err("kmem_cache_create() for t10_alua_lba_map_"
				"cache failed\n");
		goto out_free_tg_pt_gp_cache;
	}
	t10_alua_lba_map_mem_cache = kmem_cache_create(
			"t10_alua_lba_map_mem_cache",
			sizeof(struct t10_alua_lba_map_member),
			__alignof__(struct t10_alua_lba_map_member), 0, NULL);
	if (!t10_alua_lba_map_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_lba_map_mem_"
				"cache failed\n");
		goto out_free_lba_map_cache;
	}

	target_completion_wq = alloc_workqueue("target_completion",
					       WQ_MEM_RECLAIM, 0);
	if (!target_completion_wq)
		goto out_free_lba_map_mem_cache;

	return 0;

out_free_lba_map_mem_cache:
	kmem_cache_destroy(t10_alua_lba_map_mem_cache);
out_free_lba_map_cache:
	kmem_cache_destroy(t10_alua_lba_map_cache);
out_free_tg_pt_gp_cache:
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
out_free_lu_gp_mem_cache:
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
out_free_lu_gp_cache:
	kmem_cache_destroy(t10_alua_lu_gp_cache);
out_free_pr_reg_cache:
	kmem_cache_destroy(t10_pr_reg_cache);
out_free_ua_cache:
	kmem_cache_destroy(se_ua_cache);
out_free_sess_cache:
	kmem_cache_destroy(se_sess_cache);
out:
	return -ENOMEM;
}

void release_se_kmem_caches(void)
{
	destroy_workqueue(target_completion_wq);
	kmem_cache_destroy(se_sess_cache);
	kmem_cache_destroy(se_ua_cache);
	kmem_cache_destroy(t10_pr_reg_cache);
	kmem_cache_destroy(t10_alua_lu_gp_cache);
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
	kmem_cache_destroy(t10_alua_lba_map_cache);
	kmem_cache_destroy(t10_alua_lba_map_mem_cache);
}

/* This code ensures unique mib indexes are handed out. */
static DEFINE_SPINLOCK(scsi_mib_index_lock);
static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];

/*
 * Allocate a new row index for the entry type specified
 */
u32 scsi_get_new_index(scsi_index_t type)
{
	u32 new_index;

	BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX));

	spin_lock(&scsi_mib_index_lock);
	new_index = ++scsi_mib_index[type];
	spin_unlock(&scsi_mib_index_lock);

	return new_index;
}

void transport_subsystem_check_init(void)
{
	int ret;
	static int sub_api_initialized;

	if (sub_api_initialized)
		return;

	ret = request_module("target_core_iblock");
	if (ret != 0)
		pr_err("Unable to load target_core_iblock\n");

	ret = request_module("target_core_file");
	if (ret != 0)
		pr_err("Unable to load target_core_file\n");

	ret = request_module("target_core_pscsi");
	if (ret != 0)
		pr_err("Unable to load target_core_pscsi\n");

	ret = request_module("target_core_user");
	if (ret != 0)
		pr_err("Unable to load target_core_user\n");

	sub_api_initialized = 1;
}

struct se_session *transport_init_session(enum target_prot_op sup_prot_ops)
{
	struct se_session *se_sess;

	se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
	if (!se_sess) {
		pr_err("Unable to allocate struct se_session from"
				" se_sess_cache\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&se_sess->sess_list);
	INIT_LIST_HEAD(&se_sess->sess_acl_list);
	INIT_LIST_HEAD(&se_sess->sess_cmd_list);
	INIT_LIST_HEAD(&se_sess->sess_wait_list);
	spin_lock_init(&se_sess->sess_cmd_lock);
	se_sess->sup_prot_ops = sup_prot_ops;

	return se_sess;
}
EXPORT_SYMBOL(transport_init_session);

int transport_alloc_session_tags(struct se_session *se_sess,
				 unsigned int tag_num, unsigned int tag_size)
{
	int rc;

	se_sess->sess_cmd_map = kzalloc(tag_num * tag_size,
					GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
	if (!se_sess->sess_cmd_map) {
		se_sess->sess_cmd_map = vzalloc(tag_num * tag_size);
		if (!se_sess->sess_cmd_map) {
			pr_err("Unable to allocate se_sess->sess_cmd_map\n");
			return -ENOMEM;
		}
	}

	rc = percpu_ida_init(&se_sess->sess_tag_pool, tag_num);
	if (rc < 0) {
		pr_err("Unable to init se_sess->sess_tag_pool,"
			" tag_num: %u\n", tag_num);
		kvfree(se_sess->sess_cmd_map);
		se_sess->sess_cmd_map = NULL;
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL(transport_alloc_session_tags);

struct se_session *transport_init_session_tags(unsigned int tag_num,
					       unsigned int tag_size,
					       enum target_prot_op sup_prot_ops)
{
	struct se_session *se_sess;
	int rc;

	if (tag_num != 0 && !tag_size) {
		pr_err("init_session_tags called with percpu-ida tag_num:"
		       " %u, but zero tag_size\n", tag_num);
		return ERR_PTR(-EINVAL);
	}
	if (!tag_num && tag_size) {
		pr_err("init_session_tags called with percpu-ida tag_size:"
		       " %u, but zero tag_num\n", tag_size);
		return ERR_PTR(-EINVAL);
	}

	se_sess = transport_init_session(sup_prot_ops);
	if (IS_ERR(se_sess))
		return se_sess;

	rc = transport_alloc_session_tags(se_sess, tag_num, tag_size);
	if (rc < 0) {
		transport_free_session(se_sess);
		return ERR_PTR(-ENOMEM);
	}

	return se_sess;
}
EXPORT_SYMBOL(transport_init_session_tags);
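
/*
 * Illustrative sketch (not part of the driver): a fabric driver using the
 * percpu-ida based pre-allocation above would typically size each tag slot
 * as its own per-command structure (which embeds struct se_cmd) and call
 * transport_init_session_tags() at login time.  The names
 * struct hypothetical_fabric_cmd and HYPOTHETICAL_TAG_NUM are assumptions
 * for this example only.
 *
 *	struct hypothetical_fabric_cmd {
 *		struct se_cmd se_cmd;
 *		unsigned char sense[TRANSPORT_SENSE_BUFFER];
 *	};
 *
 *	sess = transport_init_session_tags(HYPOTHETICAL_TAG_NUM,
 *				sizeof(struct hypothetical_fabric_cmd),
 *				TARGET_PROT_NORMAL);
 *	if (IS_ERR(sess))
 *		return PTR_ERR(sess);
 */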

/*
 * Called with struct se_portal_group->session_lock held.
 */
void __transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
	unsigned char buf[PR_REG_ISID_LEN];

	se_sess->se_tpg = se_tpg;
	se_sess->fabric_sess_ptr = fabric_sess_ptr;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate active se_sessions.
	 *
	 * Only set for struct se_session's that will actually be moving I/O.
	 * eg: *NOT* discovery sessions.
	 */
	if (se_nacl) {
		/*
		 * Determine if fabric allows for T10-PI feature bits exposed to
		 * initiators for device backends with !dev->dev_attrib.pi_prot_type.
		 *
		 * If so, then always save prot_type on a per se_node_acl node
		 * basis and re-instate the previous sess_prot_type to avoid
		 * disabling PI from below any previously initiator side
		 * registered LUNs.
		 */
		if (se_nacl->saved_prot_type)
			se_sess->sess_prot_type = se_nacl->saved_prot_type;
		else if (tfo->tpg_check_prot_fabric_only)
			se_sess->sess_prot_type = se_nacl->saved_prot_type =
					tfo->tpg_check_prot_fabric_only(se_tpg);
		/*
		 * If the fabric module supports an ISID based TransportID,
		 * save this value in binary from the fabric I_T Nexus now.
		 */
		if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
			memset(&buf[0], 0, PR_REG_ISID_LEN);
			se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
					&buf[0], PR_REG_ISID_LEN);
			se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
		}

		spin_lock_irq(&se_nacl->nacl_sess_lock);
		/*
		 * The se_nacl->nacl_sess pointer will be set to the
		 * last active I_T Nexus for each struct se_node_acl.
		 */
		se_nacl->nacl_sess = se_sess;

		list_add_tail(&se_sess->sess_acl_list,
			      &se_nacl->acl_sess_list);
		spin_unlock_irq(&se_nacl->nacl_sess_lock);
	}
	list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);

	pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
		se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr);
}
EXPORT_SYMBOL(__transport_register_session);

void transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	unsigned long flags;

	spin_lock_irqsave(&se_tpg->session_lock, flags);
	__transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
	spin_unlock_irqrestore(&se_tpg->session_lock, flags);
}
EXPORT_SYMBOL(transport_register_session);

struct se_session *
target_alloc_session(struct se_portal_group *tpg,
		     unsigned int tag_num, unsigned int tag_size,
		     enum target_prot_op prot_op,
		     const char *initiatorname, void *private,
		     int (*callback)(struct se_portal_group *,
				     struct se_session *, void *))
{
	struct se_session *sess;

	/*
	 * If the fabric driver is using percpu-ida based pre allocation
	 * of I/O descriptor tags, go ahead and perform that setup now..
	 */
	if (tag_num != 0)
		sess = transport_init_session_tags(tag_num, tag_size, prot_op);
	else
		sess = transport_init_session(prot_op);

	if (IS_ERR(sess))
		return sess;

	sess->se_node_acl = core_tpg_check_initiator_node_acl(tpg,
					(unsigned char *)initiatorname);
	if (!sess->se_node_acl) {
		transport_free_session(sess);
		return ERR_PTR(-EACCES);
	}
	/*
	 * Go ahead and perform any remaining fabric setup that is
	 * required before transport_register_session().
	 */
	if (callback != NULL) {
		int rc = callback(tpg, sess, private);
		if (rc) {
			transport_free_session(sess);
			return ERR_PTR(rc);
		}
	}

	transport_register_session(tpg, sess->se_node_acl, sess, private);
	return sess;
}
EXPORT_SYMBOL(target_alloc_session);
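
/*
 * Illustrative sketch (not part of the driver): a fabric login path would
 * normally hand session setup to target_alloc_session() and do any
 * fabric-private wiring in the callback, which runs after the NodeACL
 * lookup but before transport_register_session().  The names
 * hypothetical_fabric_sess_init(), hf_sess and struct
 * hypothetical_fabric_cmd are assumptions for this example only.
 *
 *	static int hypothetical_fabric_sess_init(struct se_portal_group *tpg,
 *						 struct se_session *sess,
 *						 void *private)
 *	{
 *		struct hypothetical_fabric_session *hf_sess = private;
 *
 *		hf_sess->se_sess = sess;
 *		return 0;
 *	}
 *
 *	sess = target_alloc_session(se_tpg, 128,
 *				sizeof(struct hypothetical_fabric_cmd),
 *				TARGET_PROT_NORMAL, initiator_wwn, hf_sess,
 *				hypothetical_fabric_sess_init);
 *	if (IS_ERR(sess))
 *		return PTR_ERR(sess);
 */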

ssize_t target_show_dynamic_sessions(struct se_portal_group *se_tpg, char *page)
{
	struct se_session *se_sess;
	ssize_t len = 0;

	spin_lock_bh(&se_tpg->session_lock);
	list_for_each_entry(se_sess, &se_tpg->tpg_sess_list, sess_list) {
		if (!se_sess->se_node_acl)
			continue;
		if (!se_sess->se_node_acl->dynamic_node_acl)
			continue;
		if (strlen(se_sess->se_node_acl->initiatorname) + 1 + len > PAGE_SIZE)
			break;

		len += snprintf(page + len, PAGE_SIZE - len, "%s\n",
				se_sess->se_node_acl->initiatorname);
		len += 1; /* Include NULL terminator */
	}
	spin_unlock_bh(&se_tpg->session_lock);

	return len;
}
EXPORT_SYMBOL(target_show_dynamic_sessions);

static void target_complete_nacl(struct kref *kref)
{
	struct se_node_acl *nacl = container_of(kref,
				struct se_node_acl, acl_kref);
	struct se_portal_group *se_tpg = nacl->se_tpg;

	if (!nacl->dynamic_stop) {
		complete(&nacl->acl_free_comp);
		return;
	}

	mutex_lock(&se_tpg->acl_node_mutex);
	list_del(&nacl->acl_list);
	mutex_unlock(&se_tpg->acl_node_mutex);

	core_tpg_wait_for_nacl_pr_ref(nacl);
	core_free_device_list_for_node(nacl, se_tpg);
	kfree(nacl);
}

void target_put_nacl(struct se_node_acl *nacl)
{
	kref_put(&nacl->acl_kref, target_complete_nacl);
}
EXPORT_SYMBOL(target_put_nacl);

void transport_deregister_session_configfs(struct se_session *se_sess)
{
	struct se_node_acl *se_nacl;
	unsigned long flags;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
	 */
	se_nacl = se_sess->se_node_acl;
	if (se_nacl) {
		spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
		if (!list_empty(&se_sess->sess_acl_list))
			list_del_init(&se_sess->sess_acl_list);
		/*
		 * If the session list is empty, then clear the pointer.
		 * Otherwise, set the struct se_session pointer from the tail
		 * element of the per struct se_node_acl active session list.
		 */
		if (list_empty(&se_nacl->acl_sess_list))
			se_nacl->nacl_sess = NULL;
		else {
			se_nacl->nacl_sess = container_of(
					se_nacl->acl_sess_list.prev,
					struct se_session, sess_acl_list);
		}
		spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
	}
}
EXPORT_SYMBOL(transport_deregister_session_configfs);

void transport_free_session(struct se_session *se_sess)
{
	struct se_node_acl *se_nacl = se_sess->se_node_acl;

	/*
	 * Drop the se_node_acl->nacl_kref obtained from within
	 * core_tpg_get_initiator_node_acl().
	 */
	if (se_nacl) {
		struct se_portal_group *se_tpg = se_nacl->se_tpg;
		const struct target_core_fabric_ops *se_tfo = se_tpg->se_tpg_tfo;
		unsigned long flags;

		se_sess->se_node_acl = NULL;

		/*
		 * Also determine if we need to drop the extra ->cmd_kref if
		 * it had been previously dynamically generated, and
		 * the endpoint is not caching dynamic ACLs.
		 */
		mutex_lock(&se_tpg->acl_node_mutex);
		if (se_nacl->dynamic_node_acl &&
		    !se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
			spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
			if (list_empty(&se_nacl->acl_sess_list))
				se_nacl->dynamic_stop = true;
			spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);

			if (se_nacl->dynamic_stop)
				list_del(&se_nacl->acl_list);
		}
		mutex_unlock(&se_tpg->acl_node_mutex);

		if (se_nacl->dynamic_stop)
			target_put_nacl(se_nacl);

		target_put_nacl(se_nacl);
	}
	if (se_sess->sess_cmd_map) {
		percpu_ida_destroy(&se_sess->sess_tag_pool);
		kvfree(se_sess->sess_cmd_map);
	}
	kmem_cache_free(se_sess_cache, se_sess);
}
EXPORT_SYMBOL(transport_free_session);

void transport_deregister_session(struct se_session *se_sess)
{
	struct se_portal_group *se_tpg = se_sess->se_tpg;
	unsigned long flags;

	if (!se_tpg) {
		transport_free_session(se_sess);
		return;
	}

	spin_lock_irqsave(&se_tpg->session_lock, flags);
	list_del(&se_sess->sess_list);
	se_sess->se_tpg = NULL;
	se_sess->fabric_sess_ptr = NULL;
	spin_unlock_irqrestore(&se_tpg->session_lock, flags);

	pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
		se_tpg->se_tpg_tfo->get_fabric_name());
	/*
	 * If last kref is dropping now for an explicit NodeACL, awake sleeping
	 * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group
	 * removal context from within transport_free_session() code.
	 *
	 * For dynamic ACL, target_put_nacl() uses target_complete_nacl()
	 * to release all remaining generate_node_acl=1 created ACL resources.
	 */

	transport_free_session(se_sess);
}
EXPORT_SYMBOL(transport_deregister_session);

static void target_remove_from_state_list(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned long flags;

	if (!dev)
		return;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	if (cmd->state_active) {
		list_del(&cmd->state_list);
		cmd->state_active = false;
	}
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}

static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
{
	unsigned long flags;

	target_remove_from_state_list(cmd);

	/*
	 * Clear struct se_cmd->se_lun before the handoff to FE.
	 */
	cmd->se_lun = NULL;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	/*
	 * Determine if frontend context caller is requesting the stopping of
	 * this command for frontend exceptions.
	 */
	if (cmd->transport_state & CMD_T_STOP) {
		pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
			__func__, __LINE__, cmd->tag);

		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		complete_all(&cmd->t_transport_stop_comp);
		return 1;
	}
	cmd->transport_state &= ~CMD_T_ACTIVE;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	/*
	 * Some fabric modules like tcm_loop can release their internally
	 * allocated I/O reference and struct se_cmd now.
	 *
	 * Fabric modules are expected to return '1' here if the se_cmd being
	 * passed is released at this point, or zero if not being released.
	 */
	return cmd->se_tfo->check_stop_free ? cmd->se_tfo->check_stop_free(cmd)
		: 0;
}

static void transport_lun_remove_cmd(struct se_cmd *cmd)
{
	struct se_lun *lun = cmd->se_lun;

	if (!lun)
		return;

	if (cmpxchg(&cmd->lun_ref_active, true, false))
		percpu_ref_put(&lun->lun_ref);
}

void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
{
	bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF);

	if (cmd->se_cmd_flags & SCF_SE_LUN_CMD)
		transport_lun_remove_cmd(cmd);
	/*
	 * Allow the fabric driver to unmap any resources before
	 * releasing the descriptor via TFO->release_cmd()
	 */
	if (remove)
		cmd->se_tfo->aborted_task(cmd);

	if (transport_cmd_check_stop_to_fabric(cmd))
		return;
	if (remove && ack_kref)
		transport_put_cmd(cmd);
}

static void target_complete_failure_work(struct work_struct *work)
{
	struct se_cmd *cmd = container_of(work, struct se_cmd, work);

	transport_generic_request_failure(cmd,
		TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
}

/*
 * Used when asking transport to copy Sense Data from the underlying
 * Linux/SCSI struct scsi_cmnd
 */
static unsigned char *transport_get_sense_buffer(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	WARN_ON(!cmd->se_lun);

	if (!dev)
		return NULL;

	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION)
		return NULL;

	cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;

	pr_debug("HBA_[%u]_PLUG[%s]: Requesting sense for SAM STATUS: 0x%02x\n",
		dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status);
	return cmd->sense_buffer;
}

void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
{
	struct se_device *dev = cmd->se_dev;
	int success = scsi_status == GOOD;
	unsigned long flags;

	cmd->scsi_status = scsi_status;

	spin_lock_irqsave(&cmd->t_state_lock, flags);

	if (dev && dev->transport->transport_complete) {
		dev->transport->transport_complete(cmd,
				cmd->t_data_sg,
				transport_get_sense_buffer(cmd));
		if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
			success = 1;
	}

	/*
	 * Check for case where an explicit ABORT_TASK has been received
	 * and transport_wait_for_tasks() will be waiting for completion..
	 */
	if (cmd->transport_state & CMD_T_ABORTED ||
	    cmd->transport_state & CMD_T_STOP) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		complete_all(&cmd->t_transport_stop_comp);
		return;
	} else if (!success) {
		INIT_WORK(&cmd->work, target_complete_failure_work);
	} else {
		INIT_WORK(&cmd->work, target_complete_ok_work);
	}

	cmd->t_state = TRANSPORT_COMPLETE;
	cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	if (cmd->se_cmd_flags & SCF_USE_CPUID)
		queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work);
	else
		queue_work(target_completion_wq, &cmd->work);
}
EXPORT_SYMBOL(target_complete_cmd);

void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length)
{
	if (scsi_status == SAM_STAT_GOOD && length < cmd->data_length) {
		if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
			cmd->residual_count += cmd->data_length - length;
		} else {
			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
			cmd->residual_count = cmd->data_length - length;
		}

		cmd->data_length = length;
	}

	target_complete_cmd(cmd, scsi_status);
}
EXPORT_SYMBOL(target_complete_cmd_with_length);
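
/*
 * Illustrative sketch (not part of the driver): a backend completion path
 * that produced fewer bytes than the initiator asked for reports the short
 * transfer through target_complete_cmd_with_length(), which sets
 * SCF_UNDERFLOW_BIT and the residual before queueing the normal completion.
 * The value valid_bytes below is an assumption for this example only.
 *
 *	// e.g. INQUIRY allocation length 256, but only 64 bytes built:
 *	target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, valid_bytes);
 *	// cmd->residual_count becomes cmd->data_length - valid_bytes,
 *	// and cmd->data_length is reduced to valid_bytes.
 */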
"WRITE_PENDING" 804 : "UNKNOWN"); 805 806 if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) 807 transport_write_pending_qf(cmd); 808 else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) 809 transport_complete_qf(cmd); 810 } 811 } 812 813 unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd) 814 { 815 switch (cmd->data_direction) { 816 case DMA_NONE: 817 return "NONE"; 818 case DMA_FROM_DEVICE: 819 return "READ"; 820 case DMA_TO_DEVICE: 821 return "WRITE"; 822 case DMA_BIDIRECTIONAL: 823 return "BIDI"; 824 default: 825 break; 826 } 827 828 return "UNKNOWN"; 829 } 830 831 void transport_dump_dev_state( 832 struct se_device *dev, 833 char *b, 834 int *bl) 835 { 836 *bl += sprintf(b + *bl, "Status: "); 837 if (dev->export_count) 838 *bl += sprintf(b + *bl, "ACTIVATED"); 839 else 840 *bl += sprintf(b + *bl, "DEACTIVATED"); 841 842 *bl += sprintf(b + *bl, " Max Queue Depth: %d", dev->queue_depth); 843 *bl += sprintf(b + *bl, " SectorSize: %u HwMaxSectors: %u\n", 844 dev->dev_attrib.block_size, 845 dev->dev_attrib.hw_max_sectors); 846 *bl += sprintf(b + *bl, " "); 847 } 848 849 void transport_dump_vpd_proto_id( 850 struct t10_vpd *vpd, 851 unsigned char *p_buf, 852 int p_buf_len) 853 { 854 unsigned char buf[VPD_TMP_BUF_SIZE]; 855 int len; 856 857 memset(buf, 0, VPD_TMP_BUF_SIZE); 858 len = sprintf(buf, "T10 VPD Protocol Identifier: "); 859 860 switch (vpd->protocol_identifier) { 861 case 0x00: 862 sprintf(buf+len, "Fibre Channel\n"); 863 break; 864 case 0x10: 865 sprintf(buf+len, "Parallel SCSI\n"); 866 break; 867 case 0x20: 868 sprintf(buf+len, "SSA\n"); 869 break; 870 case 0x30: 871 sprintf(buf+len, "IEEE 1394\n"); 872 break; 873 case 0x40: 874 sprintf(buf+len, "SCSI Remote Direct Memory Access" 875 " Protocol\n"); 876 break; 877 case 0x50: 878 sprintf(buf+len, "Internet SCSI (iSCSI)\n"); 879 break; 880 case 0x60: 881 sprintf(buf+len, "SAS Serial SCSI Protocol\n"); 882 break; 883 case 0x70: 884 sprintf(buf+len, "Automation/Drive Interface Transport" 885 " Protocol\n"); 886 break; 887 case 0x80: 888 sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n"); 889 break; 890 default: 891 sprintf(buf+len, "Unknown 0x%02x\n", 892 vpd->protocol_identifier); 893 break; 894 } 895 896 if (p_buf) 897 strncpy(p_buf, buf, p_buf_len); 898 else 899 pr_debug("%s", buf); 900 } 901 902 void 903 transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83) 904 { 905 /* 906 * Check if the Protocol Identifier Valid (PIV) bit is set.. 

void
transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * Check if the Protocol Identifier Valid (PIV) bit is set..
	 *
	 * from spc3r23.pdf section 7.5.1
	 */
	if (page_83[1] & 0x80) {
		vpd->protocol_identifier = (page_83[0] & 0xf0);
		vpd->protocol_identifier_set = 1;
		transport_dump_vpd_proto_id(vpd, NULL, 0);
	}
}
EXPORT_SYMBOL(transport_set_vpd_proto_id);

int transport_dump_vpd_assoc(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Association: ");

	switch (vpd->association) {
	case 0x00:
		sprintf(buf+len, "addressed logical unit\n");
		break;
	case 0x10:
		sprintf(buf+len, "target port\n");
		break;
	case 0x20:
		sprintf(buf+len, "SCSI target device\n");
		break;
	default:
		sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
		ret = -EINVAL;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);

	return ret;
}

int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * The VPD identification association..
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 297
	 */
	vpd->association = (page_83[1] & 0x30);
	return transport_dump_vpd_assoc(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_assoc);

int transport_dump_vpd_ident_type(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Type: ");

	switch (vpd->device_identifier_type) {
	case 0x00:
		sprintf(buf+len, "Vendor specific\n");
		break;
	case 0x01:
		sprintf(buf+len, "T10 Vendor ID based\n");
		break;
	case 0x02:
		sprintf(buf+len, "EUI-64 based\n");
		break;
	case 0x03:
		sprintf(buf+len, "NAA\n");
		break;
	case 0x04:
		sprintf(buf+len, "Relative target port identifier\n");
		break;
	case 0x08:
		sprintf(buf+len, "SCSI name string\n");
		break;
	default:
		sprintf(buf+len, "Unsupported: 0x%02x\n",
				vpd->device_identifier_type);
		ret = -EINVAL;
		break;
	}

	if (p_buf) {
		if (p_buf_len < strlen(buf)+1)
			return -EINVAL;
		strncpy(p_buf, buf, p_buf_len);
	} else {
		pr_debug("%s", buf);
	}

	return ret;
}

int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * The VPD identifier type..
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 298
	 */
	vpd->device_identifier_type = (page_83[1] & 0x0f);
	return transport_dump_vpd_ident_type(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident_type);

int transport_dump_vpd_ident(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;

	memset(buf, 0, VPD_TMP_BUF_SIZE);

	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		snprintf(buf, sizeof(buf),
			"T10 VPD Binary Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	case 0x02: /* ASCII */
		snprintf(buf, sizeof(buf),
			"T10 VPD ASCII Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	case 0x03: /* UTF-8 */
		snprintf(buf, sizeof(buf),
			"T10 VPD UTF-8 Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	default:
		sprintf(buf, "T10 VPD Device Identifier encoding unsupported:"
			" 0x%02x", vpd->device_identifier_code_set);
		ret = -EINVAL;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);

	return ret;
}

int
transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
{
	static const char hex_str[] = "0123456789abcdef";
	int j = 0, i = 4; /* offset to start of the identifier */

	/*
	 * The VPD Code Set (encoding)
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 296
	 */
	vpd->device_identifier_code_set = (page_83[0] & 0x0f);
	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		vpd->device_identifier[j++] =
				hex_str[vpd->device_identifier_type];
		while (i < (4 + page_83[3])) {
			vpd->device_identifier[j++] =
				hex_str[(page_83[i] & 0xf0) >> 4];
			vpd->device_identifier[j++] =
				hex_str[page_83[i] & 0x0f];
			i++;
		}
		break;
	case 0x02: /* ASCII */
	case 0x03: /* UTF-8 */
		while (i < (4 + page_83[3]))
			vpd->device_identifier[j++] = page_83[i++];
		break;
	default:
		break;
	}

	return transport_dump_vpd_ident(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident);

static sense_reason_t
target_check_max_data_sg_nents(struct se_cmd *cmd, struct se_device *dev,
			       unsigned int size)
{
	u32 mtl;

	if (!cmd->se_tfo->max_data_sg_nents)
		return TCM_NO_SENSE;
	/*
	 * Check if fabric enforced maximum SGL entries per I/O descriptor
	 * exceeds se_cmd->data_length.  If true, set SCF_UNDERFLOW_BIT +
	 * residual_count and reduce original cmd->data_length to maximum
	 * length based on single PAGE_SIZE entry scatter-lists.
	 */
	mtl = (cmd->se_tfo->max_data_sg_nents * PAGE_SIZE);
	if (cmd->data_length > mtl) {
		/*
		 * If an existing CDB overflow is present, calculate new residual
		 * based on CDB size minus fabric maximum transfer length.
		 *
		 * If an existing CDB underflow is present, calculate new residual
		 * based on original cmd->data_length minus fabric maximum transfer
		 * length.
		 *
		 * Otherwise, set the underflow residual based on cmd->data_length
		 * minus fabric maximum transfer length.
		 */
		if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
			cmd->residual_count = (size - mtl);
		} else if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
			u32 orig_dl = size + cmd->residual_count;
			cmd->residual_count = (orig_dl - mtl);
		} else {
			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
			cmd->residual_count = (cmd->data_length - mtl);
		}
		cmd->data_length = mtl;
		/*
		 * Reset sbc_check_prot() calculated protection payload
		 * length based upon the new smaller MTL.
		 */
		if (cmd->prot_length) {
			u32 sectors = (mtl / dev->dev_attrib.block_size);
			cmd->prot_length = dev->prot_length * sectors;
		}
	}
	return TCM_NO_SENSE;
}

sense_reason_t
target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
{
	struct se_device *dev = cmd->se_dev;

	if (cmd->unknown_data_length) {
		cmd->data_length = size;
	} else if (size != cmd->data_length) {
		pr_warn("TARGET_CORE[%s]: Expected Transfer Length:"
			" %u does not match SCSI CDB Length: %u for SAM Opcode:"
			" 0x%02x\n", cmd->se_tfo->get_fabric_name(),
				cmd->data_length, size, cmd->t_task_cdb[0]);

		if (cmd->data_direction == DMA_TO_DEVICE &&
		    cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
			pr_err("Rejecting underflow/overflow WRITE data\n");
			return TCM_INVALID_CDB_FIELD;
		}
		/*
		 * Reject READ_* or WRITE_* with overflow/underflow for
		 * type SCF_SCSI_DATA_CDB.
		 */
		if (dev->dev_attrib.block_size != 512) {
			pr_err("Failing OVERFLOW/UNDERFLOW for LBA op"
				" CDB on non 512-byte sector setup subsystem"
				" plugin: %s\n", dev->transport->name);
			/* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
			return TCM_INVALID_CDB_FIELD;
		}
		/*
		 * For the overflow case keep the existing fabric provided
		 * ->data_length.  Otherwise for the underflow case, reset
		 * ->data_length to the smaller SCSI expected data transfer
		 * length.
		 */
		if (size > cmd->data_length) {
			cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
			cmd->residual_count = (size - cmd->data_length);
		} else {
			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
			cmd->residual_count = (cmd->data_length - size);
			cmd->data_length = size;
		}
	}

	return target_check_max_data_sg_nents(cmd, dev, size);

}
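
/*
 * Worked example (illustrative numbers only, assuming the reject checks
 * above do not apply): if the CDB-derived length is size = 8192 but the
 * fabric reported cmd->data_length = 4096, target_cmd_size_check() takes
 * the overflow branch: SCF_OVERFLOW_BIT is set, residual_count = 4096 and
 * data_length stays 4096.  Conversely, size = 2048 against
 * data_length = 4096 sets SCF_UNDERFLOW_BIT, residual_count = 2048 and
 * shrinks data_length to 2048.
 */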

/*
 * Used by fabric modules containing a local struct se_cmd within their
 * fabric dependent per I/O descriptor.
 *
 * Preserves the value of @cmd->tag.
 */
void transport_init_se_cmd(
	struct se_cmd *cmd,
	const struct target_core_fabric_ops *tfo,
	struct se_session *se_sess,
	u32 data_length,
	int data_direction,
	int task_attr,
	unsigned char *sense_buffer)
{
	INIT_LIST_HEAD(&cmd->se_delayed_node);
	INIT_LIST_HEAD(&cmd->se_qf_node);
	INIT_LIST_HEAD(&cmd->se_cmd_list);
	INIT_LIST_HEAD(&cmd->state_list);
	init_completion(&cmd->t_transport_stop_comp);
	init_completion(&cmd->cmd_wait_comp);
	spin_lock_init(&cmd->t_state_lock);
	kref_init(&cmd->cmd_kref);

	cmd->se_tfo = tfo;
	cmd->se_sess = se_sess;
	cmd->data_length = data_length;
	cmd->data_direction = data_direction;
	cmd->sam_task_attr = task_attr;
	cmd->sense_buffer = sense_buffer;

	cmd->state_active = false;
}
EXPORT_SYMBOL(transport_init_se_cmd);

static sense_reason_t
transport_check_alloc_task_attr(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Check if SAM Task Attribute emulation is enabled for this
	 * struct se_device storage object
	 */
	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
		return 0;

	if (cmd->sam_task_attr == TCM_ACA_TAG) {
		pr_debug("SAM Task Attribute ACA"
			" emulation is not supported\n");
		return TCM_INVALID_CDB_FIELD;
	}

	return 0;
}

sense_reason_t
target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb)
{
	struct se_device *dev = cmd->se_dev;
	sense_reason_t ret;

	/*
	 * Ensure that the received CDB is less than the max (252 + 8) bytes
	 * for VARIABLE_LENGTH_CMD
	 */
	if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
		pr_err("Received SCSI CDB with command_size: %d that"
			" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
			scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
		return TCM_INVALID_CDB_FIELD;
	}
	/*
	 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
	 * allocate the additional extended CDB buffer now..  Otherwise
	 * setup the pointer from __t_task_cdb to t_task_cdb.
	 */
	if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
		cmd->t_task_cdb = kzalloc(scsi_command_size(cdb),
						GFP_KERNEL);
		if (!cmd->t_task_cdb) {
			pr_err("Unable to allocate cmd->t_task_cdb"
				" %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
				scsi_command_size(cdb),
				(unsigned long)sizeof(cmd->__t_task_cdb));
			return TCM_OUT_OF_RESOURCES;
		}
	} else
		cmd->t_task_cdb = &cmd->__t_task_cdb[0];
	/*
	 * Copy the original CDB into cmd->t_task_cdb.
	 */
	memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));

	trace_target_sequencer_start(cmd);

	ret = dev->transport->parse_cdb(cmd);
	if (ret == TCM_UNSUPPORTED_SCSI_OPCODE)
		pr_warn_ratelimited("%s/%s: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n",
				    cmd->se_tfo->get_fabric_name(),
				    cmd->se_sess->se_node_acl->initiatorname,
				    cmd->t_task_cdb[0]);
	if (ret)
		return ret;

	ret = transport_check_alloc_task_attr(cmd);
	if (ret)
		return ret;

	cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
	atomic_long_inc(&cmd->se_lun->lun_stats.cmd_pdus);
	return 0;
}
EXPORT_SYMBOL(target_setup_cmd_from_cdb);

/*
 * Used by fabric module frontends to queue tasks directly.
 * May only be used from process context.
 */
int transport_handle_cdb_direct(
	struct se_cmd *cmd)
{
	sense_reason_t ret;

	if (!cmd->se_lun) {
		dump_stack();
		pr_err("cmd->se_lun is NULL\n");
		return -EINVAL;
	}
	if (in_interrupt()) {
		dump_stack();
		pr_err("transport_generic_handle_cdb cannot be called"
				" from interrupt context\n");
		return -EINVAL;
	}
	/*
	 * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE to ensure that
	 * outstanding descriptors are handled correctly during shutdown via
	 * transport_wait_for_tasks()
	 *
	 * Also, we don't take cmd->t_state_lock here as we only expect
	 * this to be called for initial descriptor submission.
	 */
	cmd->t_state = TRANSPORT_NEW_CMD;
	cmd->transport_state |= CMD_T_ACTIVE;

	/*
	 * transport_generic_new_cmd() is already handling QUEUE_FULL,
	 * so follow TRANSPORT_NEW_CMD processing thread context usage
	 * and call transport_generic_request_failure() if necessary..
	 */
	ret = transport_generic_new_cmd(cmd);
	if (ret)
		transport_generic_request_failure(cmd, ret);
	return 0;
}
EXPORT_SYMBOL(transport_handle_cdb_direct);

sense_reason_t
transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
		u32 sgl_count, struct scatterlist *sgl_bidi, u32 sgl_bidi_count)
{
	if (!sgl || !sgl_count)
		return 0;

	/*
	 * Reject SCSI data overflow with map_mem_to_cmd() as incoming
	 * scatterlists already have been set to follow what the fabric
	 * passes for the original expected data transfer length.
	 */
	if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
		pr_warn("Rejecting SCSI DATA overflow for fabric using"
			" SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
		return TCM_INVALID_CDB_FIELD;
	}

	cmd->t_data_sg = sgl;
	cmd->t_data_nents = sgl_count;
	cmd->t_bidi_data_sg = sgl_bidi;
	cmd->t_bidi_data_nents = sgl_bidi_count;

	cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
	return 0;
}

/*
 * target_submit_cmd_map_sgls - lookup unpacked lun and submit uninitialized
 *			 se_cmd + use pre-allocated SGL memory.
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @cdb: pointer to SCSI CDB
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @data_length: fabric expected data transfer length
 * @task_attr: SAM task attribute
 * @data_dir: DMA data direction
 * @flags: flags for command submission from target_sc_flags_tables
 * @sgl: struct scatterlist memory for unidirectional mapping
 * @sgl_count: scatterlist count for unidirectional mapping
 * @sgl_bidi: struct scatterlist memory for bidirectional READ mapping
 * @sgl_bidi_count: scatterlist count for bidirectional READ mapping
 * @sgl_prot: struct scatterlist memory protection information
 * @sgl_prot_count: scatterlist count for protection information
 *
 * Task tags are supported if the caller has set @se_cmd->tag.
 *
 * Returns non zero to signal active I/O shutdown failure.  All other
 * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
 * but still return zero here.
 *
 * This may only be called from process context, and also currently
 * assumes internal allocation of fabric payload buffer by target-core.
 */
int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *cdb, unsigned char *sense, u64 unpacked_lun,
		u32 data_length, int task_attr, int data_dir, int flags,
		struct scatterlist *sgl, u32 sgl_count,
		struct scatterlist *sgl_bidi, u32 sgl_bidi_count,
		struct scatterlist *sgl_prot, u32 sgl_prot_count)
{
	struct se_portal_group *se_tpg;
	sense_reason_t rc;
	int ret;

	se_tpg = se_sess->se_tpg;
	BUG_ON(!se_tpg);
	BUG_ON(se_cmd->se_tfo || se_cmd->se_sess);
	BUG_ON(in_interrupt());
	/*
	 * Initialize se_cmd for target operation.  From this point
	 * exceptions are handled by sending exception status via
	 * target_core_fabric_ops->queue_status() callback
	 */
	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
				data_length, data_dir, task_attr, sense);

	if (flags & TARGET_SCF_USE_CPUID)
		se_cmd->se_cmd_flags |= SCF_USE_CPUID;
	else
		se_cmd->cpuid = WORK_CPU_UNBOUND;

	if (flags & TARGET_SCF_UNKNOWN_SIZE)
		se_cmd->unknown_data_length = 1;
	/*
	 * Obtain struct se_cmd->cmd_kref reference and add new cmd to
	 * se_sess->sess_cmd_list.  A second kref_get here is necessary
	 * for fabrics using TARGET_SCF_ACK_KREF that expect a second
	 * kref_put() to happen during fabric packet acknowledgement.
	 */
	ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
	if (ret)
		return ret;
	/*
	 * Signal bidirectional data payloads to target-core
	 */
	if (flags & TARGET_SCF_BIDI_OP)
		se_cmd->se_cmd_flags |= SCF_BIDI;
	/*
	 * Locate se_lun pointer and attach it to struct se_cmd
	 */
	rc = transport_lookup_cmd_lun(se_cmd, unpacked_lun);
	if (rc) {
		transport_send_check_condition_and_sense(se_cmd, rc, 0);
		target_put_sess_cmd(se_cmd);
		return 0;
	}

	rc = target_setup_cmd_from_cdb(se_cmd, cdb);
	if (rc != 0) {
		transport_generic_request_failure(se_cmd, rc);
		return 0;
	}

	/*
	 * Save pointers for SGLs containing protection information,
	 * if present.
	 */
	if (sgl_prot_count) {
		se_cmd->t_prot_sg = sgl_prot;
		se_cmd->t_prot_nents = sgl_prot_count;
		se_cmd->se_cmd_flags |= SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC;
	}

	/*
	 * When a non zero sgl_count has been passed perform SGL passthrough
	 * mapping for pre-allocated fabric memory instead of having target
	 * core perform an internal SGL allocation..
	 */
	if (sgl_count != 0) {
		BUG_ON(!sgl);

		/*
		 * A work-around for tcm_loop as some userspace code via
		 * scsi-generic do not memset their associated read buffers,
		 * so go ahead and do that here for type non-data CDBs.  Also
		 * note that this is currently guaranteed to be a single SGL
		 * for this case by target core in target_setup_cmd_from_cdb()
		 * -> transport_generic_cmd_sequencer().
		 */
		if (!(se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) &&
		     se_cmd->data_direction == DMA_FROM_DEVICE) {
			unsigned char *buf = NULL;

			if (sgl)
				buf = kmap(sg_page(sgl)) + sgl->offset;

			if (buf) {
				memset(buf, 0, sgl->length);
				kunmap(sg_page(sgl));
			}
		}

		rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
				sgl_bidi, sgl_bidi_count);
		if (rc != 0) {
			transport_generic_request_failure(se_cmd, rc);
			return 0;
		}
	}

	/*
	 * Check if we need to delay processing because of ALUA
	 * Active/NonOptimized primary access state..
	 */
	core_alua_check_nonop_delay(se_cmd);

	transport_handle_cdb_direct(se_cmd);
	return 0;
}
EXPORT_SYMBOL(target_submit_cmd_map_sgls);

/*
 * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @cdb: pointer to SCSI CDB
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @data_length: fabric expected data transfer length
 * @task_attr: SAM task attribute
 * @data_dir: DMA data direction
 * @flags: flags for command submission from target_sc_flags_tables
 *
 * Task tags are supported if the caller has set @se_cmd->tag.
 *
 * Returns non zero to signal active I/O shutdown failure.  All other
 * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
 * but still return zero here.
 *
 * This may only be called from process context, and also currently
 * assumes internal allocation of fabric payload buffer by target-core.
 *
 * It also assumes internal target core SGL memory allocation.
 */
int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *cdb, unsigned char *sense, u64 unpacked_lun,
		u32 data_length, int task_attr, int data_dir, int flags)
{
	return target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense,
			unpacked_lun, data_length, task_attr, data_dir,
			flags, NULL, 0, NULL, 0, NULL, 0);
}
EXPORT_SYMBOL(target_submit_cmd);
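
/*
 * Illustrative sketch (not part of the driver): a fabric receive path that
 * relies on target-core internal SGL allocation would typically fill in its
 * per-command descriptor and hand off with target_submit_cmd().  hf_cmd,
 * its fields, lun_id and expected_len are assumptions for this example only.
 *
 *	hf_cmd->se_cmd.tag = incoming_task_tag;
 *	if (target_submit_cmd(&hf_cmd->se_cmd, se_sess, cdb, hf_cmd->sense,
 *			      lun_id, expected_len, TCM_SIMPLE_TAG,
 *			      DMA_FROM_DEVICE, TARGET_SCF_ACK_KREF))
 *		goto drop_cmd;	// non-zero: active I/O shutdown in progress
 */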

static void target_complete_tmr_failure(struct work_struct *work)
{
	struct se_cmd *se_cmd = container_of(work, struct se_cmd, work);

	se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
	se_cmd->se_tfo->queue_tm_rsp(se_cmd);

	transport_cmd_check_stop_to_fabric(se_cmd);
}

/**
 * target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd
 *                     for TMR CDBs
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @fabric_tmr_ptr: fabric context for TMR req
 * @tm_type: Type of TM request
 * @gfp: gfp type for caller
 * @tag: referenced task tag for TMR_ABORT_TASK
 * @flags: submit cmd flags
 *
 * Callable from all contexts.
 **/

int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *sense, u64 unpacked_lun,
		void *fabric_tmr_ptr, unsigned char tm_type,
		gfp_t gfp, u64 tag, int flags)
{
	struct se_portal_group *se_tpg;
	int ret;

	se_tpg = se_sess->se_tpg;
	BUG_ON(!se_tpg);

	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
			      0, DMA_NONE, TCM_SIMPLE_TAG, sense);
	/*
	 * FIXME: Currently expect caller to handle se_cmd->se_tmr_req
	 * allocation failure.
	 */
	ret = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, tm_type, gfp);
	if (ret < 0)
		return -ENOMEM;

	if (tm_type == TMR_ABORT_TASK)
		se_cmd->se_tmr_req->ref_task_tag = tag;

	/* See target_submit_cmd for commentary */
	ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
	if (ret) {
		core_tmr_release_req(se_cmd->se_tmr_req);
		return ret;
	}

	ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun);
	if (ret) {
		/*
		 * For callback during failure handling, push this work off
		 * to process context with TMR_LUN_DOES_NOT_EXIST status.
		 */
		INIT_WORK(&se_cmd->work, target_complete_tmr_failure);
		schedule_work(&se_cmd->work);
		return 0;
	}
	transport_generic_handle_tmr(se_cmd);
	return 0;
}
EXPORT_SYMBOL(target_submit_tmr);

/*
 * Handle SAM-esque emulation for generic transport request failures.
 */
void transport_generic_request_failure(struct se_cmd *cmd,
		sense_reason_t sense_reason)
{
	int ret = 0, post_ret = 0;

	if (transport_check_aborted_status(cmd, 1))
		return;

	pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08llx"
		" CDB: 0x%02x\n", cmd, cmd->tag, cmd->t_task_cdb[0]);
	pr_debug("-----[ i_state: %d t_state: %d sense_reason: %d\n",
		cmd->se_tfo->get_cmd_state(cmd),
		cmd->t_state, sense_reason);
	pr_debug("-----[ CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n",
		(cmd->transport_state & CMD_T_ACTIVE) != 0,
		(cmd->transport_state & CMD_T_STOP) != 0,
		(cmd->transport_state & CMD_T_SENT) != 0);

	/*
	 * For SAM Task Attribute emulation for failed struct se_cmd
	 */
	transport_complete_task_attr(cmd);
	/*
	 * Handle special case for COMPARE_AND_WRITE failure, where the
	 * callback is expected to drop the per device ->caw_sem.
	 */
	if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
	     cmd->transport_complete_callback)
		cmd->transport_complete_callback(cmd, false, &post_ret);

	switch (sense_reason) {
	case TCM_NON_EXISTENT_LUN:
	case TCM_UNSUPPORTED_SCSI_OPCODE:
	case TCM_INVALID_CDB_FIELD:
	case TCM_INVALID_PARAMETER_LIST:
	case TCM_PARAMETER_LIST_LENGTH_ERROR:
	case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
	case TCM_UNKNOWN_MODE_PAGE:
	case TCM_WRITE_PROTECTED:
	case TCM_ADDRESS_OUT_OF_RANGE:
	case TCM_CHECK_CONDITION_ABORT_CMD:
	case TCM_CHECK_CONDITION_UNIT_ATTENTION:
	case TCM_CHECK_CONDITION_NOT_READY:
	case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED:
	case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
	case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
	case TCM_COPY_TARGET_DEVICE_NOT_REACHABLE:
	case TCM_TOO_MANY_TARGET_DESCS:
	case TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE:
	case TCM_TOO_MANY_SEGMENT_DESCS:
	case TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE:
		break;
	case TCM_OUT_OF_RESOURCES:
		sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		break;
	case TCM_RESERVATION_CONFLICT:
		/*
		 * No SENSE Data payload for this case, set SCSI Status
		 * and queue the response to $FABRIC_MOD.
		 *
		 * Uses linux/include/scsi/scsi.h SAM status codes defs
		 */
		cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
		/*
		 * For UA Interlock Code 11b, a RESERVATION CONFLICT will
		 * establish a UNIT ATTENTION with PREVIOUS RESERVATION
		 * CONFLICT STATUS.
		 *
		 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
		 */
		if (cmd->se_sess &&
		    cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl == 2) {
			target_ua_allocate_lun(cmd->se_sess->se_node_acl,
					       cmd->orig_fe_lun, 0x2C,
					ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
		}
		trace_target_cmd_complete(cmd);
		ret = cmd->se_tfo->queue_status(cmd);
		if (ret == -EAGAIN || ret == -ENOMEM)
			goto queue_full;
		goto check_stop;
	default:
		pr_err("Unknown transport error for CDB 0x%02x: %d\n",
			cmd->t_task_cdb[0], sense_reason);
		sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
		break;
	}

	ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0);
	if (ret == -EAGAIN || ret == -ENOMEM)
		goto queue_full;

check_stop:
	transport_lun_remove_cmd(cmd);
	transport_cmd_check_stop_to_fabric(cmd);
	return;

queue_full:
	cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
	transport_handle_queue_full(cmd, cmd->se_dev);
}
EXPORT_SYMBOL(transport_generic_request_failure);

void __target_execute_cmd(struct se_cmd *cmd, bool do_checks)
{
	sense_reason_t ret;

	if (!cmd->execute_cmd) {
		ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		goto err;
	}
	if (do_checks) {
		/*
		 * Check for an existing UNIT ATTENTION condition after
		 * target_handle_task_attr() has done SAM task attr
		 * checking, and possibly have already defered execution
		 * out to target_restart_delayed_cmds() context.
		 */
		ret = target_scsi3_ua_check(cmd);
		if (ret)
			goto err;

		ret = target_alua_state_check(cmd);
		if (ret)
			goto err;

		ret = target_check_reservation(cmd);
		if (ret) {
			cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
			goto err;
		}
	}

	ret = cmd->execute_cmd(cmd);
	if (!ret)
		return;
err:
	spin_lock_irq(&cmd->t_state_lock);
	cmd->transport_state &= ~CMD_T_SENT;
	spin_unlock_irq(&cmd->t_state_lock);

	transport_generic_request_failure(cmd, ret);
}

static int target_write_prot_action(struct se_cmd *cmd)
{
	u32 sectors;
	/*
	 * Perform WRITE_INSERT of PI using software emulation when backend
	 * device has PI enabled, if the transport has not already generated
	 * PI using hardware WRITE_INSERT offload.
	 */
	switch (cmd->prot_op) {
	case TARGET_PROT_DOUT_INSERT:
		if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_INSERT))
			sbc_dif_generate(cmd);
		break;
	case TARGET_PROT_DOUT_STRIP:
		if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_STRIP)
			break;

		sectors = cmd->data_length >> ilog2(cmd->se_dev->dev_attrib.block_size);
		cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba,
					     sectors, 0, cmd->t_prot_sg, 0);
		if (unlikely(cmd->pi_err)) {
			spin_lock_irq(&cmd->t_state_lock);
			cmd->transport_state &= ~CMD_T_SENT;
			spin_unlock_irq(&cmd->t_state_lock);
			transport_generic_request_failure(cmd, cmd->pi_err);
			return -1;
		}
		break;
	default:
		break;
	}

	return 0;
}

static bool target_handle_task_attr(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
		return false;

	cmd->se_cmd_flags |= SCF_TASK_ATTR_SET;

	/*
	 * Check for the existence of HEAD_OF_QUEUE, and if true return 1
	 * to allow the passed struct se_cmd list of tasks to the front of the list.
	 */
	switch (cmd->sam_task_attr) {
	case TCM_HEAD_TAG:
		pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x\n",
			 cmd->t_task_cdb[0]);
		return false;
	case TCM_ORDERED_TAG:
		atomic_inc_mb(&dev->dev_ordered_sync);

		pr_debug("Added ORDERED for CDB: 0x%02x to ordered list\n",
			 cmd->t_task_cdb[0]);

		/*
		 * Execute an ORDERED command if no other older commands
		 * exist that need to be completed first.
		 */
		if (!atomic_read(&dev->simple_cmds))
			return false;
		break;
	default:
		/*
		 * For SIMPLE and UNTAGGED Task Attribute commands
		 */
		atomic_inc_mb(&dev->simple_cmds);
		break;
	}

	if (atomic_read(&dev->dev_ordered_sync) == 0)
		return false;

	spin_lock(&dev->delayed_cmd_lock);
	list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list);
	spin_unlock(&dev->delayed_cmd_lock);

	pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to delayed CMD list\n",
		cmd->t_task_cdb[0], cmd->sam_task_attr);
	return true;
}

static int __transport_check_aborted_status(struct se_cmd *, int);
1884 */ 1885 spin_lock_irq(&cmd->t_state_lock); 1886 if (__transport_check_aborted_status(cmd, 1)) { 1887 spin_unlock_irq(&cmd->t_state_lock); 1888 return; 1889 } 1890 if (cmd->transport_state & CMD_T_STOP) { 1891 pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n", 1892 __func__, __LINE__, cmd->tag); 1893 1894 spin_unlock_irq(&cmd->t_state_lock); 1895 complete_all(&cmd->t_transport_stop_comp); 1896 return; 1897 } 1898 1899 cmd->t_state = TRANSPORT_PROCESSING; 1900 cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT; 1901 spin_unlock_irq(&cmd->t_state_lock); 1902 1903 if (target_write_prot_action(cmd)) 1904 return; 1905 1906 if (target_handle_task_attr(cmd)) { 1907 spin_lock_irq(&cmd->t_state_lock); 1908 cmd->transport_state &= ~CMD_T_SENT; 1909 spin_unlock_irq(&cmd->t_state_lock); 1910 return; 1911 } 1912 1913 __target_execute_cmd(cmd, true); 1914 } 1915 EXPORT_SYMBOL(target_execute_cmd); 1916 1917 /* 1918 * Process all commands up to the last received ORDERED task attribute which 1919 * requires another blocking boundary 1920 */ 1921 static void target_restart_delayed_cmds(struct se_device *dev) 1922 { 1923 for (;;) { 1924 struct se_cmd *cmd; 1925 1926 spin_lock(&dev->delayed_cmd_lock); 1927 if (list_empty(&dev->delayed_cmd_list)) { 1928 spin_unlock(&dev->delayed_cmd_lock); 1929 break; 1930 } 1931 1932 cmd = list_entry(dev->delayed_cmd_list.next, 1933 struct se_cmd, se_delayed_node); 1934 list_del(&cmd->se_delayed_node); 1935 spin_unlock(&dev->delayed_cmd_lock); 1936 1937 __target_execute_cmd(cmd, true); 1938 1939 if (cmd->sam_task_attr == TCM_ORDERED_TAG) 1940 break; 1941 } 1942 } 1943 1944 /* 1945 * Called from I/O completion to determine which dormant/delayed 1946 * and ordered cmds need to have their tasks added to the execution queue. 1947 */ 1948 static void transport_complete_task_attr(struct se_cmd *cmd) 1949 { 1950 struct se_device *dev = cmd->se_dev; 1951 1952 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) 1953 return; 1954 1955 if (!(cmd->se_cmd_flags & SCF_TASK_ATTR_SET)) 1956 goto restart; 1957 1958 if (cmd->sam_task_attr == TCM_SIMPLE_TAG) { 1959 atomic_dec_mb(&dev->simple_cmds); 1960 dev->dev_cur_ordered_id++; 1961 } else if (cmd->sam_task_attr == TCM_HEAD_TAG) { 1962 dev->dev_cur_ordered_id++; 1963 pr_debug("Incremented dev_cur_ordered_id: %u for HEAD_OF_QUEUE\n", 1964 dev->dev_cur_ordered_id); 1965 } else if (cmd->sam_task_attr == TCM_ORDERED_TAG) { 1966 atomic_dec_mb(&dev->dev_ordered_sync); 1967 1968 dev->dev_cur_ordered_id++; 1969 pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n", 1970 dev->dev_cur_ordered_id); 1971 } 1972 restart: 1973 target_restart_delayed_cmds(dev); 1974 } 1975 1976 static void transport_complete_qf(struct se_cmd *cmd) 1977 { 1978 int ret = 0; 1979 1980 transport_complete_task_attr(cmd); 1981 1982 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { 1983 trace_target_cmd_complete(cmd); 1984 ret = cmd->se_tfo->queue_status(cmd); 1985 goto out; 1986 } 1987 1988 switch (cmd->data_direction) { 1989 case DMA_FROM_DEVICE: 1990 if (cmd->scsi_status) 1991 goto queue_status; 1992 1993 trace_target_cmd_complete(cmd); 1994 ret = cmd->se_tfo->queue_data_in(cmd); 1995 break; 1996 case DMA_TO_DEVICE: 1997 if (cmd->se_cmd_flags & SCF_BIDI) { 1998 ret = cmd->se_tfo->queue_data_in(cmd); 1999 break; 2000 } 2001 /* Fall through for DMA_TO_DEVICE */ 2002 case DMA_NONE: 2003 queue_status: 2004 trace_target_cmd_complete(cmd); 2005 ret = cmd->se_tfo->queue_status(cmd); 2006 break; 2007 default: 2008 break; 2009 } 2010 2011 out: 2012 if (ret < 0) { 
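		/*
		 * The fabric still cannot take the response; park the command
		 * on the per-device QUEUE_FULL list and let qf_work_queue
		 * retry it later.
		 */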
2013 transport_handle_queue_full(cmd, cmd->se_dev); 2014 return; 2015 } 2016 transport_lun_remove_cmd(cmd); 2017 transport_cmd_check_stop_to_fabric(cmd); 2018 } 2019 2020 static void transport_handle_queue_full( 2021 struct se_cmd *cmd, 2022 struct se_device *dev) 2023 { 2024 spin_lock_irq(&dev->qf_cmd_lock); 2025 list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list); 2026 atomic_inc_mb(&dev->dev_qf_count); 2027 spin_unlock_irq(&cmd->se_dev->qf_cmd_lock); 2028 2029 schedule_work(&cmd->se_dev->qf_work_queue); 2030 } 2031 2032 static bool target_read_prot_action(struct se_cmd *cmd) 2033 { 2034 switch (cmd->prot_op) { 2035 case TARGET_PROT_DIN_STRIP: 2036 if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP)) { 2037 u32 sectors = cmd->data_length >> 2038 ilog2(cmd->se_dev->dev_attrib.block_size); 2039 2040 cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba, 2041 sectors, 0, cmd->t_prot_sg, 2042 0); 2043 if (cmd->pi_err) 2044 return true; 2045 } 2046 break; 2047 case TARGET_PROT_DIN_INSERT: 2048 if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_INSERT) 2049 break; 2050 2051 sbc_dif_generate(cmd); 2052 break; 2053 default: 2054 break; 2055 } 2056 2057 return false; 2058 } 2059 2060 static void target_complete_ok_work(struct work_struct *work) 2061 { 2062 struct se_cmd *cmd = container_of(work, struct se_cmd, work); 2063 int ret; 2064 2065 /* 2066 * Check if we need to move delayed/dormant tasks from cmds on the 2067 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task 2068 * Attribute. 2069 */ 2070 transport_complete_task_attr(cmd); 2071 2072 /* 2073 * Check to schedule QUEUE_FULL work, or execute an existing 2074 * cmd->transport_qf_callback() 2075 */ 2076 if (atomic_read(&cmd->se_dev->dev_qf_count) != 0) 2077 schedule_work(&cmd->se_dev->qf_work_queue); 2078 2079 /* 2080 * Check if we need to send a sense buffer from 2081 * the struct se_cmd in question. 2082 */ 2083 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { 2084 WARN_ON(!cmd->scsi_status); 2085 ret = transport_send_check_condition_and_sense( 2086 cmd, 0, 1); 2087 if (ret == -EAGAIN || ret == -ENOMEM) 2088 goto queue_full; 2089 2090 transport_lun_remove_cmd(cmd); 2091 transport_cmd_check_stop_to_fabric(cmd); 2092 return; 2093 } 2094 /* 2095 * Check for a callback, used by amongst other things 2096 * XDWRITE_READ_10 and COMPARE_AND_WRITE emulation. 2097 */ 2098 if (cmd->transport_complete_callback) { 2099 sense_reason_t rc; 2100 bool caw = (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE); 2101 bool zero_dl = !(cmd->data_length); 2102 int post_ret = 0; 2103 2104 rc = cmd->transport_complete_callback(cmd, true, &post_ret); 2105 if (!rc && !post_ret) { 2106 if (caw && zero_dl) 2107 goto queue_rsp; 2108 2109 return; 2110 } else if (rc) { 2111 ret = transport_send_check_condition_and_sense(cmd, 2112 rc, 0); 2113 if (ret == -EAGAIN || ret == -ENOMEM) 2114 goto queue_full; 2115 2116 transport_lun_remove_cmd(cmd); 2117 transport_cmd_check_stop_to_fabric(cmd); 2118 return; 2119 } 2120 } 2121 2122 queue_rsp: 2123 switch (cmd->data_direction) { 2124 case DMA_FROM_DEVICE: 2125 if (cmd->scsi_status) 2126 goto queue_status; 2127 2128 atomic_long_add(cmd->data_length, 2129 &cmd->se_lun->lun_stats.tx_data_octets); 2130 /* 2131 * Perform READ_STRIP of PI using software emulation when 2132 * backend had PI enabled, if the transport will not be 2133 * performing hardware READ_STRIP offload. 
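	 * If the software verification fails, cmd->pi_err is reported back to
	 * the initiator as a CHECK CONDITION below instead of the READ payload.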
2134 */ 2135 if (target_read_prot_action(cmd)) { 2136 ret = transport_send_check_condition_and_sense(cmd, 2137 cmd->pi_err, 0); 2138 if (ret == -EAGAIN || ret == -ENOMEM) 2139 goto queue_full; 2140 2141 transport_lun_remove_cmd(cmd); 2142 transport_cmd_check_stop_to_fabric(cmd); 2143 return; 2144 } 2145 2146 trace_target_cmd_complete(cmd); 2147 ret = cmd->se_tfo->queue_data_in(cmd); 2148 if (ret == -EAGAIN || ret == -ENOMEM) 2149 goto queue_full; 2150 break; 2151 case DMA_TO_DEVICE: 2152 atomic_long_add(cmd->data_length, 2153 &cmd->se_lun->lun_stats.rx_data_octets); 2154 /* 2155 * Check if we need to send READ payload for BIDI-COMMAND 2156 */ 2157 if (cmd->se_cmd_flags & SCF_BIDI) { 2158 atomic_long_add(cmd->data_length, 2159 &cmd->se_lun->lun_stats.tx_data_octets); 2160 ret = cmd->se_tfo->queue_data_in(cmd); 2161 if (ret == -EAGAIN || ret == -ENOMEM) 2162 goto queue_full; 2163 break; 2164 } 2165 /* Fall through for DMA_TO_DEVICE */ 2166 case DMA_NONE: 2167 queue_status: 2168 trace_target_cmd_complete(cmd); 2169 ret = cmd->se_tfo->queue_status(cmd); 2170 if (ret == -EAGAIN || ret == -ENOMEM) 2171 goto queue_full; 2172 break; 2173 default: 2174 break; 2175 } 2176 2177 transport_lun_remove_cmd(cmd); 2178 transport_cmd_check_stop_to_fabric(cmd); 2179 return; 2180 2181 queue_full: 2182 pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p," 2183 " data_direction: %d\n", cmd, cmd->data_direction); 2184 cmd->t_state = TRANSPORT_COMPLETE_QF_OK; 2185 transport_handle_queue_full(cmd, cmd->se_dev); 2186 } 2187 2188 void target_free_sgl(struct scatterlist *sgl, int nents) 2189 { 2190 struct scatterlist *sg; 2191 int count; 2192 2193 for_each_sg(sgl, sg, nents, count) 2194 __free_page(sg_page(sg)); 2195 2196 kfree(sgl); 2197 } 2198 EXPORT_SYMBOL(target_free_sgl); 2199 2200 static inline void transport_reset_sgl_orig(struct se_cmd *cmd) 2201 { 2202 /* 2203 * Check for saved t_data_sg that may be used for COMPARE_AND_WRITE 2204 * emulation, and free + reset pointers if necessary.. 2205 */ 2206 if (!cmd->t_data_sg_orig) 2207 return; 2208 2209 kfree(cmd->t_data_sg); 2210 cmd->t_data_sg = cmd->t_data_sg_orig; 2211 cmd->t_data_sg_orig = NULL; 2212 cmd->t_data_nents = cmd->t_data_nents_orig; 2213 cmd->t_data_nents_orig = 0; 2214 } 2215 2216 static inline void transport_free_pages(struct se_cmd *cmd) 2217 { 2218 if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) { 2219 target_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents); 2220 cmd->t_prot_sg = NULL; 2221 cmd->t_prot_nents = 0; 2222 } 2223 2224 if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) { 2225 /* 2226 * Release special case READ buffer payload required for 2227 * SG_TO_MEM_NOALLOC to function with COMPARE_AND_WRITE 2228 */ 2229 if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) { 2230 target_free_sgl(cmd->t_bidi_data_sg, 2231 cmd->t_bidi_data_nents); 2232 cmd->t_bidi_data_sg = NULL; 2233 cmd->t_bidi_data_nents = 0; 2234 } 2235 transport_reset_sgl_orig(cmd); 2236 return; 2237 } 2238 transport_reset_sgl_orig(cmd); 2239 2240 target_free_sgl(cmd->t_data_sg, cmd->t_data_nents); 2241 cmd->t_data_sg = NULL; 2242 cmd->t_data_nents = 0; 2243 2244 target_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents); 2245 cmd->t_bidi_data_sg = NULL; 2246 cmd->t_bidi_data_nents = 0; 2247 } 2248 2249 /** 2250 * transport_put_cmd - release a reference to a command 2251 * @cmd: command to release 2252 * 2253 * This routine releases our reference to the command and frees it if possible. 
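 *
 * The actual release happens in target_release_cmd_kref() once
 * target_put_sess_cmd() drops the final reference.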
2254 */ 2255 static int transport_put_cmd(struct se_cmd *cmd) 2256 { 2257 BUG_ON(!cmd->se_tfo); 2258 /* 2259 * If this cmd has been setup with target_get_sess_cmd(), drop 2260 * the kref and call ->release_cmd() in kref callback. 2261 */ 2262 return target_put_sess_cmd(cmd); 2263 } 2264 2265 void *transport_kmap_data_sg(struct se_cmd *cmd) 2266 { 2267 struct scatterlist *sg = cmd->t_data_sg; 2268 struct page **pages; 2269 int i; 2270 2271 /* 2272 * We need to take into account a possible offset here for fabrics like 2273 * tcm_loop who may be using a contig buffer from the SCSI midlayer for 2274 * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd() 2275 */ 2276 if (!cmd->t_data_nents) 2277 return NULL; 2278 2279 BUG_ON(!sg); 2280 if (cmd->t_data_nents == 1) 2281 return kmap(sg_page(sg)) + sg->offset; 2282 2283 /* >1 page. use vmap */ 2284 pages = kmalloc(sizeof(*pages) * cmd->t_data_nents, GFP_KERNEL); 2285 if (!pages) 2286 return NULL; 2287 2288 /* convert sg[] to pages[] */ 2289 for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) { 2290 pages[i] = sg_page(sg); 2291 } 2292 2293 cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL); 2294 kfree(pages); 2295 if (!cmd->t_data_vmap) 2296 return NULL; 2297 2298 return cmd->t_data_vmap + cmd->t_data_sg[0].offset; 2299 } 2300 EXPORT_SYMBOL(transport_kmap_data_sg); 2301 2302 void transport_kunmap_data_sg(struct se_cmd *cmd) 2303 { 2304 if (!cmd->t_data_nents) { 2305 return; 2306 } else if (cmd->t_data_nents == 1) { 2307 kunmap(sg_page(cmd->t_data_sg)); 2308 return; 2309 } 2310 2311 vunmap(cmd->t_data_vmap); 2312 cmd->t_data_vmap = NULL; 2313 } 2314 EXPORT_SYMBOL(transport_kunmap_data_sg); 2315 2316 int 2317 target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents, u32 length, 2318 bool zero_page, bool chainable) 2319 { 2320 struct scatterlist *sg; 2321 struct page *page; 2322 gfp_t zero_flag = (zero_page) ? __GFP_ZERO : 0; 2323 unsigned int nalloc, nent; 2324 int i = 0; 2325 2326 nalloc = nent = DIV_ROUND_UP(length, PAGE_SIZE); 2327 if (chainable) 2328 nalloc++; 2329 sg = kmalloc_array(nalloc, sizeof(struct scatterlist), GFP_KERNEL); 2330 if (!sg) 2331 return -ENOMEM; 2332 2333 sg_init_table(sg, nalloc); 2334 2335 while (length) { 2336 u32 page_len = min_t(u32, length, PAGE_SIZE); 2337 page = alloc_page(GFP_KERNEL | zero_flag); 2338 if (!page) 2339 goto out; 2340 2341 sg_set_page(&sg[i], page, page_len, 0); 2342 length -= page_len; 2343 i++; 2344 } 2345 *sgl = sg; 2346 *nents = nent; 2347 return 0; 2348 2349 out: 2350 while (i > 0) { 2351 i--; 2352 __free_page(sg_page(&sg[i])); 2353 } 2354 kfree(sg); 2355 return -ENOMEM; 2356 } 2357 EXPORT_SYMBOL(target_alloc_sgl); 2358 2359 /* 2360 * Allocate any required resources to execute the command. For writes we 2361 * might not have the payload yet, so notify the fabric via a call to 2362 * ->write_pending instead. Otherwise place it on the execution queue. 
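 *
 * BIDI and COMPARE_AND_WRITE commands additionally receive a second
 * scatterlist in t_bidi_data_sg, and WRITEs are parked in
 * TRANSPORT_WRITE_PENDING until the fabric supplies the payload through
 * ->write_pending().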
2363 */
2364 sense_reason_t
2365 transport_generic_new_cmd(struct se_cmd *cmd)
2366 {
2367 unsigned long flags;
2368 int ret = 0;
2369 bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB);
2370
2371 if (cmd->prot_op != TARGET_PROT_NORMAL &&
2372 !(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) {
2373 ret = target_alloc_sgl(&cmd->t_prot_sg, &cmd->t_prot_nents,
2374 cmd->prot_length, true, false);
2375 if (ret < 0)
2376 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2377 }
2378
2379 /*
2380 * Determine if the TCM fabric module has already allocated physical
2381 * memory, and is directly calling transport_generic_map_mem_to_cmd()
2382 * beforehand.
2383 */
2384 if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
2385 cmd->data_length) {
2386
2387 if ((cmd->se_cmd_flags & SCF_BIDI) ||
2388 (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) {
2389 u32 bidi_length;
2390
2391 if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)
2392 bidi_length = cmd->t_task_nolb *
2393 cmd->se_dev->dev_attrib.block_size;
2394 else
2395 bidi_length = cmd->data_length;
2396
2397 ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
2398 &cmd->t_bidi_data_nents,
2399 bidi_length, zero_flag, false);
2400 if (ret < 0)
2401 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2402 }
2403
2404 ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents,
2405 cmd->data_length, zero_flag, false);
2406 if (ret < 0)
2407 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2408 } else if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
2409 cmd->data_length) {
2410 /*
2411 * Special case for COMPARE_AND_WRITE with fabrics
2412 * using SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC.
2413 */
2414 u32 caw_length = cmd->t_task_nolb *
2415 cmd->se_dev->dev_attrib.block_size;
2416
2417 ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
2418 &cmd->t_bidi_data_nents,
2419 caw_length, zero_flag, false);
2420 if (ret < 0)
2421 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2422 }
2423 /*
2424 * If this command is not a write we can execute it right here;
2425 * for write buffers we need to notify the fabric driver first
2426 * and let it call back once the write buffers are ready.
2427 */
2428 target_add_to_state_list(cmd);
2429 if (cmd->data_direction != DMA_TO_DEVICE || cmd->data_length == 0) {
2430 target_execute_cmd(cmd);
2431 return 0;
2432 }
2433
2434 spin_lock_irqsave(&cmd->t_state_lock, flags);
2435 cmd->t_state = TRANSPORT_WRITE_PENDING;
2436 /*
2437 * Determine if frontend context caller is requesting the stopping of
2438 * this command for frontend exceptions.
2439 */
2440 if (cmd->transport_state & CMD_T_STOP) {
2441 pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
2442 __func__, __LINE__, cmd->tag);
2443
2444 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2445
2446 complete_all(&cmd->t_transport_stop_comp);
2447 return 0;
2448 }
2449 cmd->transport_state &= ~CMD_T_ACTIVE;
2450 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2451
2452 ret = cmd->se_tfo->write_pending(cmd);
2453 if (ret == -EAGAIN || ret == -ENOMEM)
2454 goto queue_full;
2455
2456 /* fabric drivers should only return -EAGAIN or -ENOMEM as error */
2457 WARN_ON(ret);
2458
2459 return (!ret) ?
0 : TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2460 2461 queue_full: 2462 pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd); 2463 cmd->t_state = TRANSPORT_COMPLETE_QF_WP; 2464 transport_handle_queue_full(cmd, cmd->se_dev); 2465 return 0; 2466 } 2467 EXPORT_SYMBOL(transport_generic_new_cmd); 2468 2469 static void transport_write_pending_qf(struct se_cmd *cmd) 2470 { 2471 int ret; 2472 2473 ret = cmd->se_tfo->write_pending(cmd); 2474 if (ret == -EAGAIN || ret == -ENOMEM) { 2475 pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", 2476 cmd); 2477 transport_handle_queue_full(cmd, cmd->se_dev); 2478 } 2479 } 2480 2481 static bool 2482 __transport_wait_for_tasks(struct se_cmd *, bool, bool *, bool *, 2483 unsigned long *flags); 2484 2485 static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas) 2486 { 2487 unsigned long flags; 2488 2489 spin_lock_irqsave(&cmd->t_state_lock, flags); 2490 __transport_wait_for_tasks(cmd, true, aborted, tas, &flags); 2491 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2492 } 2493 2494 int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) 2495 { 2496 int ret = 0; 2497 bool aborted = false, tas = false; 2498 2499 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) { 2500 if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) 2501 target_wait_free_cmd(cmd, &aborted, &tas); 2502 2503 if (!aborted || tas) 2504 ret = transport_put_cmd(cmd); 2505 } else { 2506 if (wait_for_tasks) 2507 target_wait_free_cmd(cmd, &aborted, &tas); 2508 /* 2509 * Handle WRITE failure case where transport_generic_new_cmd() 2510 * has already added se_cmd to state_list, but fabric has 2511 * failed command before I/O submission. 2512 */ 2513 if (cmd->state_active) 2514 target_remove_from_state_list(cmd); 2515 2516 if (cmd->se_lun) 2517 transport_lun_remove_cmd(cmd); 2518 2519 if (!aborted || tas) 2520 ret = transport_put_cmd(cmd); 2521 } 2522 /* 2523 * If the task has been internally aborted due to TMR ABORT_TASK 2524 * or LUN_RESET, target_core_tmr.c is responsible for performing 2525 * the remaining calls to target_put_sess_cmd(), and not the 2526 * callers of this function. 2527 */ 2528 if (aborted) { 2529 pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag); 2530 wait_for_completion(&cmd->cmd_wait_comp); 2531 cmd->se_tfo->release_cmd(cmd); 2532 ret = 1; 2533 } 2534 return ret; 2535 } 2536 EXPORT_SYMBOL(transport_generic_free_cmd); 2537 2538 /* target_get_sess_cmd - Add command to active ->sess_cmd_list 2539 * @se_cmd: command descriptor to add 2540 * @ack_kref: Signal that fabric will perform an ack target_put_sess_cmd() 2541 */ 2542 int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref) 2543 { 2544 struct se_session *se_sess = se_cmd->se_sess; 2545 unsigned long flags; 2546 int ret = 0; 2547 2548 /* 2549 * Add a second kref if the fabric caller is expecting to handle 2550 * fabric acknowledgement that requires two target_put_sess_cmd() 2551 * invocations before se_cmd descriptor release. 
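 *
 * If the session is already being torn down, the extra reference is dropped
 * again before returning -ESHUTDOWN, so no kref is leaked on that path.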
2552 */ 2553 if (ack_kref) { 2554 if (!kref_get_unless_zero(&se_cmd->cmd_kref)) 2555 return -EINVAL; 2556 2557 se_cmd->se_cmd_flags |= SCF_ACK_KREF; 2558 } 2559 2560 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 2561 if (se_sess->sess_tearing_down) { 2562 ret = -ESHUTDOWN; 2563 goto out; 2564 } 2565 list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list); 2566 out: 2567 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2568 2569 if (ret && ack_kref) 2570 target_put_sess_cmd(se_cmd); 2571 2572 return ret; 2573 } 2574 EXPORT_SYMBOL(target_get_sess_cmd); 2575 2576 static void target_free_cmd_mem(struct se_cmd *cmd) 2577 { 2578 transport_free_pages(cmd); 2579 2580 if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) 2581 core_tmr_release_req(cmd->se_tmr_req); 2582 if (cmd->t_task_cdb != cmd->__t_task_cdb) 2583 kfree(cmd->t_task_cdb); 2584 } 2585 2586 static void target_release_cmd_kref(struct kref *kref) 2587 { 2588 struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref); 2589 struct se_session *se_sess = se_cmd->se_sess; 2590 unsigned long flags; 2591 bool fabric_stop; 2592 2593 if (se_sess) { 2594 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 2595 2596 spin_lock(&se_cmd->t_state_lock); 2597 fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP) && 2598 (se_cmd->transport_state & CMD_T_ABORTED); 2599 spin_unlock(&se_cmd->t_state_lock); 2600 2601 if (se_cmd->cmd_wait_set || fabric_stop) { 2602 list_del_init(&se_cmd->se_cmd_list); 2603 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2604 target_free_cmd_mem(se_cmd); 2605 complete(&se_cmd->cmd_wait_comp); 2606 return; 2607 } 2608 list_del_init(&se_cmd->se_cmd_list); 2609 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2610 } 2611 2612 target_free_cmd_mem(se_cmd); 2613 se_cmd->se_tfo->release_cmd(se_cmd); 2614 } 2615 2616 /** 2617 * target_put_sess_cmd - decrease the command reference count 2618 * @se_cmd: command to drop a reference from 2619 * 2620 * Returns 1 if and only if this target_put_sess_cmd() call caused the 2621 * refcount to drop to zero. Returns zero otherwise. 2622 */ 2623 int target_put_sess_cmd(struct se_cmd *se_cmd) 2624 { 2625 return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref); 2626 } 2627 EXPORT_SYMBOL(target_put_sess_cmd); 2628 2629 /* target_sess_cmd_list_set_waiting - Flag all commands in 2630 * sess_cmd_list to complete cmd_wait_comp. Set 2631 * sess_tearing_down so no more commands are queued. 
2632 * @se_sess: session to flag 2633 */ 2634 void target_sess_cmd_list_set_waiting(struct se_session *se_sess) 2635 { 2636 struct se_cmd *se_cmd, *tmp_cmd; 2637 unsigned long flags; 2638 int rc; 2639 2640 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 2641 if (se_sess->sess_tearing_down) { 2642 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2643 return; 2644 } 2645 se_sess->sess_tearing_down = 1; 2646 list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list); 2647 2648 list_for_each_entry_safe(se_cmd, tmp_cmd, 2649 &se_sess->sess_wait_list, se_cmd_list) { 2650 rc = kref_get_unless_zero(&se_cmd->cmd_kref); 2651 if (rc) { 2652 se_cmd->cmd_wait_set = 1; 2653 spin_lock(&se_cmd->t_state_lock); 2654 se_cmd->transport_state |= CMD_T_FABRIC_STOP; 2655 spin_unlock(&se_cmd->t_state_lock); 2656 } else 2657 list_del_init(&se_cmd->se_cmd_list); 2658 } 2659 2660 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2661 } 2662 EXPORT_SYMBOL(target_sess_cmd_list_set_waiting); 2663 2664 /* target_wait_for_sess_cmds - Wait for outstanding descriptors 2665 * @se_sess: session to wait for active I/O 2666 */ 2667 void target_wait_for_sess_cmds(struct se_session *se_sess) 2668 { 2669 struct se_cmd *se_cmd, *tmp_cmd; 2670 unsigned long flags; 2671 bool tas; 2672 2673 list_for_each_entry_safe(se_cmd, tmp_cmd, 2674 &se_sess->sess_wait_list, se_cmd_list) { 2675 pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:" 2676 " %d\n", se_cmd, se_cmd->t_state, 2677 se_cmd->se_tfo->get_cmd_state(se_cmd)); 2678 2679 spin_lock_irqsave(&se_cmd->t_state_lock, flags); 2680 tas = (se_cmd->transport_state & CMD_T_TAS); 2681 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); 2682 2683 if (!target_put_sess_cmd(se_cmd)) { 2684 if (tas) 2685 target_put_sess_cmd(se_cmd); 2686 } 2687 2688 wait_for_completion(&se_cmd->cmd_wait_comp); 2689 pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d" 2690 " fabric state: %d\n", se_cmd, se_cmd->t_state, 2691 se_cmd->se_tfo->get_cmd_state(se_cmd)); 2692 2693 se_cmd->se_tfo->release_cmd(se_cmd); 2694 } 2695 2696 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 2697 WARN_ON(!list_empty(&se_sess->sess_cmd_list)); 2698 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2699 2700 } 2701 EXPORT_SYMBOL(target_wait_for_sess_cmds); 2702 2703 static void target_lun_confirm(struct percpu_ref *ref) 2704 { 2705 struct se_lun *lun = container_of(ref, struct se_lun, lun_ref); 2706 2707 complete(&lun->lun_ref_comp); 2708 } 2709 2710 void transport_clear_lun_ref(struct se_lun *lun) 2711 { 2712 /* 2713 * Mark the percpu-ref as DEAD, switch to atomic_t mode, drop 2714 * the initial reference and schedule confirm kill to be 2715 * executed after one full RCU grace period has completed. 2716 */ 2717 percpu_ref_kill_and_confirm(&lun->lun_ref, target_lun_confirm); 2718 /* 2719 * The first completion waits for percpu_ref_switch_to_atomic_rcu() 2720 * to call target_lun_confirm after lun->lun_ref has been marked 2721 * as __PERCPU_REF_DEAD on all CPUs, and switches to atomic_t 2722 * mode so that percpu_ref_tryget_live() lookup of lun->lun_ref 2723 * fails for all new incoming I/O. 2724 */ 2725 wait_for_completion(&lun->lun_ref_comp); 2726 /* 2727 * The second completion waits for percpu_ref_put_many() to 2728 * invoke ->release() after lun->lun_ref has switched to 2729 * atomic_t mode, and lun->lun_ref.count has reached zero. 
2730 * 2731 * At this point all target-core lun->lun_ref references have 2732 * been dropped via transport_lun_remove_cmd(), and it's safe 2733 * to proceed with the remaining LUN shutdown. 2734 */ 2735 wait_for_completion(&lun->lun_shutdown_comp); 2736 } 2737 2738 static bool 2739 __transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop, 2740 bool *aborted, bool *tas, unsigned long *flags) 2741 __releases(&cmd->t_state_lock) 2742 __acquires(&cmd->t_state_lock) 2743 { 2744 2745 assert_spin_locked(&cmd->t_state_lock); 2746 WARN_ON_ONCE(!irqs_disabled()); 2747 2748 if (fabric_stop) 2749 cmd->transport_state |= CMD_T_FABRIC_STOP; 2750 2751 if (cmd->transport_state & CMD_T_ABORTED) 2752 *aborted = true; 2753 2754 if (cmd->transport_state & CMD_T_TAS) 2755 *tas = true; 2756 2757 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && 2758 !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) 2759 return false; 2760 2761 if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && 2762 !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) 2763 return false; 2764 2765 if (!(cmd->transport_state & CMD_T_ACTIVE)) 2766 return false; 2767 2768 if (fabric_stop && *aborted) 2769 return false; 2770 2771 cmd->transport_state |= CMD_T_STOP; 2772 2773 pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08llx i_state: %d," 2774 " t_state: %d, CMD_T_STOP\n", cmd, cmd->tag, 2775 cmd->se_tfo->get_cmd_state(cmd), cmd->t_state); 2776 2777 spin_unlock_irqrestore(&cmd->t_state_lock, *flags); 2778 2779 wait_for_completion(&cmd->t_transport_stop_comp); 2780 2781 spin_lock_irqsave(&cmd->t_state_lock, *flags); 2782 cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP); 2783 2784 pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->" 2785 "t_transport_stop_comp) for ITT: 0x%08llx\n", cmd->tag); 2786 2787 return true; 2788 } 2789 2790 /** 2791 * transport_wait_for_tasks - set CMD_T_STOP and wait for t_transport_stop_comp 2792 * @cmd: command to wait on 2793 */ 2794 bool transport_wait_for_tasks(struct se_cmd *cmd) 2795 { 2796 unsigned long flags; 2797 bool ret, aborted = false, tas = false; 2798 2799 spin_lock_irqsave(&cmd->t_state_lock, flags); 2800 ret = __transport_wait_for_tasks(cmd, false, &aborted, &tas, &flags); 2801 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2802 2803 return ret; 2804 } 2805 EXPORT_SYMBOL(transport_wait_for_tasks); 2806 2807 struct sense_info { 2808 u8 key; 2809 u8 asc; 2810 u8 ascq; 2811 bool add_sector_info; 2812 }; 2813 2814 static const struct sense_info sense_info_table[] = { 2815 [TCM_NO_SENSE] = { 2816 .key = NOT_READY 2817 }, 2818 [TCM_NON_EXISTENT_LUN] = { 2819 .key = ILLEGAL_REQUEST, 2820 .asc = 0x25 /* LOGICAL UNIT NOT SUPPORTED */ 2821 }, 2822 [TCM_UNSUPPORTED_SCSI_OPCODE] = { 2823 .key = ILLEGAL_REQUEST, 2824 .asc = 0x20, /* INVALID COMMAND OPERATION CODE */ 2825 }, 2826 [TCM_SECTOR_COUNT_TOO_MANY] = { 2827 .key = ILLEGAL_REQUEST, 2828 .asc = 0x20, /* INVALID COMMAND OPERATION CODE */ 2829 }, 2830 [TCM_UNKNOWN_MODE_PAGE] = { 2831 .key = ILLEGAL_REQUEST, 2832 .asc = 0x24, /* INVALID FIELD IN CDB */ 2833 }, 2834 [TCM_CHECK_CONDITION_ABORT_CMD] = { 2835 .key = ABORTED_COMMAND, 2836 .asc = 0x29, /* BUS DEVICE RESET FUNCTION OCCURRED */ 2837 .ascq = 0x03, 2838 }, 2839 [TCM_INCORRECT_AMOUNT_OF_DATA] = { 2840 .key = ABORTED_COMMAND, 2841 .asc = 0x0c, /* WRITE ERROR */ 2842 .ascq = 0x0d, /* NOT ENOUGH UNSOLICITED DATA */ 2843 }, 2844 [TCM_INVALID_CDB_FIELD] = { 2845 .key = ILLEGAL_REQUEST, 2846 .asc = 0x24, /* INVALID FIELD IN CDB */ 2847 }, 2848 [TCM_INVALID_PARAMETER_LIST] = { 2849 .key = ILLEGAL_REQUEST, 2850 .asc 
= 0x26, /* INVALID FIELD IN PARAMETER LIST */ 2851 }, 2852 [TCM_TOO_MANY_TARGET_DESCS] = { 2853 .key = ILLEGAL_REQUEST, 2854 .asc = 0x26, 2855 .ascq = 0x06, /* TOO MANY TARGET DESCRIPTORS */ 2856 }, 2857 [TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE] = { 2858 .key = ILLEGAL_REQUEST, 2859 .asc = 0x26, 2860 .ascq = 0x07, /* UNSUPPORTED TARGET DESCRIPTOR TYPE CODE */ 2861 }, 2862 [TCM_TOO_MANY_SEGMENT_DESCS] = { 2863 .key = ILLEGAL_REQUEST, 2864 .asc = 0x26, 2865 .ascq = 0x08, /* TOO MANY SEGMENT DESCRIPTORS */ 2866 }, 2867 [TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE] = { 2868 .key = ILLEGAL_REQUEST, 2869 .asc = 0x26, 2870 .ascq = 0x09, /* UNSUPPORTED SEGMENT DESCRIPTOR TYPE CODE */ 2871 }, 2872 [TCM_PARAMETER_LIST_LENGTH_ERROR] = { 2873 .key = ILLEGAL_REQUEST, 2874 .asc = 0x1a, /* PARAMETER LIST LENGTH ERROR */ 2875 }, 2876 [TCM_UNEXPECTED_UNSOLICITED_DATA] = { 2877 .key = ILLEGAL_REQUEST, 2878 .asc = 0x0c, /* WRITE ERROR */ 2879 .ascq = 0x0c, /* UNEXPECTED_UNSOLICITED_DATA */ 2880 }, 2881 [TCM_SERVICE_CRC_ERROR] = { 2882 .key = ABORTED_COMMAND, 2883 .asc = 0x47, /* PROTOCOL SERVICE CRC ERROR */ 2884 .ascq = 0x05, /* N/A */ 2885 }, 2886 [TCM_SNACK_REJECTED] = { 2887 .key = ABORTED_COMMAND, 2888 .asc = 0x11, /* READ ERROR */ 2889 .ascq = 0x13, /* FAILED RETRANSMISSION REQUEST */ 2890 }, 2891 [TCM_WRITE_PROTECTED] = { 2892 .key = DATA_PROTECT, 2893 .asc = 0x27, /* WRITE PROTECTED */ 2894 }, 2895 [TCM_ADDRESS_OUT_OF_RANGE] = { 2896 .key = ILLEGAL_REQUEST, 2897 .asc = 0x21, /* LOGICAL BLOCK ADDRESS OUT OF RANGE */ 2898 }, 2899 [TCM_CHECK_CONDITION_UNIT_ATTENTION] = { 2900 .key = UNIT_ATTENTION, 2901 }, 2902 [TCM_CHECK_CONDITION_NOT_READY] = { 2903 .key = NOT_READY, 2904 }, 2905 [TCM_MISCOMPARE_VERIFY] = { 2906 .key = MISCOMPARE, 2907 .asc = 0x1d, /* MISCOMPARE DURING VERIFY OPERATION */ 2908 .ascq = 0x00, 2909 }, 2910 [TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED] = { 2911 .key = ABORTED_COMMAND, 2912 .asc = 0x10, 2913 .ascq = 0x01, /* LOGICAL BLOCK GUARD CHECK FAILED */ 2914 .add_sector_info = true, 2915 }, 2916 [TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED] = { 2917 .key = ABORTED_COMMAND, 2918 .asc = 0x10, 2919 .ascq = 0x02, /* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */ 2920 .add_sector_info = true, 2921 }, 2922 [TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED] = { 2923 .key = ABORTED_COMMAND, 2924 .asc = 0x10, 2925 .ascq = 0x03, /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */ 2926 .add_sector_info = true, 2927 }, 2928 [TCM_COPY_TARGET_DEVICE_NOT_REACHABLE] = { 2929 .key = COPY_ABORTED, 2930 .asc = 0x0d, 2931 .ascq = 0x02, /* COPY TARGET DEVICE NOT REACHABLE */ 2932 2933 }, 2934 [TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE] = { 2935 /* 2936 * Returning ILLEGAL REQUEST would cause immediate IO errors on 2937 * Solaris initiators. Returning NOT READY instead means the 2938 * operations will be retried a finite number of times and we 2939 * can survive intermittent errors. 
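	 *
	 * This entry also serves as the fallback used by
	 * translate_sense_reason() below when a sense_reason_t has no
	 * entry in this table.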
2940 */ 2941 .key = NOT_READY, 2942 .asc = 0x08, /* LOGICAL UNIT COMMUNICATION FAILURE */ 2943 }, 2944 }; 2945 2946 static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason) 2947 { 2948 const struct sense_info *si; 2949 u8 *buffer = cmd->sense_buffer; 2950 int r = (__force int)reason; 2951 u8 asc, ascq; 2952 bool desc_format = target_sense_desc_format(cmd->se_dev); 2953 2954 if (r < ARRAY_SIZE(sense_info_table) && sense_info_table[r].key) 2955 si = &sense_info_table[r]; 2956 else 2957 si = &sense_info_table[(__force int) 2958 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE]; 2959 2960 if (reason == TCM_CHECK_CONDITION_UNIT_ATTENTION) { 2961 core_scsi3_ua_for_check_condition(cmd, &asc, &ascq); 2962 WARN_ON_ONCE(asc == 0); 2963 } else if (si->asc == 0) { 2964 WARN_ON_ONCE(cmd->scsi_asc == 0); 2965 asc = cmd->scsi_asc; 2966 ascq = cmd->scsi_ascq; 2967 } else { 2968 asc = si->asc; 2969 ascq = si->ascq; 2970 } 2971 2972 scsi_build_sense_buffer(desc_format, buffer, si->key, asc, ascq); 2973 if (si->add_sector_info) 2974 return scsi_set_sense_information(buffer, 2975 cmd->scsi_sense_length, 2976 cmd->bad_sector); 2977 2978 return 0; 2979 } 2980 2981 int 2982 transport_send_check_condition_and_sense(struct se_cmd *cmd, 2983 sense_reason_t reason, int from_transport) 2984 { 2985 unsigned long flags; 2986 2987 spin_lock_irqsave(&cmd->t_state_lock, flags); 2988 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { 2989 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2990 return 0; 2991 } 2992 cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION; 2993 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2994 2995 if (!from_transport) { 2996 int rc; 2997 2998 cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE; 2999 cmd->scsi_status = SAM_STAT_CHECK_CONDITION; 3000 cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER; 3001 rc = translate_sense_reason(cmd, reason); 3002 if (rc) 3003 return rc; 3004 } 3005 3006 trace_target_cmd_complete(cmd); 3007 return cmd->se_tfo->queue_status(cmd); 3008 } 3009 EXPORT_SYMBOL(transport_send_check_condition_and_sense); 3010 3011 static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status) 3012 __releases(&cmd->t_state_lock) 3013 __acquires(&cmd->t_state_lock) 3014 { 3015 assert_spin_locked(&cmd->t_state_lock); 3016 WARN_ON_ONCE(!irqs_disabled()); 3017 3018 if (!(cmd->transport_state & CMD_T_ABORTED)) 3019 return 0; 3020 /* 3021 * If cmd has been aborted but either no status is to be sent or it has 3022 * already been sent, just return 3023 */ 3024 if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS)) { 3025 if (send_status) 3026 cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS; 3027 return 1; 3028 } 3029 3030 pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB:" 3031 " 0x%02x ITT: 0x%08llx\n", cmd->t_task_cdb[0], cmd->tag); 3032 3033 cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS; 3034 cmd->scsi_status = SAM_STAT_TASK_ABORTED; 3035 trace_target_cmd_complete(cmd); 3036 3037 spin_unlock_irq(&cmd->t_state_lock); 3038 cmd->se_tfo->queue_status(cmd); 3039 spin_lock_irq(&cmd->t_state_lock); 3040 3041 return 1; 3042 } 3043 3044 int transport_check_aborted_status(struct se_cmd *cmd, int send_status) 3045 { 3046 int ret; 3047 3048 spin_lock_irq(&cmd->t_state_lock); 3049 ret = __transport_check_aborted_status(cmd, send_status); 3050 spin_unlock_irq(&cmd->t_state_lock); 3051 3052 return ret; 3053 } 3054 EXPORT_SYMBOL(transport_check_aborted_status); 3055 3056 void transport_send_task_abort(struct se_cmd *cmd) 3057 { 3058 unsigned long flags; 3059 3060 
spin_lock_irqsave(&cmd->t_state_lock, flags);
3061 if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION)) {
3062 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3063 return;
3064 }
3065 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3066
3067 /*
3068 * If there are still expected incoming fabric WRITEs, we wait
3069 * until they have completed before sending a TASK_ABORTED
3070 * response. This response with TASK_ABORTED status will be
3071 * queued back to fabric module by transport_check_aborted_status().
3072 */
3073 if (cmd->data_direction == DMA_TO_DEVICE) {
3074 if (cmd->se_tfo->write_pending_status(cmd) != 0) {
3075 spin_lock_irqsave(&cmd->t_state_lock, flags);
3076 if (cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS) {
3077 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3078 goto send_abort;
3079 }
3080 cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
3081 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3082 return;
3083 }
3084 }
3085 send_abort:
3086 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
3087
3088 transport_lun_remove_cmd(cmd);
3089
3090 pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x, ITT: 0x%08llx\n",
3091 cmd->t_task_cdb[0], cmd->tag);
3092
3093 trace_target_cmd_complete(cmd);
3094 cmd->se_tfo->queue_status(cmd);
3095 }
3096
3097 static void target_tmr_work(struct work_struct *work)
3098 {
3099 struct se_cmd *cmd = container_of(work, struct se_cmd, work);
3100 struct se_device *dev = cmd->se_dev;
3101 struct se_tmr_req *tmr = cmd->se_tmr_req;
3102 unsigned long flags;
3103 int ret;
3104
3105 spin_lock_irqsave(&cmd->t_state_lock, flags);
3106 if (cmd->transport_state & CMD_T_ABORTED) {
3107 tmr->response = TMR_FUNCTION_REJECTED;
3108 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3109 goto check_stop;
3110 }
3111 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3112
3113 switch (tmr->function) {
3114 case TMR_ABORT_TASK:
3115 core_tmr_abort_task(dev, tmr, cmd->se_sess);
3116 break;
3117 case TMR_ABORT_TASK_SET:
3118 case TMR_CLEAR_ACA:
3119 case TMR_CLEAR_TASK_SET:
3120 tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
3121 break;
3122 case TMR_LUN_RESET:
3123 ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
3124 tmr->response = (!ret) ?
TMR_FUNCTION_COMPLETE :
3125 TMR_FUNCTION_REJECTED;
3126 if (tmr->response == TMR_FUNCTION_COMPLETE) {
3127 target_ua_allocate_lun(cmd->se_sess->se_node_acl,
3128 cmd->orig_fe_lun, 0x29,
3129 ASCQ_29H_BUS_DEVICE_RESET_FUNCTION_OCCURRED);
3130 }
3131 break;
3132 case TMR_TARGET_WARM_RESET:
3133 tmr->response = TMR_FUNCTION_REJECTED;
3134 break;
3135 case TMR_TARGET_COLD_RESET:
3136 tmr->response = TMR_FUNCTION_REJECTED;
3137 break;
3138 default:
3139 pr_err("Unknown TMR function: 0x%02x.\n",
3140 tmr->function);
3141 tmr->response = TMR_FUNCTION_REJECTED;
3142 break;
3143 }
3144
3145 spin_lock_irqsave(&cmd->t_state_lock, flags);
3146 if (cmd->transport_state & CMD_T_ABORTED) {
3147 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3148 goto check_stop;
3149 }
3150 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3151
3152 cmd->se_tfo->queue_tm_rsp(cmd);
3153
3154 check_stop:
3155 transport_cmd_check_stop_to_fabric(cmd);
3156 }
3157
3158 int transport_generic_handle_tmr(
3159 struct se_cmd *cmd)
3160 {
3161 unsigned long flags;
3162 bool aborted = false;
3163
3164 spin_lock_irqsave(&cmd->t_state_lock, flags);
3165 if (cmd->transport_state & CMD_T_ABORTED) {
3166 aborted = true;
3167 } else {
3168 cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
3169 cmd->transport_state |= CMD_T_ACTIVE;
3170 }
3171 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3172
3173 if (aborted) {
3174 pr_warn_ratelimited("handle_tmr caught CMD_T_ABORTED TMR %d "
3175 "ref_tag: %llu tag: %llu\n", cmd->se_tmr_req->function,
3176 cmd->se_tmr_req->ref_task_tag, cmd->tag);
3177 transport_cmd_check_stop_to_fabric(cmd);
3178 return 0;
3179 }
3180
3181 INIT_WORK(&cmd->work, target_tmr_work);
3182 queue_work(cmd->se_dev->tmr_wq, &cmd->work);
3183 return 0;
3184 }
3185 EXPORT_SYMBOL(transport_generic_handle_tmr);
3186
3187 bool
3188 target_check_wce(struct se_device *dev)
3189 {
3190 bool wce = false;
3191
3192 if (dev->transport->get_write_cache)
3193 wce = dev->transport->get_write_cache(dev);
3194 else if (dev->dev_attrib.emulate_write_cache > 0)
3195 wce = true;
3196
3197 return wce;
3198 }
3199
3200 bool
3201 target_check_fua(struct se_device *dev)
3202 {
3203 return target_check_wce(dev) && dev->dev_attrib.emulate_fua_write > 0;
3204 }
3205
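/*
 * Illustrative sketch only, compiled out: one way a fabric module might
 * drive the exports above for a freshly received command.  The names
 * example_fabric_submit() and example_fabric_response_acked() are
 * hypothetical, error handling is reduced to the essentials, and real
 * fabric drivers add their own tagging and payload mapping around this.
 */
#if 0
static int example_fabric_submit(struct se_cmd *se_cmd)
{
	sense_reason_t rc;
	int ret;

	/* Pin the descriptor; ack_kref adds the fabric's own reference. */
	ret = target_get_sess_cmd(se_cmd, true);
	if (ret)
		return ret;

	/*
	 * Allocate data buffers; READs execute immediately, while WRITEs
	 * wait in TRANSPORT_WRITE_PENDING for ->write_pending() to supply
	 * the payload (see transport_generic_new_cmd() above).
	 */
	rc = transport_generic_new_cmd(se_cmd);
	if (rc)
		transport_generic_request_failure(se_cmd, rc);

	return 0;
}

static void example_fabric_response_acked(struct se_cmd *se_cmd)
{
	/* Drop the ack_kref reference taken at submission time ... */
	target_put_sess_cmd(se_cmd);
	/* ... and the remaining reference, releasing the command. */
	transport_generic_free_cmd(se_cmd, false);
}
#endif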