/*******************************************************************************
 * Filename: target_core_transport.c
 *
 * This file contains the Generic Target Engine Core.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/cdrom.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <linux/vmalloc.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_common.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

#define CREATE_TRACE_POINTS
#include <trace/events/target.h>

static struct workqueue_struct *target_completion_wq;
static struct kmem_cache *se_sess_cache;
struct kmem_cache *se_ua_cache;
struct kmem_cache *t10_pr_reg_cache;
struct kmem_cache *t10_alua_lu_gp_cache;
struct kmem_cache *t10_alua_lu_gp_mem_cache;
struct kmem_cache *t10_alua_tg_pt_gp_cache;
struct kmem_cache *t10_alua_lba_map_cache;
struct kmem_cache *t10_alua_lba_map_mem_cache;

static void transport_complete_task_attr(struct se_cmd *cmd);
static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason);
static void transport_handle_queue_full(struct se_cmd *cmd,
		struct se_device *dev, int err, bool write_pending);
static void target_complete_ok_work(struct work_struct *work);

int init_se_kmem_caches(void)
{
	se_sess_cache = kmem_cache_create("se_sess_cache",
			sizeof(struct se_session), __alignof__(struct se_session),
			0, NULL);
	if (!se_sess_cache) {
		pr_err("kmem_cache_create() for struct se_session"
				" failed\n");
		goto out;
	}
	se_ua_cache = kmem_cache_create("se_ua_cache",
			sizeof(struct se_ua), __alignof__(struct se_ua),
			0, NULL);
	if (!se_ua_cache) {
		pr_err("kmem_cache_create() for struct se_ua failed\n");
		goto out_free_sess_cache;
	}
	t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
			sizeof(struct t10_pr_registration),
			__alignof__(struct t10_pr_registration), 0, NULL);
	if (!t10_pr_reg_cache) {
		pr_err("kmem_cache_create() for struct t10_pr_registration"
				" failed\n");
		goto out_free_ua_cache;
	}
	t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
			sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
			0, NULL);
	if (!t10_alua_lu_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_cache"
				" failed\n");
		goto out_free_pr_reg_cache;
	}
	t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
			sizeof(struct t10_alua_lu_gp_member),
			__alignof__(struct t10_alua_lu_gp_member), 0, NULL);
	if (!t10_alua_lu_gp_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_"
				"cache failed\n");
		goto out_free_lu_gp_cache;
	}
	t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
			sizeof(struct t10_alua_tg_pt_gp),
			__alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
	if (!t10_alua_tg_pt_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
				"cache failed\n");
		goto out_free_lu_gp_mem_cache;
	}
	t10_alua_lba_map_cache = kmem_cache_create(
			"t10_alua_lba_map_cache",
			sizeof(struct t10_alua_lba_map),
			__alignof__(struct t10_alua_lba_map), 0, NULL);
	if (!t10_alua_lba_map_cache) {
		pr_err("kmem_cache_create() for t10_alua_lba_map_"
				"cache failed\n");
		goto out_free_tg_pt_gp_cache;
	}
	t10_alua_lba_map_mem_cache = kmem_cache_create(
			"t10_alua_lba_map_mem_cache",
			sizeof(struct t10_alua_lba_map_member),
			__alignof__(struct t10_alua_lba_map_member), 0, NULL);
	if (!t10_alua_lba_map_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_lba_map_mem_"
				"cache failed\n");
		goto out_free_lba_map_cache;
	}

	target_completion_wq = alloc_workqueue("target_completion",
					       WQ_MEM_RECLAIM, 0);
	if (!target_completion_wq)
		goto out_free_lba_map_mem_cache;

	return 0;

out_free_lba_map_mem_cache:
	kmem_cache_destroy(t10_alua_lba_map_mem_cache);
out_free_lba_map_cache:
	kmem_cache_destroy(t10_alua_lba_map_cache);
out_free_tg_pt_gp_cache:
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
out_free_lu_gp_mem_cache:
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
out_free_lu_gp_cache:
	kmem_cache_destroy(t10_alua_lu_gp_cache);
out_free_pr_reg_cache:
	kmem_cache_destroy(t10_pr_reg_cache);
out_free_ua_cache:
	kmem_cache_destroy(se_ua_cache);
out_free_sess_cache:
	kmem_cache_destroy(se_sess_cache);
out:
	return -ENOMEM;
}

void release_se_kmem_caches(void)
{
	destroy_workqueue(target_completion_wq);
	kmem_cache_destroy(se_sess_cache);
	kmem_cache_destroy(se_ua_cache);
	kmem_cache_destroy(t10_pr_reg_cache);
	kmem_cache_destroy(t10_alua_lu_gp_cache);
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
	kmem_cache_destroy(t10_alua_lba_map_cache);
	kmem_cache_destroy(t10_alua_lba_map_mem_cache);
}

/* This code ensures unique mib indexes are handed out.
 */
static DEFINE_SPINLOCK(scsi_mib_index_lock);
static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];

/*
 * Allocate a new row index for the entry type specified
 */
u32 scsi_get_new_index(scsi_index_t type)
{
	u32 new_index;

	BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX));

	spin_lock(&scsi_mib_index_lock);
	new_index = ++scsi_mib_index[type];
	spin_unlock(&scsi_mib_index_lock);

	return new_index;
}

void transport_subsystem_check_init(void)
{
	int ret;
	static int sub_api_initialized;

	if (sub_api_initialized)
		return;

	ret = IS_ENABLED(CONFIG_TCM_IBLOCK) && request_module("target_core_iblock");
	if (ret != 0)
		pr_err("Unable to load target_core_iblock\n");

	ret = IS_ENABLED(CONFIG_TCM_FILEIO) && request_module("target_core_file");
	if (ret != 0)
		pr_err("Unable to load target_core_file\n");

	ret = IS_ENABLED(CONFIG_TCM_PSCSI) && request_module("target_core_pscsi");
	if (ret != 0)
		pr_err("Unable to load target_core_pscsi\n");

	ret = IS_ENABLED(CONFIG_TCM_USER2) && request_module("target_core_user");
	if (ret != 0)
		pr_err("Unable to load target_core_user\n");

	sub_api_initialized = 1;
}

static void target_release_sess_cmd_refcnt(struct percpu_ref *ref)
{
	struct se_session *sess = container_of(ref, typeof(*sess), cmd_count);

	wake_up(&sess->cmd_list_wq);
}

/**
 * transport_init_session - initialize a session object
 * @se_sess: Session object pointer.
 *
 * The caller must have zero-initialized @se_sess before calling this function.
 */
int transport_init_session(struct se_session *se_sess)
{
	INIT_LIST_HEAD(&se_sess->sess_list);
	INIT_LIST_HEAD(&se_sess->sess_acl_list);
	INIT_LIST_HEAD(&se_sess->sess_cmd_list);
	spin_lock_init(&se_sess->sess_cmd_lock);
	init_waitqueue_head(&se_sess->cmd_list_wq);
	return percpu_ref_init(&se_sess->cmd_count,
			       target_release_sess_cmd_refcnt, 0, GFP_KERNEL);
}
EXPORT_SYMBOL(transport_init_session);

/**
 * transport_alloc_session - allocate a session object and initialize it
 * @sup_prot_ops: bitmask that defines which T10-PI modes are supported.
 */
struct se_session *transport_alloc_session(enum target_prot_op sup_prot_ops)
{
	struct se_session *se_sess;
	int ret;

	se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
	if (!se_sess) {
		pr_err("Unable to allocate struct se_session from"
				" se_sess_cache\n");
		return ERR_PTR(-ENOMEM);
	}
	ret = transport_init_session(se_sess);
	if (ret < 0) {
		kmem_cache_free(se_sess_cache, se_sess);
		return ERR_PTR(ret);
	}
	se_sess->sup_prot_ops = sup_prot_ops;

	return se_sess;
}
EXPORT_SYMBOL(transport_alloc_session);

/**
 * transport_alloc_session_tags - allocate target driver private data
 * @se_sess:  Session pointer.
 * @tag_num:  Maximum number of in-flight commands between initiator and target.
 * @tag_size: Size in bytes of the private data a target driver associates with
 *	      each command.
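 *
 * The per-command private data is allocated as a single kvcalloc() area of
 * @tag_num * @tag_size bytes; individual slots are handed out through the
 * sess_tag_pool sbitmap.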
 */
int transport_alloc_session_tags(struct se_session *se_sess,
				 unsigned int tag_num, unsigned int tag_size)
{
	int rc;

	se_sess->sess_cmd_map = kvcalloc(tag_size, tag_num,
					 GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!se_sess->sess_cmd_map) {
		pr_err("Unable to allocate se_sess->sess_cmd_map\n");
		return -ENOMEM;
	}

	rc = sbitmap_queue_init_node(&se_sess->sess_tag_pool, tag_num, -1,
			false, GFP_KERNEL, NUMA_NO_NODE);
	if (rc < 0) {
		pr_err("Unable to init se_sess->sess_tag_pool,"
			" tag_num: %u\n", tag_num);
		kvfree(se_sess->sess_cmd_map);
		se_sess->sess_cmd_map = NULL;
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL(transport_alloc_session_tags);

/**
 * transport_init_session_tags - allocate a session and target driver private data
 * @tag_num:  Maximum number of in-flight commands between initiator and target.
 * @tag_size: Size in bytes of the private data a target driver associates with
 *	      each command.
 * @sup_prot_ops: bitmask that defines which T10-PI modes are supported.
 */
static struct se_session *
transport_init_session_tags(unsigned int tag_num, unsigned int tag_size,
			    enum target_prot_op sup_prot_ops)
{
	struct se_session *se_sess;
	int rc;

	if (tag_num != 0 && !tag_size) {
		pr_err("init_session_tags called with percpu-ida tag_num:"
			" %u, but zero tag_size\n", tag_num);
		return ERR_PTR(-EINVAL);
	}
	if (!tag_num && tag_size) {
		pr_err("init_session_tags called with percpu-ida tag_size:"
			" %u, but zero tag_num\n", tag_size);
		return ERR_PTR(-EINVAL);
	}

	se_sess = transport_alloc_session(sup_prot_ops);
	if (IS_ERR(se_sess))
		return se_sess;

	rc = transport_alloc_session_tags(se_sess, tag_num, tag_size);
	if (rc < 0) {
		transport_free_session(se_sess);
		return ERR_PTR(-ENOMEM);
	}

	return se_sess;
}

/*
 * Called with struct se_portal_group->session_lock held (spin_lock_irqsave).
 */
void __transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
	unsigned char buf[PR_REG_ISID_LEN];
	unsigned long flags;

	se_sess->se_tpg = se_tpg;
	se_sess->fabric_sess_ptr = fabric_sess_ptr;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate active se_sessions.
	 *
	 * Only set for struct se_session's that will actually be moving I/O.
	 * eg: *NOT* discovery sessions.
	 */
	if (se_nacl) {
		/*
		 * Determine if fabric allows for T10-PI feature bits exposed to
		 * initiators for device backends with !dev->dev_attrib.pi_prot_type.
		 *
		 * If so, then always save prot_type on a per se_node_acl node
		 * basis and re-instate the previous sess_prot_type to avoid
		 * disabling PI from below any previously initiator side
		 * registered LUNs.
		 */
		if (se_nacl->saved_prot_type)
			se_sess->sess_prot_type = se_nacl->saved_prot_type;
		else if (tfo->tpg_check_prot_fabric_only)
			se_sess->sess_prot_type = se_nacl->saved_prot_type =
					tfo->tpg_check_prot_fabric_only(se_tpg);
		/*
		 * If the fabric module supports an ISID based TransportID,
		 * save this value in binary from the fabric I_T Nexus now.
		 */
		if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
			memset(&buf[0], 0, PR_REG_ISID_LEN);
			se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
					&buf[0], PR_REG_ISID_LEN);
			se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
		}

		spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
		/*
		 * The se_nacl->nacl_sess pointer will be set to the
		 * last active I_T Nexus for each struct se_node_acl.
		 */
		se_nacl->nacl_sess = se_sess;

		list_add_tail(&se_sess->sess_acl_list,
			      &se_nacl->acl_sess_list);
		spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
	}
	list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);

	pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
		se_tpg->se_tpg_tfo->fabric_name, se_sess->fabric_sess_ptr);
}
EXPORT_SYMBOL(__transport_register_session);

void transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	unsigned long flags;

	spin_lock_irqsave(&se_tpg->session_lock, flags);
	__transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
	spin_unlock_irqrestore(&se_tpg->session_lock, flags);
}
EXPORT_SYMBOL(transport_register_session);

struct se_session *
target_setup_session(struct se_portal_group *tpg,
		     unsigned int tag_num, unsigned int tag_size,
		     enum target_prot_op prot_op,
		     const char *initiatorname, void *private,
		     int (*callback)(struct se_portal_group *,
				     struct se_session *, void *))
{
	struct se_session *sess;

	/*
	 * If the fabric driver is using percpu-ida based pre allocation
	 * of I/O descriptor tags, go ahead and perform that setup now..
	 */
	if (tag_num != 0)
		sess = transport_init_session_tags(tag_num, tag_size, prot_op);
	else
		sess = transport_alloc_session(prot_op);

	if (IS_ERR(sess))
		return sess;

	sess->se_node_acl = core_tpg_check_initiator_node_acl(tpg,
					(unsigned char *)initiatorname);
	if (!sess->se_node_acl) {
		transport_free_session(sess);
		return ERR_PTR(-EACCES);
	}
	/*
	 * Go ahead and perform any remaining fabric setup that is
	 * required before transport_register_session().
	 */
	if (callback != NULL) {
		int rc = callback(tpg, sess, private);
		if (rc) {
			transport_free_session(sess);
			return ERR_PTR(rc);
		}
	}

	transport_register_session(tpg, sess->se_node_acl, sess, private);
	return sess;
}
EXPORT_SYMBOL(target_setup_session);

ssize_t target_show_dynamic_sessions(struct se_portal_group *se_tpg, char *page)
{
	struct se_session *se_sess;
	ssize_t len = 0;

	spin_lock_bh(&se_tpg->session_lock);
	list_for_each_entry(se_sess, &se_tpg->tpg_sess_list, sess_list) {
		if (!se_sess->se_node_acl)
			continue;
		if (!se_sess->se_node_acl->dynamic_node_acl)
			continue;
		if (strlen(se_sess->se_node_acl->initiatorname) + 1 + len > PAGE_SIZE)
			break;

		len += snprintf(page + len, PAGE_SIZE - len, "%s\n",
				se_sess->se_node_acl->initiatorname);
		len += 1; /* Include NULL terminator */
	}
	spin_unlock_bh(&se_tpg->session_lock);

	return len;
}
EXPORT_SYMBOL(target_show_dynamic_sessions);

static void target_complete_nacl(struct kref *kref)
{
	struct se_node_acl *nacl = container_of(kref,
				struct se_node_acl, acl_kref);
	struct se_portal_group *se_tpg = nacl->se_tpg;

	if (!nacl->dynamic_stop) {
		complete(&nacl->acl_free_comp);
		return;
	}

	mutex_lock(&se_tpg->acl_node_mutex);
	list_del_init(&nacl->acl_list);
	mutex_unlock(&se_tpg->acl_node_mutex);

	core_tpg_wait_for_nacl_pr_ref(nacl);
	core_free_device_list_for_node(nacl, se_tpg);
	kfree(nacl);
}

void target_put_nacl(struct se_node_acl *nacl)
{
	kref_put(&nacl->acl_kref, target_complete_nacl);
}
EXPORT_SYMBOL(target_put_nacl);

void transport_deregister_session_configfs(struct se_session *se_sess)
{
	struct se_node_acl *se_nacl;
	unsigned long flags;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
	 */
	se_nacl = se_sess->se_node_acl;
	if (se_nacl) {
		spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
		if (!list_empty(&se_sess->sess_acl_list))
			list_del_init(&se_sess->sess_acl_list);
		/*
		 * If the session list is empty, then clear the pointer.
		 * Otherwise, set the struct se_session pointer from the tail
		 * element of the per struct se_node_acl active session list.
		 */
		if (list_empty(&se_nacl->acl_sess_list))
			se_nacl->nacl_sess = NULL;
		else {
			se_nacl->nacl_sess = container_of(
					se_nacl->acl_sess_list.prev,
					struct se_session, sess_acl_list);
		}
		spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
	}
}
EXPORT_SYMBOL(transport_deregister_session_configfs);

void transport_free_session(struct se_session *se_sess)
{
	struct se_node_acl *se_nacl = se_sess->se_node_acl;

	/*
	 * Drop the se_node_acl->nacl_kref obtained from within
	 * core_tpg_get_initiator_node_acl().
	 */
	if (se_nacl) {
		struct se_portal_group *se_tpg = se_nacl->se_tpg;
		const struct target_core_fabric_ops *se_tfo = se_tpg->se_tpg_tfo;
		unsigned long flags;

		se_sess->se_node_acl = NULL;

		/*
		 * Also determine if we need to drop the extra ->cmd_kref if
		 * it had been previously dynamically generated, and
		 * the endpoint is not caching dynamic ACLs.
		 */
		mutex_lock(&se_tpg->acl_node_mutex);
		if (se_nacl->dynamic_node_acl &&
		    !se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
			spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
			if (list_empty(&se_nacl->acl_sess_list))
				se_nacl->dynamic_stop = true;
			spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);

			if (se_nacl->dynamic_stop)
				list_del_init(&se_nacl->acl_list);
		}
		mutex_unlock(&se_tpg->acl_node_mutex);

		if (se_nacl->dynamic_stop)
			target_put_nacl(se_nacl);

		target_put_nacl(se_nacl);
	}
	if (se_sess->sess_cmd_map) {
		sbitmap_queue_free(&se_sess->sess_tag_pool);
		kvfree(se_sess->sess_cmd_map);
	}
	percpu_ref_exit(&se_sess->cmd_count);
	kmem_cache_free(se_sess_cache, se_sess);
}
EXPORT_SYMBOL(transport_free_session);

void transport_deregister_session(struct se_session *se_sess)
{
	struct se_portal_group *se_tpg = se_sess->se_tpg;
	unsigned long flags;

	if (!se_tpg) {
		transport_free_session(se_sess);
		return;
	}

	spin_lock_irqsave(&se_tpg->session_lock, flags);
	list_del(&se_sess->sess_list);
	se_sess->se_tpg = NULL;
	se_sess->fabric_sess_ptr = NULL;
	spin_unlock_irqrestore(&se_tpg->session_lock, flags);

	pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
		se_tpg->se_tpg_tfo->fabric_name);
	/*
	 * If the last kref is dropping now for an explicit NodeACL, wake up the
	 * sleeping ->acl_free_comp caller so the configfs se_node_acl->acl_group
	 * removal context can proceed from within transport_free_session() code.
	 *
	 * For dynamic ACL, target_put_nacl() uses target_complete_nacl()
	 * to release all remaining generate_node_acl=1 created ACL resources.
	 */

	transport_free_session(se_sess);
}
EXPORT_SYMBOL(transport_deregister_session);

void target_remove_session(struct se_session *se_sess)
{
	transport_deregister_session_configfs(se_sess);
	transport_deregister_session(se_sess);
}
EXPORT_SYMBOL(target_remove_session);

static void target_remove_from_state_list(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned long flags;

	if (!dev)
		return;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	if (cmd->state_active) {
		list_del(&cmd->state_list);
		cmd->state_active = false;
	}
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}

/*
 * This function is called by the target core after the target core has
 * finished processing a SCSI command or SCSI TMF. Both the regular command
 * processing code and the code for aborting commands can call this
 * function. CMD_T_STOP is set if and only if another thread is waiting
 * inside transport_wait_for_tasks() for t_transport_stop_comp.
 */
static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
{
	unsigned long flags;

	target_remove_from_state_list(cmd);

	/*
	 * Clear struct se_cmd->se_lun before the handoff to FE.
	 */
	cmd->se_lun = NULL;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	/*
	 * Determine if frontend context caller is requesting the stopping of
	 * this command for frontend exceptions.
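	 *
	 * If CMD_T_STOP is set, complete ->t_transport_stop_comp and return 1
	 * so the command is not handed back to the fabric driver here.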
	 */
	if (cmd->transport_state & CMD_T_STOP) {
		pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
			__func__, __LINE__, cmd->tag);

		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		complete_all(&cmd->t_transport_stop_comp);
		return 1;
	}
	cmd->transport_state &= ~CMD_T_ACTIVE;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	/*
	 * Some fabric modules like tcm_loop can release their internally
	 * allocated I/O reference and struct se_cmd now.
	 *
	 * Fabric modules are expected to return '1' here if the se_cmd being
	 * passed is released at this point, or zero if not being released.
	 */
	return cmd->se_tfo->check_stop_free(cmd);
}

static void transport_lun_remove_cmd(struct se_cmd *cmd)
{
	struct se_lun *lun = cmd->se_lun;

	if (!lun)
		return;

	if (cmpxchg(&cmd->lun_ref_active, true, false))
		percpu_ref_put(&lun->lun_ref);
}

static void target_complete_failure_work(struct work_struct *work)
{
	struct se_cmd *cmd = container_of(work, struct se_cmd, work);

	transport_generic_request_failure(cmd,
				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
}

/*
 * Used when asking transport to copy Sense Data from the underlying
 * Linux/SCSI struct scsi_cmnd
 */
static unsigned char *transport_get_sense_buffer(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	WARN_ON(!cmd->se_lun);

	if (!dev)
		return NULL;

	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION)
		return NULL;

	cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;

	pr_debug("HBA_[%u]_PLUG[%s]: Requesting sense for SAM STATUS: 0x%02x\n",
		dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status);
	return cmd->sense_buffer;
}

void transport_copy_sense_to_cmd(struct se_cmd *cmd, unsigned char *sense)
{
	unsigned char *cmd_sense_buf;
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	cmd_sense_buf = transport_get_sense_buffer(cmd);
	if (!cmd_sense_buf) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return;
	}

	cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
	memcpy(cmd_sense_buf, sense, cmd->scsi_sense_length);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}
EXPORT_SYMBOL(transport_copy_sense_to_cmd);

static void target_handle_abort(struct se_cmd *cmd)
{
	bool tas = cmd->transport_state & CMD_T_TAS;
	bool ack_kref = cmd->se_cmd_flags & SCF_ACK_KREF;
	int ret;

	pr_debug("tag %#llx: send_abort_response = %d\n", cmd->tag, tas);

	if (tas) {
		if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
			cmd->scsi_status = SAM_STAT_TASK_ABORTED;
			pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x, ITT: 0x%08llx\n",
				 cmd->t_task_cdb[0], cmd->tag);
			trace_target_cmd_complete(cmd);
			ret = cmd->se_tfo->queue_status(cmd);
			if (ret) {
				transport_handle_queue_full(cmd, cmd->se_dev,
							    ret, false);
				return;
			}
		} else {
			cmd->se_tmr_req->response = TMR_FUNCTION_REJECTED;
			cmd->se_tfo->queue_tm_rsp(cmd);
		}
	} else {
		/*
		 * Allow the fabric driver to unmap any resources before
		 * releasing the descriptor via TFO->release_cmd().
		 */
		cmd->se_tfo->aborted_task(cmd);
		if (ack_kref)
			WARN_ON_ONCE(target_put_sess_cmd(cmd) != 0);
		/*
		 * To do: establish a unit attention condition on the I_T
		 * nexus associated with cmd.
		 * See also the paragraph "Aborting
		 * commands" in SAM.
		 */
	}

	WARN_ON_ONCE(kref_read(&cmd->cmd_kref) == 0);

	transport_lun_remove_cmd(cmd);

	transport_cmd_check_stop_to_fabric(cmd);
}

static void target_abort_work(struct work_struct *work)
{
	struct se_cmd *cmd = container_of(work, struct se_cmd, work);

	target_handle_abort(cmd);
}

static bool target_cmd_interrupted(struct se_cmd *cmd)
{
	int post_ret;

	if (cmd->transport_state & CMD_T_ABORTED) {
		if (cmd->transport_complete_callback)
			cmd->transport_complete_callback(cmd, false, &post_ret);
		INIT_WORK(&cmd->work, target_abort_work);
		queue_work(target_completion_wq, &cmd->work);
		return true;
	} else if (cmd->transport_state & CMD_T_STOP) {
		if (cmd->transport_complete_callback)
			cmd->transport_complete_callback(cmd, false, &post_ret);
		complete_all(&cmd->t_transport_stop_comp);
		return true;
	}

	return false;
}

/* May be called from interrupt context so must not sleep. */
void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
{
	int success;
	unsigned long flags;

	if (target_cmd_interrupted(cmd))
		return;

	cmd->scsi_status = scsi_status;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	switch (cmd->scsi_status) {
	case SAM_STAT_CHECK_CONDITION:
		if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
			success = 1;
		else
			success = 0;
		break;
	default:
		success = 1;
		break;
	}

	cmd->t_state = TRANSPORT_COMPLETE;
	cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	INIT_WORK(&cmd->work, success ? target_complete_ok_work :
		  target_complete_failure_work);
	if (cmd->se_cmd_flags & SCF_USE_CPUID)
		queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work);
	else
		queue_work(target_completion_wq, &cmd->work);
}
EXPORT_SYMBOL(target_complete_cmd);

void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length)
{
	if ((scsi_status == SAM_STAT_GOOD ||
	     cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) &&
	    length < cmd->data_length) {
		if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
			cmd->residual_count += cmd->data_length - length;
		} else {
			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
			cmd->residual_count = cmd->data_length - length;
		}

		cmd->data_length = length;
	}

	target_complete_cmd(cmd, scsi_status);
}
EXPORT_SYMBOL(target_complete_cmd_with_length);

static void target_add_to_state_list(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	if (!cmd->state_active) {
		list_add_tail(&cmd->state_list, &dev->state_list);
		cmd->state_active = true;
	}
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}

/*
 * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status
 */
static void transport_write_pending_qf(struct se_cmd *cmd);
static void transport_complete_qf(struct se_cmd *cmd);

void target_qf_do_work(struct work_struct *work)
{
	struct se_device *dev = container_of(work, struct se_device,
					qf_work_queue);
	LIST_HEAD(qf_cmd_list);
	struct se_cmd *cmd, *cmd_tmp;

	spin_lock_irq(&dev->qf_cmd_lock);
	list_splice_init(&dev->qf_cmd_list, &qf_cmd_list);
	spin_unlock_irq(&dev->qf_cmd_lock);

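	/*
	 * Retry the pending completion or WRITE_PENDING callback for each
	 * command that previously hit QUEUE_FULL, now outside of
	 * dev->qf_cmd_lock.
	 */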
	list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
		list_del(&cmd->se_qf_node);
		atomic_dec_mb(&dev->dev_qf_count);

		pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
			" context: %s\n", cmd->se_tfo->fabric_name, cmd,
			(cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" :
			(cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
			: "UNKNOWN");

		if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP)
			transport_write_pending_qf(cmd);
		else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK ||
			 cmd->t_state == TRANSPORT_COMPLETE_QF_ERR)
			transport_complete_qf(cmd);
	}
}

unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
{
	switch (cmd->data_direction) {
	case DMA_NONE:
		return "NONE";
	case DMA_FROM_DEVICE:
		return "READ";
	case DMA_TO_DEVICE:
		return "WRITE";
	case DMA_BIDIRECTIONAL:
		return "BIDI";
	default:
		break;
	}

	return "UNKNOWN";
}

void transport_dump_dev_state(
	struct se_device *dev,
	char *b,
	int *bl)
{
	*bl += sprintf(b + *bl, "Status: ");
	if (dev->export_count)
		*bl += sprintf(b + *bl, "ACTIVATED");
	else
		*bl += sprintf(b + *bl, "DEACTIVATED");

	*bl += sprintf(b + *bl, " Max Queue Depth: %d", dev->queue_depth);
	*bl += sprintf(b + *bl, " SectorSize: %u HwMaxSectors: %u\n",
		dev->dev_attrib.block_size,
		dev->dev_attrib.hw_max_sectors);
	*bl += sprintf(b + *bl, " ");
}

void transport_dump_vpd_proto_id(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Protocol Identifier: ");

	switch (vpd->protocol_identifier) {
	case 0x00:
		sprintf(buf+len, "Fibre Channel\n");
		break;
	case 0x10:
		sprintf(buf+len, "Parallel SCSI\n");
		break;
	case 0x20:
		sprintf(buf+len, "SSA\n");
		break;
	case 0x30:
		sprintf(buf+len, "IEEE 1394\n");
		break;
	case 0x40:
		sprintf(buf+len, "SCSI Remote Direct Memory Access"
				" Protocol\n");
		break;
	case 0x50:
		sprintf(buf+len, "Internet SCSI (iSCSI)\n");
		break;
	case 0x60:
		sprintf(buf+len, "SAS Serial SCSI Protocol\n");
		break;
	case 0x70:
		sprintf(buf+len, "Automation/Drive Interface Transport"
				" Protocol\n");
		break;
	case 0x80:
		sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n");
		break;
	default:
		sprintf(buf+len, "Unknown 0x%02x\n",
				vpd->protocol_identifier);
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);
}

void
transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * Check if the Protocol Identifier Valid (PIV) bit is set..
	 *
	 * from spc3r23.pdf section 7.5.1
	 */
	if (page_83[1] & 0x80) {
		vpd->protocol_identifier = (page_83[0] & 0xf0);
		vpd->protocol_identifier_set = 1;
		transport_dump_vpd_proto_id(vpd, NULL, 0);
	}
}
EXPORT_SYMBOL(transport_set_vpd_proto_id);

int transport_dump_vpd_assoc(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Association: ");

	switch (vpd->association) {
	case 0x00:
		sprintf(buf+len, "addressed logical unit\n");
		break;
	case 0x10:
		sprintf(buf+len, "target port\n");
		break;
	case 0x20:
		sprintf(buf+len, "SCSI target device\n");
		break;
	default:
		sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
		ret = -EINVAL;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);

	return ret;
}

int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * The VPD identification association..
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 297
	 */
	vpd->association = (page_83[1] & 0x30);
	return transport_dump_vpd_assoc(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_assoc);

int transport_dump_vpd_ident_type(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Type: ");

	switch (vpd->device_identifier_type) {
	case 0x00:
		sprintf(buf+len, "Vendor specific\n");
		break;
	case 0x01:
		sprintf(buf+len, "T10 Vendor ID based\n");
		break;
	case 0x02:
		sprintf(buf+len, "EUI-64 based\n");
		break;
	case 0x03:
		sprintf(buf+len, "NAA\n");
		break;
	case 0x04:
		sprintf(buf+len, "Relative target port identifier\n");
		break;
	case 0x08:
		sprintf(buf+len, "SCSI name string\n");
		break;
	default:
		sprintf(buf+len, "Unsupported: 0x%02x\n",
				vpd->device_identifier_type);
		ret = -EINVAL;
		break;
	}

	if (p_buf) {
		if (p_buf_len < strlen(buf)+1)
			return -EINVAL;
		strncpy(p_buf, buf, p_buf_len);
	} else {
		pr_debug("%s", buf);
	}

	return ret;
}

int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * The VPD identifier type..
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 298
	 */
	vpd->device_identifier_type = (page_83[1] & 0x0f);
	return transport_dump_vpd_ident_type(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident_type);

int transport_dump_vpd_ident(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;

	memset(buf, 0, VPD_TMP_BUF_SIZE);

	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		snprintf(buf, sizeof(buf),
			"T10 VPD Binary Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	case 0x02: /* ASCII */
		snprintf(buf, sizeof(buf),
			"T10 VPD ASCII Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	case 0x03: /* UTF-8 */
		snprintf(buf, sizeof(buf),
			"T10 VPD UTF-8 Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	default:
		sprintf(buf, "T10 VPD Device Identifier encoding unsupported:"
			" 0x%02x", vpd->device_identifier_code_set);
		ret = -EINVAL;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);

	return ret;
}

int
transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
{
	static const char hex_str[] = "0123456789abcdef";
	int j = 0, i = 4; /* offset to start of the identifier */

	/*
	 * The VPD Code Set (encoding)
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 296
	 */
	vpd->device_identifier_code_set = (page_83[0] & 0x0f);
	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		vpd->device_identifier[j++] =
				hex_str[vpd->device_identifier_type];
		while (i < (4 + page_83[3])) {
			vpd->device_identifier[j++] =
					hex_str[(page_83[i] & 0xf0) >> 4];
			vpd->device_identifier[j++] =
					hex_str[page_83[i] & 0x0f];
			i++;
		}
		break;
	case 0x02: /* ASCII */
	case 0x03: /* UTF-8 */
		while (i < (4 + page_83[3]))
			vpd->device_identifier[j++] = page_83[i++];
		break;
	default:
		break;
	}

	return transport_dump_vpd_ident(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident);

static sense_reason_t
target_check_max_data_sg_nents(struct se_cmd *cmd, struct se_device *dev,
			       unsigned int size)
{
	u32 mtl;

	if (!cmd->se_tfo->max_data_sg_nents)
		return TCM_NO_SENSE;
	/*
	 * Check if se_cmd->data_length exceeds the fabric enforced maximum
	 * transfer length, derived from max_data_sg_nents single PAGE_SIZE
	 * entry scatter-lists. If so, set SCF_UNDERFLOW_BIT + residual_count
	 * and reduce cmd->data_length to that maximum length.
	 */
	mtl = (cmd->se_tfo->max_data_sg_nents * PAGE_SIZE);
	if (cmd->data_length > mtl) {
		/*
		 * If an existing CDB overflow is present, calculate new residual
		 * based on CDB size minus fabric maximum transfer length.
		 *
		 * If an existing CDB underflow is present, calculate new residual
		 * based on original cmd->data_length minus fabric maximum transfer
		 * length.
		 *
		 * Otherwise, set the underflow residual based on cmd->data_length
		 * minus fabric maximum transfer length.
		 */
		if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
			cmd->residual_count = (size - mtl);
		} else if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
			u32 orig_dl = size + cmd->residual_count;
			cmd->residual_count = (orig_dl - mtl);
		} else {
			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
			cmd->residual_count = (cmd->data_length - mtl);
		}
		cmd->data_length = mtl;
		/*
		 * Reset sbc_check_prot() calculated protection payload
		 * length based upon the new smaller MTL.
		 */
		if (cmd->prot_length) {
			u32 sectors = (mtl / dev->dev_attrib.block_size);
			cmd->prot_length = dev->prot_length * sectors;
		}
	}
	return TCM_NO_SENSE;
}

sense_reason_t
target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
{
	struct se_device *dev = cmd->se_dev;

	if (cmd->unknown_data_length) {
		cmd->data_length = size;
	} else if (size != cmd->data_length) {
		pr_warn_ratelimited("TARGET_CORE[%s]: Expected Transfer Length:"
			" %u does not match SCSI CDB Length: %u for SAM Opcode:"
			" 0x%02x\n", cmd->se_tfo->fabric_name,
				cmd->data_length, size, cmd->t_task_cdb[0]);

		if (cmd->data_direction == DMA_TO_DEVICE) {
			if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
				pr_err_ratelimited("Rejecting underflow/overflow"
						   " for WRITE data CDB\n");
				return TCM_INVALID_CDB_FIELD;
			}
			/*
			 * Some fabric drivers like iscsi-target still expect to
			 * always reject overflow writes. Reject this case until
			 * full fabric driver level support for overflow writes
			 * is introduced tree-wide.
			 */
			if (size > cmd->data_length) {
				pr_err_ratelimited("Rejecting overflow for"
						   " WRITE control CDB\n");
				return TCM_INVALID_CDB_FIELD;
			}
		}
		/*
		 * Reject READ_* or WRITE_* with overflow/underflow for
		 * type SCF_SCSI_DATA_CDB.
		 */
		if (dev->dev_attrib.block_size != 512) {
			pr_err("Failing OVERFLOW/UNDERFLOW for LBA op"
				" CDB on non 512-byte sector setup subsystem"
				" plugin: %s\n", dev->transport->name);
			/* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
			return TCM_INVALID_CDB_FIELD;
		}
		/*
		 * For the overflow case keep the existing fabric provided
		 * ->data_length. Otherwise for the underflow case, reset
		 * ->data_length to the smaller SCSI expected data transfer
		 * length.
		 */
		if (size > cmd->data_length) {
			cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
			cmd->residual_count = (size - cmd->data_length);
		} else {
			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
			cmd->residual_count = (cmd->data_length - size);
			cmd->data_length = size;
		}
	}

	return target_check_max_data_sg_nents(cmd, dev, size);

}

/*
 * Used by fabric modules containing a local struct se_cmd within their
 * fabric dependent per I/O descriptor.
 *
 * Preserves the value of @cmd->tag.
 */
void transport_init_se_cmd(
	struct se_cmd *cmd,
	const struct target_core_fabric_ops *tfo,
	struct se_session *se_sess,
	u32 data_length,
	int data_direction,
	int task_attr,
	unsigned char *sense_buffer)
{
	INIT_LIST_HEAD(&cmd->se_delayed_node);
	INIT_LIST_HEAD(&cmd->se_qf_node);
	INIT_LIST_HEAD(&cmd->se_cmd_list);
	INIT_LIST_HEAD(&cmd->state_list);
	init_completion(&cmd->t_transport_stop_comp);
	cmd->free_compl = NULL;
	cmd->abrt_compl = NULL;
	spin_lock_init(&cmd->t_state_lock);
	INIT_WORK(&cmd->work, NULL);
	kref_init(&cmd->cmd_kref);

	cmd->se_tfo = tfo;
	cmd->se_sess = se_sess;
	cmd->data_length = data_length;
	cmd->data_direction = data_direction;
	cmd->sam_task_attr = task_attr;
	cmd->sense_buffer = sense_buffer;

	cmd->state_active = false;
}
EXPORT_SYMBOL(transport_init_se_cmd);

static sense_reason_t
transport_check_alloc_task_attr(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Check if SAM Task Attribute emulation is enabled for this
	 * struct se_device storage object
	 */
	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
		return 0;

	if (cmd->sam_task_attr == TCM_ACA_TAG) {
		pr_debug("SAM Task Attribute ACA"
			" emulation is not supported\n");
		return TCM_INVALID_CDB_FIELD;
	}

	return 0;
}

sense_reason_t
target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb)
{
	struct se_device *dev = cmd->se_dev;
	sense_reason_t ret;

	/*
	 * Ensure that the received CDB is less than the max (252 + 8) bytes
	 * for VARIABLE_LENGTH_CMD
	 */
	if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
		pr_err("Received SCSI CDB with command_size: %d that"
			" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
			scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
		return TCM_INVALID_CDB_FIELD;
	}
	/*
	 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
	 * allocate the additional extended CDB buffer now.. Otherwise
	 * setup the pointer from __t_task_cdb to t_task_cdb.
	 */
	if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
		cmd->t_task_cdb = kzalloc(scsi_command_size(cdb),
						GFP_KERNEL);
		if (!cmd->t_task_cdb) {
			pr_err("Unable to allocate cmd->t_task_cdb"
				" %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
				scsi_command_size(cdb),
				(unsigned long)sizeof(cmd->__t_task_cdb));
			return TCM_OUT_OF_RESOURCES;
		}
	} else
		cmd->t_task_cdb = &cmd->__t_task_cdb[0];
	/*
	 * Copy the original CDB into cmd->t_task_cdb.
	 */
	memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));

	trace_target_sequencer_start(cmd);

	ret = dev->transport->parse_cdb(cmd);
	if (ret == TCM_UNSUPPORTED_SCSI_OPCODE)
		pr_warn_ratelimited("%s/%s: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n",
				    cmd->se_tfo->fabric_name,
				    cmd->se_sess->se_node_acl->initiatorname,
				    cmd->t_task_cdb[0]);
	if (ret)
		return ret;

	ret = transport_check_alloc_task_attr(cmd);
	if (ret)
		return ret;

	cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
	atomic_long_inc(&cmd->se_lun->lun_stats.cmd_pdus);
	return 0;
}
EXPORT_SYMBOL(target_setup_cmd_from_cdb);

/*
 * Used by fabric module frontends to queue tasks directly.
 * May only be used from process context.
 */
int transport_handle_cdb_direct(
	struct se_cmd *cmd)
{
	sense_reason_t ret;

	if (!cmd->se_lun) {
		dump_stack();
		pr_err("cmd->se_lun is NULL\n");
		return -EINVAL;
	}
	if (in_interrupt()) {
		dump_stack();
		pr_err("transport_generic_handle_cdb cannot be called"
				" from interrupt context\n");
		return -EINVAL;
	}
	/*
	 * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE to ensure that
	 * outstanding descriptors are handled correctly during shutdown via
	 * transport_wait_for_tasks()
	 *
	 * Also, we don't take cmd->t_state_lock here as we only expect
	 * this to be called for initial descriptor submission.
	 */
	cmd->t_state = TRANSPORT_NEW_CMD;
	cmd->transport_state |= CMD_T_ACTIVE;

	/*
	 * transport_generic_new_cmd() is already handling QUEUE_FULL,
	 * so follow TRANSPORT_NEW_CMD processing thread context usage
	 * and call transport_generic_request_failure() if necessary..
	 */
	ret = transport_generic_new_cmd(cmd);
	if (ret)
		transport_generic_request_failure(cmd, ret);
	return 0;
}
EXPORT_SYMBOL(transport_handle_cdb_direct);

sense_reason_t
transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
		u32 sgl_count, struct scatterlist *sgl_bidi, u32 sgl_bidi_count)
{
	if (!sgl || !sgl_count)
		return 0;

	/*
	 * Reject SCSI data overflow with map_mem_to_cmd() as incoming
	 * scatterlists already have been set to follow what the fabric
	 * passes for the original expected data transfer length.
	 */
	if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
		pr_warn("Rejecting SCSI DATA overflow for fabric using"
			" SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
		return TCM_INVALID_CDB_FIELD;
	}

	cmd->t_data_sg = sgl;
	cmd->t_data_nents = sgl_count;
	cmd->t_bidi_data_sg = sgl_bidi;
	cmd->t_bidi_data_nents = sgl_bidi_count;

	cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
	return 0;
}

/**
 * target_submit_cmd_map_sgls - lookup unpacked lun and submit uninitialized
 *		 se_cmd + use pre-allocated SGL memory.
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @cdb: pointer to SCSI CDB
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @data_length: fabric expected data transfer length
 * @task_attr: SAM task attribute
 * @data_dir: DMA data direction
 * @flags: flags for command submission from target_sc_flags_tables
 * @sgl: struct scatterlist memory for unidirectional mapping
 * @sgl_count: scatterlist count for unidirectional mapping
 * @sgl_bidi: struct scatterlist memory for bidirectional READ mapping
 * @sgl_bidi_count: scatterlist count for bidirectional READ mapping
 * @sgl_prot: struct scatterlist memory protection information
 * @sgl_prot_count: scatterlist count for protection information
 *
 * Task tags are supported if the caller has set @se_cmd->tag.
 *
 * Returns non zero to signal active I/O shutdown failure.  All other
 * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
 * but still return zero here.
 *
 * This may only be called from process context, and also currently
 * assumes internal allocation of fabric payload buffer by target-core.
 */
int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *cdb, unsigned char *sense, u64 unpacked_lun,
		u32 data_length, int task_attr, int data_dir, int flags,
		struct scatterlist *sgl, u32 sgl_count,
		struct scatterlist *sgl_bidi, u32 sgl_bidi_count,
		struct scatterlist *sgl_prot, u32 sgl_prot_count)
{
	struct se_portal_group *se_tpg;
	sense_reason_t rc;
	int ret;

	se_tpg = se_sess->se_tpg;
	BUG_ON(!se_tpg);
	BUG_ON(se_cmd->se_tfo || se_cmd->se_sess);
	BUG_ON(in_interrupt());
	/*
	 * Initialize se_cmd for target operation.  From this point
	 * exceptions are handled by sending exception status via
	 * target_core_fabric_ops->queue_status() callback
	 */
	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
				data_length, data_dir, task_attr, sense);

	if (flags & TARGET_SCF_USE_CPUID)
		se_cmd->se_cmd_flags |= SCF_USE_CPUID;
	else
		se_cmd->cpuid = WORK_CPU_UNBOUND;

	if (flags & TARGET_SCF_UNKNOWN_SIZE)
		se_cmd->unknown_data_length = 1;
	/*
	 * Obtain struct se_cmd->cmd_kref reference and add new cmd to
	 * se_sess->sess_cmd_list.  A second kref_get here is necessary
	 * for fabrics using TARGET_SCF_ACK_KREF that expect a second
	 * kref_put() to happen during fabric packet acknowledgement.
	 */
	ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
	if (ret)
		return ret;
	/*
	 * Signal bidirectional data payloads to target-core
	 */
	if (flags & TARGET_SCF_BIDI_OP)
		se_cmd->se_cmd_flags |= SCF_BIDI;
	/*
	 * Locate se_lun pointer and attach it to struct se_cmd
	 */
	rc = transport_lookup_cmd_lun(se_cmd, unpacked_lun);
	if (rc) {
		transport_send_check_condition_and_sense(se_cmd, rc, 0);
		target_put_sess_cmd(se_cmd);
		return 0;
	}

	rc = target_setup_cmd_from_cdb(se_cmd, cdb);
	if (rc != 0) {
		transport_generic_request_failure(se_cmd, rc);
		return 0;
	}

	/*
	 * Save pointers for SGLs containing protection information,
	 * if present.
	 */
	if (sgl_prot_count) {
		se_cmd->t_prot_sg = sgl_prot;
		se_cmd->t_prot_nents = sgl_prot_count;
		se_cmd->se_cmd_flags |= SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC;
	}

	/*
	 * When a non zero sgl_count has been passed perform SGL passthrough
	 * mapping for pre-allocated fabric memory instead of having target
	 * core perform an internal SGL allocation..
	 */
	if (sgl_count != 0) {
		BUG_ON(!sgl);

		/*
		 * A work-around for tcm_loop as some userspace code via
		 * scsi-generic do not memset their associated read buffers,
		 * so go ahead and do that here for type non-data CDBs.  Also
		 * note that this is currently guaranteed to be a single SGL
		 * for this case by target core in target_setup_cmd_from_cdb()
		 * -> transport_generic_cmd_sequencer().
		 */
		if (!(se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) &&
		     se_cmd->data_direction == DMA_FROM_DEVICE) {
			unsigned char *buf = NULL;

			if (sgl)
				buf = kmap(sg_page(sgl)) + sgl->offset;

			if (buf) {
				memset(buf, 0, sgl->length);
				kunmap(sg_page(sgl));
			}
		}

		rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
				sgl_bidi, sgl_bidi_count);
		if (rc != 0) {
			transport_generic_request_failure(se_cmd, rc);
			return 0;
		}
	}

	/*
	 * Check if we need to delay processing because of ALUA
	 * Active/NonOptimized primary access state..
	 */
	core_alua_check_nonop_delay(se_cmd);

	transport_handle_cdb_direct(se_cmd);
	return 0;
}
EXPORT_SYMBOL(target_submit_cmd_map_sgls);

/**
 * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @cdb: pointer to SCSI CDB
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @data_length: fabric expected data transfer length
 * @task_attr: SAM task attribute
 * @data_dir: DMA data direction
 * @flags: flags for command submission from target_sc_flags_tables
 *
 * Task tags are supported if the caller has set @se_cmd->tag.
 *
 * Returns non zero to signal active I/O shutdown failure.  All other
 * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
 * but still return zero here.
 *
 * This may only be called from process context, and also currently
 * assumes internal allocation of fabric payload buffer by target-core.
 *
 * It also assumes internal target core SGL memory allocation.
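 *
 * This is a wrapper around target_submit_cmd_map_sgls() that passes NULL
 * SGL arguments, so target-core allocates the data buffers internally.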
 */
int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *cdb, unsigned char *sense, u64 unpacked_lun,
		u32 data_length, int task_attr, int data_dir, int flags)
{
	return target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense,
			unpacked_lun, data_length, task_attr, data_dir,
			flags, NULL, 0, NULL, 0, NULL, 0);
}
EXPORT_SYMBOL(target_submit_cmd);

static void target_complete_tmr_failure(struct work_struct *work)
{
	struct se_cmd *se_cmd = container_of(work, struct se_cmd, work);

	se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
	se_cmd->se_tfo->queue_tm_rsp(se_cmd);

	transport_lun_remove_cmd(se_cmd);
	transport_cmd_check_stop_to_fabric(se_cmd);
}

static bool target_lookup_lun_from_tag(struct se_session *se_sess, u64 tag,
				       u64 *unpacked_lun)
{
	struct se_cmd *se_cmd;
	unsigned long flags;
	bool ret = false;

	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
	list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {
		if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
			continue;

		if (se_cmd->tag == tag) {
			*unpacked_lun = se_cmd->orig_fe_lun;
			ret = true;
			break;
		}
	}
	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);

	return ret;
}

/**
 * target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd
 *		       for TMR CDBs
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @fabric_tmr_ptr: fabric context for TMR req
 * @tm_type: Type of TM request
 * @gfp: gfp type for caller
 * @tag: referenced task tag for TMR_ABORT_TASK
 * @flags: submit cmd flags
 *
 * Callable from all contexts.
 **/

int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *sense, u64 unpacked_lun,
		void *fabric_tmr_ptr, unsigned char tm_type,
		gfp_t gfp, u64 tag, int flags)
{
	struct se_portal_group *se_tpg;
	int ret;

	se_tpg = se_sess->se_tpg;
	BUG_ON(!se_tpg);

	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
			      0, DMA_NONE, TCM_SIMPLE_TAG, sense);
	/*
	 * FIXME: Currently expect caller to handle se_cmd->se_tmr_req
	 * allocation failure.
	 */
	ret = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, tm_type, gfp);
	if (ret < 0)
		return -ENOMEM;

	if (tm_type == TMR_ABORT_TASK)
		se_cmd->se_tmr_req->ref_task_tag = tag;

	/* See target_submit_cmd for commentary */
	ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
	if (ret) {
		core_tmr_release_req(se_cmd->se_tmr_req);
		return ret;
	}
	/*
	 * If this is ABORT_TASK with no explicit fabric provided LUN,
	 * go ahead and search active session tags for a match to figure
	 * out unpacked_lun for the original se_cmd.
	 */
	if (tm_type == TMR_ABORT_TASK && (flags & TARGET_SCF_LOOKUP_LUN_FROM_TAG)) {
		if (!target_lookup_lun_from_tag(se_sess, tag, &unpacked_lun))
			goto failure;
	}

	ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun);
	if (ret)
		goto failure;

	transport_generic_handle_tmr(se_cmd);
	return 0;

	/*
	 * For callback during failure handling, push this work off
	 * to process context with TMR_LUN_DOES_NOT_EXIST status.
	 */
failure:
	INIT_WORK(&se_cmd->work, target_complete_tmr_failure);
	schedule_work(&se_cmd->work);
	return 0;
}
EXPORT_SYMBOL(target_submit_tmr);

/*
 * Handle SAM-esque emulation for generic transport request failures.
 */
void transport_generic_request_failure(struct se_cmd *cmd,
		sense_reason_t sense_reason)
{
	int ret = 0, post_ret;

	pr_debug("-----[ Storage Engine Exception; sense_reason %d\n",
		 sense_reason);
	target_show_cmd("-----[ ", cmd);

	/*
	 * For SAM Task Attribute emulation for failed struct se_cmd
	 */
	transport_complete_task_attr(cmd);

	if (cmd->transport_complete_callback)
		cmd->transport_complete_callback(cmd, false, &post_ret);

	if (cmd->transport_state & CMD_T_ABORTED) {
		INIT_WORK(&cmd->work, target_abort_work);
		queue_work(target_completion_wq, &cmd->work);
		return;
	}

	switch (sense_reason) {
	case TCM_NON_EXISTENT_LUN:
	case TCM_UNSUPPORTED_SCSI_OPCODE:
	case TCM_INVALID_CDB_FIELD:
	case TCM_INVALID_PARAMETER_LIST:
	case TCM_PARAMETER_LIST_LENGTH_ERROR:
	case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
	case TCM_UNKNOWN_MODE_PAGE:
	case TCM_WRITE_PROTECTED:
	case TCM_ADDRESS_OUT_OF_RANGE:
	case TCM_CHECK_CONDITION_ABORT_CMD:
	case TCM_CHECK_CONDITION_UNIT_ATTENTION:
	case TCM_CHECK_CONDITION_NOT_READY:
	case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED:
	case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
	case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
	case TCM_COPY_TARGET_DEVICE_NOT_REACHABLE:
	case TCM_TOO_MANY_TARGET_DESCS:
	case TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE:
	case TCM_TOO_MANY_SEGMENT_DESCS:
	case TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE:
		break;
	case TCM_OUT_OF_RESOURCES:
		cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
		goto queue_status;
	case TCM_LUN_BUSY:
		cmd->scsi_status = SAM_STAT_BUSY;
		goto queue_status;
	case TCM_RESERVATION_CONFLICT:
		/*
		 * No SENSE Data payload for this case, set SCSI Status
		 * and queue the response to $FABRIC_MOD.
		 *
		 * Uses linux/include/scsi/scsi.h SAM status codes defs
		 */
		cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
		/*
		 * For UA Interlock Code 11b, a RESERVATION CONFLICT will
		 * establish a UNIT ATTENTION with PREVIOUS RESERVATION
		 * CONFLICT STATUS.
1882	 *
1883	 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
1884	 */
1885		if (cmd->se_sess &&
1886		    cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl == 2) {
1887			target_ua_allocate_lun(cmd->se_sess->se_node_acl,
1888					       cmd->orig_fe_lun, 0x2C,
1889					ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
1890		}
1891
1892		goto queue_status;
1893	default:
1894		pr_err("Unknown transport error for CDB 0x%02x: %d\n",
1895			cmd->t_task_cdb[0], sense_reason);
1896		sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
1897		break;
1898	}
1899
1900	ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0);
1901	if (ret)
1902		goto queue_full;
1903
1904 check_stop:
1905	transport_lun_remove_cmd(cmd);
1906	transport_cmd_check_stop_to_fabric(cmd);
1907	return;
1908
1909 queue_status:
1910	trace_target_cmd_complete(cmd);
1911	ret = cmd->se_tfo->queue_status(cmd);
1912	if (!ret)
1913		goto check_stop;
1914 queue_full:
1915	transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
1916 }
1917 EXPORT_SYMBOL(transport_generic_request_failure);
1918
1919 void __target_execute_cmd(struct se_cmd *cmd, bool do_checks)
1920 {
1921	sense_reason_t ret;
1922
1923	if (!cmd->execute_cmd) {
1924		ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1925		goto err;
1926	}
1927	if (do_checks) {
1928		/*
1929		 * Check for an existing UNIT ATTENTION condition after
1930		 * target_handle_task_attr() has done SAM task attr
1931		 * checking, which may already have deferred execution
1932		 * out to target_restart_delayed_cmds() context.
1933		 */
1934		ret = target_scsi3_ua_check(cmd);
1935		if (ret)
1936			goto err;
1937
1938		ret = target_alua_state_check(cmd);
1939		if (ret)
1940			goto err;
1941
1942		ret = target_check_reservation(cmd);
1943		if (ret) {
1944			cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
1945			goto err;
1946		}
1947	}
1948
1949	ret = cmd->execute_cmd(cmd);
1950	if (!ret)
1951		return;
1952 err:
1953	spin_lock_irq(&cmd->t_state_lock);
1954	cmd->transport_state &= ~CMD_T_SENT;
1955	spin_unlock_irq(&cmd->t_state_lock);
1956
1957	transport_generic_request_failure(cmd, ret);
1958 }
1959
1960 static int target_write_prot_action(struct se_cmd *cmd)
1961 {
1962	u32 sectors;
1963	/*
1964	 * Perform WRITE_INSERT of PI using software emulation when backend
1965	 * device has PI enabled, if the transport has not already generated
1966	 * PI using hardware WRITE_INSERT offload.
1967	 */
1968	switch (cmd->prot_op) {
1969	case TARGET_PROT_DOUT_INSERT:
1970		if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_INSERT))
1971			sbc_dif_generate(cmd);
1972		break;
1973	case TARGET_PROT_DOUT_STRIP:
1974		if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_STRIP)
1975			break;
1976
1977		sectors = cmd->data_length >> ilog2(cmd->se_dev->dev_attrib.block_size);
1978		cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba,
1979					     sectors, 0, cmd->t_prot_sg, 0);
1980		if (unlikely(cmd->pi_err)) {
1981			spin_lock_irq(&cmd->t_state_lock);
1982			cmd->transport_state &= ~CMD_T_SENT;
1983			spin_unlock_irq(&cmd->t_state_lock);
1984			transport_generic_request_failure(cmd, cmd->pi_err);
1985			return -1;
1986		}
1987		break;
1988	default:
1989		break;
1990	}
1991
1992	return 0;
1993 }
1994
1995 static bool target_handle_task_attr(struct se_cmd *cmd)
1996 {
1997	struct se_device *dev = cmd->se_dev;
1998
1999	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
2000		return false;
2001
2002	cmd->se_cmd_flags |= SCF_TASK_ATTR_SET;
2003
2004	/*
2005	 * Check for a HEAD_OF_QUEUE task attribute, and if set allow the
2006	 * passed struct se_cmd to be executed ahead of the queued commands.
2007	 */
2008	switch (cmd->sam_task_attr) {
2009	case TCM_HEAD_TAG:
2010		pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x\n",
2011			 cmd->t_task_cdb[0]);
2012		return false;
2013	case TCM_ORDERED_TAG:
2014		atomic_inc_mb(&dev->dev_ordered_sync);
2015
2016		pr_debug("Added ORDERED for CDB: 0x%02x to ordered list\n",
2017			 cmd->t_task_cdb[0]);
2018
2019		/*
2020		 * Execute an ORDERED command if no other older commands
2021		 * exist that need to be completed first.
2022		 */
2023		if (!atomic_read(&dev->simple_cmds))
2024			return false;
2025		break;
2026	default:
2027		/*
2028		 * For SIMPLE and UNTAGGED Task Attribute commands
2029		 */
2030		atomic_inc_mb(&dev->simple_cmds);
2031		break;
2032	}
2033
2034	if (atomic_read(&dev->dev_ordered_sync) == 0)
2035		return false;
2036
2037	spin_lock(&dev->delayed_cmd_lock);
2038	list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list);
2039	spin_unlock(&dev->delayed_cmd_lock);
2040
2041	pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to delayed CMD list\n",
2042		 cmd->t_task_cdb[0], cmd->sam_task_attr);
2043	return true;
2044 }
2045
2046 void target_execute_cmd(struct se_cmd *cmd)
2047 {
2048	/*
2049	 * Determine if frontend context caller is requesting the stopping of
2050	 * this command for frontend exceptions.
2051	 *
2052	 * If the received CDB has already been aborted, stop processing it here.
2053	 */
2054	if (target_cmd_interrupted(cmd))
2055		return;
2056
2057	spin_lock_irq(&cmd->t_state_lock);
2058	cmd->t_state = TRANSPORT_PROCESSING;
2059	cmd->transport_state &= ~CMD_T_PRE_EXECUTE;
2060	cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT;
2061	spin_unlock_irq(&cmd->t_state_lock);
2062
2063	if (target_write_prot_action(cmd))
2064		return;
2065
2066	if (target_handle_task_attr(cmd)) {
2067		spin_lock_irq(&cmd->t_state_lock);
2068		cmd->transport_state &= ~CMD_T_SENT;
2069		spin_unlock_irq(&cmd->t_state_lock);
2070		return;
2071	}
2072
2073	__target_execute_cmd(cmd, true);
2074 }
2075 EXPORT_SYMBOL(target_execute_cmd);
2076
2077 /*
2078  * Process all commands up to the last received ORDERED task attribute,
2079  * which requires another blocking boundary.
2080  */
2081 static void target_restart_delayed_cmds(struct se_device *dev)
2082 {
2083	for (;;) {
2084		struct se_cmd *cmd;
2085
2086		spin_lock(&dev->delayed_cmd_lock);
2087		if (list_empty(&dev->delayed_cmd_list)) {
2088			spin_unlock(&dev->delayed_cmd_lock);
2089			break;
2090		}
2091
2092		cmd = list_entry(dev->delayed_cmd_list.next,
2093				 struct se_cmd, se_delayed_node);
2094		list_del(&cmd->se_delayed_node);
2095		spin_unlock(&dev->delayed_cmd_lock);
2096
2097		cmd->transport_state |= CMD_T_SENT;
2098
2099		__target_execute_cmd(cmd, true);
2100
2101		if (cmd->sam_task_attr == TCM_ORDERED_TAG)
2102			break;
2103	}
2104 }
2105
2106 /*
2107  * Called from I/O completion to determine which dormant/delayed
2108  * and ordered cmds need to have their tasks added to the execution queue.
2109  */
2110 static void transport_complete_task_attr(struct se_cmd *cmd)
2111 {
2112	struct se_device *dev = cmd->se_dev;
2113
2114	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
2115		return;
2116
2117	if (!(cmd->se_cmd_flags & SCF_TASK_ATTR_SET))
2118		goto restart;
2119
2120	if (cmd->sam_task_attr == TCM_SIMPLE_TAG) {
2121		atomic_dec_mb(&dev->simple_cmds);
2122		dev->dev_cur_ordered_id++;
2123	} else if (cmd->sam_task_attr == TCM_HEAD_TAG) {
2124		dev->dev_cur_ordered_id++;
2125		pr_debug("Incremented dev_cur_ordered_id: %u for HEAD_OF_QUEUE\n",
2126			 dev->dev_cur_ordered_id);
2127	} else if (cmd->sam_task_attr == TCM_ORDERED_TAG) {
2128		atomic_dec_mb(&dev->dev_ordered_sync);
2129
2130		dev->dev_cur_ordered_id++;
2131		pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n",
2132			 dev->dev_cur_ordered_id);
2133	}
2134	cmd->se_cmd_flags &= ~SCF_TASK_ATTR_SET;
2135
2136 restart:
2137	target_restart_delayed_cmds(dev);
2138 }
2139
2140 static void transport_complete_qf(struct se_cmd *cmd)
2141 {
2142	int ret = 0;
2143
2144	transport_complete_task_attr(cmd);
2145	/*
2146	 * If a fabric driver ->write_pending() or ->queue_data_in() callback
2147	 * has returned neither -ENOMEM nor -EAGAIN, assume it's fatal and
2148	 * the same callbacks should not be retried. Return CHECK_CONDITION
2149	 * if a scsi_status is not already set.
2150	 *
2151	 * If a fabric driver ->queue_status() has returned non-zero, always
2152	 * keep retrying no matter what.
2153	 */
2154	if (cmd->t_state == TRANSPORT_COMPLETE_QF_ERR) {
2155		if (cmd->scsi_status)
2156			goto queue_status;
2157
2158		translate_sense_reason(cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
2159		goto queue_status;
2160	}
2161
2162	/*
2163	 * Check if we need to send a sense buffer from
2164	 * the struct se_cmd in question. We do NOT want
2165	 * to take this path if the IO has been marked as
2166	 * needing to be treated like a "normal read".
This 2167 * is the case if it's a tape read, and either the 2168 * FM, EOM, or ILI bits are set, but there is no 2169 * sense data. 2170 */ 2171 if (!(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) && 2172 cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) 2173 goto queue_status; 2174 2175 switch (cmd->data_direction) { 2176 case DMA_FROM_DEVICE: 2177 /* queue status if not treating this as a normal read */ 2178 if (cmd->scsi_status && 2179 !(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL)) 2180 goto queue_status; 2181 2182 trace_target_cmd_complete(cmd); 2183 ret = cmd->se_tfo->queue_data_in(cmd); 2184 break; 2185 case DMA_TO_DEVICE: 2186 if (cmd->se_cmd_flags & SCF_BIDI) { 2187 ret = cmd->se_tfo->queue_data_in(cmd); 2188 break; 2189 } 2190 /* fall through */ 2191 case DMA_NONE: 2192 queue_status: 2193 trace_target_cmd_complete(cmd); 2194 ret = cmd->se_tfo->queue_status(cmd); 2195 break; 2196 default: 2197 break; 2198 } 2199 2200 if (ret < 0) { 2201 transport_handle_queue_full(cmd, cmd->se_dev, ret, false); 2202 return; 2203 } 2204 transport_lun_remove_cmd(cmd); 2205 transport_cmd_check_stop_to_fabric(cmd); 2206 } 2207 2208 static void transport_handle_queue_full(struct se_cmd *cmd, struct se_device *dev, 2209 int err, bool write_pending) 2210 { 2211 /* 2212 * -EAGAIN or -ENOMEM signals retry of ->write_pending() and/or 2213 * ->queue_data_in() callbacks from new process context. 2214 * 2215 * Otherwise for other errors, transport_complete_qf() will send 2216 * CHECK_CONDITION via ->queue_status() instead of attempting to 2217 * retry associated fabric driver data-transfer callbacks. 2218 */ 2219 if (err == -EAGAIN || err == -ENOMEM) { 2220 cmd->t_state = (write_pending) ? TRANSPORT_COMPLETE_QF_WP : 2221 TRANSPORT_COMPLETE_QF_OK; 2222 } else { 2223 pr_warn_ratelimited("Got unknown fabric queue status: %d\n", err); 2224 cmd->t_state = TRANSPORT_COMPLETE_QF_ERR; 2225 } 2226 2227 spin_lock_irq(&dev->qf_cmd_lock); 2228 list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list); 2229 atomic_inc_mb(&dev->dev_qf_count); 2230 spin_unlock_irq(&cmd->se_dev->qf_cmd_lock); 2231 2232 schedule_work(&cmd->se_dev->qf_work_queue); 2233 } 2234 2235 static bool target_read_prot_action(struct se_cmd *cmd) 2236 { 2237 switch (cmd->prot_op) { 2238 case TARGET_PROT_DIN_STRIP: 2239 if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP)) { 2240 u32 sectors = cmd->data_length >> 2241 ilog2(cmd->se_dev->dev_attrib.block_size); 2242 2243 cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba, 2244 sectors, 0, cmd->t_prot_sg, 2245 0); 2246 if (cmd->pi_err) 2247 return true; 2248 } 2249 break; 2250 case TARGET_PROT_DIN_INSERT: 2251 if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_INSERT) 2252 break; 2253 2254 sbc_dif_generate(cmd); 2255 break; 2256 default: 2257 break; 2258 } 2259 2260 return false; 2261 } 2262 2263 static void target_complete_ok_work(struct work_struct *work) 2264 { 2265 struct se_cmd *cmd = container_of(work, struct se_cmd, work); 2266 int ret; 2267 2268 /* 2269 * Check if we need to move delayed/dormant tasks from cmds on the 2270 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task 2271 * Attribute. 2272 */ 2273 transport_complete_task_attr(cmd); 2274 2275 /* 2276 * Check to schedule QUEUE_FULL work, or execute an existing 2277 * cmd->transport_qf_callback() 2278 */ 2279 if (atomic_read(&cmd->se_dev->dev_qf_count) != 0) 2280 schedule_work(&cmd->se_dev->qf_work_queue); 2281 2282 /* 2283 * Check if we need to send a sense buffer from 2284 * the struct se_cmd in question. 
We do NOT want
2285	 * to take this path if the IO has been marked as
2286	 * needing to be treated like a "normal read". This
2287	 * is the case if it's a tape read, and either the
2288	 * FM, EOM, or ILI bits are set, but there is no
2289	 * sense data.
2290	 */
2291	if (!(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) &&
2292	    cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
2293		WARN_ON(!cmd->scsi_status);
2294		ret = transport_send_check_condition_and_sense(
2295					cmd, 0, 1);
2296		if (ret)
2297			goto queue_full;
2298
2299		transport_lun_remove_cmd(cmd);
2300		transport_cmd_check_stop_to_fabric(cmd);
2301		return;
2302	}
2303	/*
2304	 * Check for a callback, used by, amongst other things,
2305	 * XDWRITE_READ_10 and COMPARE_AND_WRITE emulation.
2306	 */
2307	if (cmd->transport_complete_callback) {
2308		sense_reason_t rc;
2309		bool caw = (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE);
2310		bool zero_dl = !(cmd->data_length);
2311		int post_ret = 0;
2312
2313		rc = cmd->transport_complete_callback(cmd, true, &post_ret);
2314		if (!rc && !post_ret) {
2315			if (caw && zero_dl)
2316				goto queue_rsp;
2317
2318			return;
2319		} else if (rc) {
2320			ret = transport_send_check_condition_and_sense(cmd,
2321				rc, 0);
2322			if (ret)
2323				goto queue_full;
2324
2325			transport_lun_remove_cmd(cmd);
2326			transport_cmd_check_stop_to_fabric(cmd);
2327			return;
2328		}
2329	}
2330
2331 queue_rsp:
2332	switch (cmd->data_direction) {
2333	case DMA_FROM_DEVICE:
2334		/*
2335		 * If this is a READ-type IO, but SCSI status
2336		 * is set, then skip returning data and just
2337		 * return the status -- unless this IO is marked
2338		 * as needing to be treated as a normal read,
2339		 * in which case we want to go ahead and return
2340		 * the data. This happens, for example, for tape
2341		 * reads with the FM, EOM, or ILI bits set, with
2342		 * no sense data.
2343		 */
2344		if (cmd->scsi_status &&
2345		    !(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL))
2346			goto queue_status;
2347
2348		atomic_long_add(cmd->data_length,
2349				&cmd->se_lun->lun_stats.tx_data_octets);
2350		/*
2351		 * Perform READ_STRIP of PI using software emulation when
2352		 * backend has PI enabled, if the transport will not be
2353		 * performing hardware READ_STRIP offload.
2354 */ 2355 if (target_read_prot_action(cmd)) { 2356 ret = transport_send_check_condition_and_sense(cmd, 2357 cmd->pi_err, 0); 2358 if (ret) 2359 goto queue_full; 2360 2361 transport_lun_remove_cmd(cmd); 2362 transport_cmd_check_stop_to_fabric(cmd); 2363 return; 2364 } 2365 2366 trace_target_cmd_complete(cmd); 2367 ret = cmd->se_tfo->queue_data_in(cmd); 2368 if (ret) 2369 goto queue_full; 2370 break; 2371 case DMA_TO_DEVICE: 2372 atomic_long_add(cmd->data_length, 2373 &cmd->se_lun->lun_stats.rx_data_octets); 2374 /* 2375 * Check if we need to send READ payload for BIDI-COMMAND 2376 */ 2377 if (cmd->se_cmd_flags & SCF_BIDI) { 2378 atomic_long_add(cmd->data_length, 2379 &cmd->se_lun->lun_stats.tx_data_octets); 2380 ret = cmd->se_tfo->queue_data_in(cmd); 2381 if (ret) 2382 goto queue_full; 2383 break; 2384 } 2385 /* fall through */ 2386 case DMA_NONE: 2387 queue_status: 2388 trace_target_cmd_complete(cmd); 2389 ret = cmd->se_tfo->queue_status(cmd); 2390 if (ret) 2391 goto queue_full; 2392 break; 2393 default: 2394 break; 2395 } 2396 2397 transport_lun_remove_cmd(cmd); 2398 transport_cmd_check_stop_to_fabric(cmd); 2399 return; 2400 2401 queue_full: 2402 pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p," 2403 " data_direction: %d\n", cmd, cmd->data_direction); 2404 2405 transport_handle_queue_full(cmd, cmd->se_dev, ret, false); 2406 } 2407 2408 void target_free_sgl(struct scatterlist *sgl, int nents) 2409 { 2410 sgl_free_n_order(sgl, nents, 0); 2411 } 2412 EXPORT_SYMBOL(target_free_sgl); 2413 2414 static inline void transport_reset_sgl_orig(struct se_cmd *cmd) 2415 { 2416 /* 2417 * Check for saved t_data_sg that may be used for COMPARE_AND_WRITE 2418 * emulation, and free + reset pointers if necessary.. 2419 */ 2420 if (!cmd->t_data_sg_orig) 2421 return; 2422 2423 kfree(cmd->t_data_sg); 2424 cmd->t_data_sg = cmd->t_data_sg_orig; 2425 cmd->t_data_sg_orig = NULL; 2426 cmd->t_data_nents = cmd->t_data_nents_orig; 2427 cmd->t_data_nents_orig = 0; 2428 } 2429 2430 static inline void transport_free_pages(struct se_cmd *cmd) 2431 { 2432 if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) { 2433 target_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents); 2434 cmd->t_prot_sg = NULL; 2435 cmd->t_prot_nents = 0; 2436 } 2437 2438 if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) { 2439 /* 2440 * Release special case READ buffer payload required for 2441 * SG_TO_MEM_NOALLOC to function with COMPARE_AND_WRITE 2442 */ 2443 if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) { 2444 target_free_sgl(cmd->t_bidi_data_sg, 2445 cmd->t_bidi_data_nents); 2446 cmd->t_bidi_data_sg = NULL; 2447 cmd->t_bidi_data_nents = 0; 2448 } 2449 transport_reset_sgl_orig(cmd); 2450 return; 2451 } 2452 transport_reset_sgl_orig(cmd); 2453 2454 target_free_sgl(cmd->t_data_sg, cmd->t_data_nents); 2455 cmd->t_data_sg = NULL; 2456 cmd->t_data_nents = 0; 2457 2458 target_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents); 2459 cmd->t_bidi_data_sg = NULL; 2460 cmd->t_bidi_data_nents = 0; 2461 } 2462 2463 void *transport_kmap_data_sg(struct se_cmd *cmd) 2464 { 2465 struct scatterlist *sg = cmd->t_data_sg; 2466 struct page **pages; 2467 int i; 2468 2469 /* 2470 * We need to take into account a possible offset here for fabrics like 2471 * tcm_loop who may be using a contig buffer from the SCSI midlayer for 2472 * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd() 2473 */ 2474 if (!cmd->t_data_nents) 2475 return NULL; 2476 2477 BUG_ON(!sg); 2478 if (cmd->t_data_nents == 1) 2479 return kmap(sg_page(sg)) + 
sg->offset; 2480 2481 /* >1 page. use vmap */ 2482 pages = kmalloc_array(cmd->t_data_nents, sizeof(*pages), GFP_KERNEL); 2483 if (!pages) 2484 return NULL; 2485 2486 /* convert sg[] to pages[] */ 2487 for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) { 2488 pages[i] = sg_page(sg); 2489 } 2490 2491 cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL); 2492 kfree(pages); 2493 if (!cmd->t_data_vmap) 2494 return NULL; 2495 2496 return cmd->t_data_vmap + cmd->t_data_sg[0].offset; 2497 } 2498 EXPORT_SYMBOL(transport_kmap_data_sg); 2499 2500 void transport_kunmap_data_sg(struct se_cmd *cmd) 2501 { 2502 if (!cmd->t_data_nents) { 2503 return; 2504 } else if (cmd->t_data_nents == 1) { 2505 kunmap(sg_page(cmd->t_data_sg)); 2506 return; 2507 } 2508 2509 vunmap(cmd->t_data_vmap); 2510 cmd->t_data_vmap = NULL; 2511 } 2512 EXPORT_SYMBOL(transport_kunmap_data_sg); 2513 2514 int 2515 target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents, u32 length, 2516 bool zero_page, bool chainable) 2517 { 2518 gfp_t gfp = GFP_KERNEL | (zero_page ? __GFP_ZERO : 0); 2519 2520 *sgl = sgl_alloc_order(length, 0, chainable, gfp, nents); 2521 return *sgl ? 0 : -ENOMEM; 2522 } 2523 EXPORT_SYMBOL(target_alloc_sgl); 2524 2525 /* 2526 * Allocate any required resources to execute the command. For writes we 2527 * might not have the payload yet, so notify the fabric via a call to 2528 * ->write_pending instead. Otherwise place it on the execution queue. 2529 */ 2530 sense_reason_t 2531 transport_generic_new_cmd(struct se_cmd *cmd) 2532 { 2533 unsigned long flags; 2534 int ret = 0; 2535 bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB); 2536 2537 if (cmd->prot_op != TARGET_PROT_NORMAL && 2538 !(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) { 2539 ret = target_alloc_sgl(&cmd->t_prot_sg, &cmd->t_prot_nents, 2540 cmd->prot_length, true, false); 2541 if (ret < 0) 2542 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2543 } 2544 2545 /* 2546 * Determine if the TCM fabric module has already allocated physical 2547 * memory, and is directly calling transport_generic_map_mem_to_cmd() 2548 * beforehand. 2549 */ 2550 if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) && 2551 cmd->data_length) { 2552 2553 if ((cmd->se_cmd_flags & SCF_BIDI) || 2554 (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) { 2555 u32 bidi_length; 2556 2557 if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) 2558 bidi_length = cmd->t_task_nolb * 2559 cmd->se_dev->dev_attrib.block_size; 2560 else 2561 bidi_length = cmd->data_length; 2562 2563 ret = target_alloc_sgl(&cmd->t_bidi_data_sg, 2564 &cmd->t_bidi_data_nents, 2565 bidi_length, zero_flag, false); 2566 if (ret < 0) 2567 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2568 } 2569 2570 ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents, 2571 cmd->data_length, zero_flag, false); 2572 if (ret < 0) 2573 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2574 } else if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) && 2575 cmd->data_length) { 2576 /* 2577 * Special case for COMPARE_AND_WRITE with fabrics 2578 * using SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC. 
2579 */ 2580 u32 caw_length = cmd->t_task_nolb * 2581 cmd->se_dev->dev_attrib.block_size; 2582 2583 ret = target_alloc_sgl(&cmd->t_bidi_data_sg, 2584 &cmd->t_bidi_data_nents, 2585 caw_length, zero_flag, false); 2586 if (ret < 0) 2587 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2588 } 2589 /* 2590 * If this command is not a write we can execute it right here, 2591 * for write buffers we need to notify the fabric driver first 2592 * and let it call back once the write buffers are ready. 2593 */ 2594 target_add_to_state_list(cmd); 2595 if (cmd->data_direction != DMA_TO_DEVICE || cmd->data_length == 0) { 2596 target_execute_cmd(cmd); 2597 return 0; 2598 } 2599 2600 spin_lock_irqsave(&cmd->t_state_lock, flags); 2601 cmd->t_state = TRANSPORT_WRITE_PENDING; 2602 /* 2603 * Determine if frontend context caller is requesting the stopping of 2604 * this command for frontend exceptions. 2605 */ 2606 if (cmd->transport_state & CMD_T_STOP && 2607 !cmd->se_tfo->write_pending_must_be_called) { 2608 pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n", 2609 __func__, __LINE__, cmd->tag); 2610 2611 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2612 2613 complete_all(&cmd->t_transport_stop_comp); 2614 return 0; 2615 } 2616 cmd->transport_state &= ~CMD_T_ACTIVE; 2617 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2618 2619 ret = cmd->se_tfo->write_pending(cmd); 2620 if (ret) 2621 goto queue_full; 2622 2623 return 0; 2624 2625 queue_full: 2626 pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd); 2627 transport_handle_queue_full(cmd, cmd->se_dev, ret, true); 2628 return 0; 2629 } 2630 EXPORT_SYMBOL(transport_generic_new_cmd); 2631 2632 static void transport_write_pending_qf(struct se_cmd *cmd) 2633 { 2634 unsigned long flags; 2635 int ret; 2636 bool stop; 2637 2638 spin_lock_irqsave(&cmd->t_state_lock, flags); 2639 stop = (cmd->transport_state & (CMD_T_STOP | CMD_T_ABORTED)); 2640 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2641 2642 if (stop) { 2643 pr_debug("%s:%d CMD_T_STOP|CMD_T_ABORTED for ITT: 0x%08llx\n", 2644 __func__, __LINE__, cmd->tag); 2645 complete_all(&cmd->t_transport_stop_comp); 2646 return; 2647 } 2648 2649 ret = cmd->se_tfo->write_pending(cmd); 2650 if (ret) { 2651 pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", 2652 cmd); 2653 transport_handle_queue_full(cmd, cmd->se_dev, ret, true); 2654 } 2655 } 2656 2657 static bool 2658 __transport_wait_for_tasks(struct se_cmd *, bool, bool *, bool *, 2659 unsigned long *flags); 2660 2661 static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas) 2662 { 2663 unsigned long flags; 2664 2665 spin_lock_irqsave(&cmd->t_state_lock, flags); 2666 __transport_wait_for_tasks(cmd, true, aborted, tas, &flags); 2667 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2668 } 2669 2670 /* 2671 * Call target_put_sess_cmd() and wait until target_release_cmd_kref(@cmd) has 2672 * finished. 2673 */ 2674 void target_put_cmd_and_wait(struct se_cmd *cmd) 2675 { 2676 DECLARE_COMPLETION_ONSTACK(compl); 2677 2678 WARN_ON_ONCE(cmd->abrt_compl); 2679 cmd->abrt_compl = &compl; 2680 target_put_sess_cmd(cmd); 2681 wait_for_completion(&compl); 2682 } 2683 2684 /* 2685 * This function is called by frontend drivers after processing of a command 2686 * has finished. 
2687 * 2688 * The protocol for ensuring that either the regular frontend command 2689 * processing flow or target_handle_abort() code drops one reference is as 2690 * follows: 2691 * - Calling .queue_data_in(), .queue_status() or queue_tm_rsp() will cause 2692 * the frontend driver to call this function synchronously or asynchronously. 2693 * That will cause one reference to be dropped. 2694 * - During regular command processing the target core sets CMD_T_COMPLETE 2695 * before invoking one of the .queue_*() functions. 2696 * - The code that aborts commands skips commands and TMFs for which 2697 * CMD_T_COMPLETE has been set. 2698 * - CMD_T_ABORTED is set atomically after the CMD_T_COMPLETE check for 2699 * commands that will be aborted. 2700 * - If the CMD_T_ABORTED flag is set but CMD_T_TAS has not been set 2701 * transport_generic_free_cmd() skips its call to target_put_sess_cmd(). 2702 * - For aborted commands for which CMD_T_TAS has been set .queue_status() will 2703 * be called and will drop a reference. 2704 * - For aborted commands for which CMD_T_TAS has not been set .aborted_task() 2705 * will be called. target_handle_abort() will drop the final reference. 2706 */ 2707 int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) 2708 { 2709 DECLARE_COMPLETION_ONSTACK(compl); 2710 int ret = 0; 2711 bool aborted = false, tas = false; 2712 2713 if (wait_for_tasks) 2714 target_wait_free_cmd(cmd, &aborted, &tas); 2715 2716 if (cmd->se_cmd_flags & SCF_SE_LUN_CMD) { 2717 /* 2718 * Handle WRITE failure case where transport_generic_new_cmd() 2719 * has already added se_cmd to state_list, but fabric has 2720 * failed command before I/O submission. 2721 */ 2722 if (cmd->state_active) 2723 target_remove_from_state_list(cmd); 2724 2725 if (cmd->se_lun) 2726 transport_lun_remove_cmd(cmd); 2727 } 2728 if (aborted) 2729 cmd->free_compl = &compl; 2730 ret = target_put_sess_cmd(cmd); 2731 if (aborted) { 2732 pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag); 2733 wait_for_completion(&compl); 2734 ret = 1; 2735 } 2736 return ret; 2737 } 2738 EXPORT_SYMBOL(transport_generic_free_cmd); 2739 2740 /** 2741 * target_get_sess_cmd - Add command to active ->sess_cmd_list 2742 * @se_cmd: command descriptor to add 2743 * @ack_kref: Signal that fabric will perform an ack target_put_sess_cmd() 2744 */ 2745 int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref) 2746 { 2747 struct se_session *se_sess = se_cmd->se_sess; 2748 unsigned long flags; 2749 int ret = 0; 2750 2751 /* 2752 * Add a second kref if the fabric caller is expecting to handle 2753 * fabric acknowledgement that requires two target_put_sess_cmd() 2754 * invocations before se_cmd descriptor release. 
2755 */ 2756 if (ack_kref) { 2757 if (!kref_get_unless_zero(&se_cmd->cmd_kref)) 2758 return -EINVAL; 2759 2760 se_cmd->se_cmd_flags |= SCF_ACK_KREF; 2761 } 2762 2763 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 2764 if (se_sess->sess_tearing_down) { 2765 ret = -ESHUTDOWN; 2766 goto out; 2767 } 2768 se_cmd->transport_state |= CMD_T_PRE_EXECUTE; 2769 list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list); 2770 percpu_ref_get(&se_sess->cmd_count); 2771 out: 2772 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2773 2774 if (ret && ack_kref) 2775 target_put_sess_cmd(se_cmd); 2776 2777 return ret; 2778 } 2779 EXPORT_SYMBOL(target_get_sess_cmd); 2780 2781 static void target_free_cmd_mem(struct se_cmd *cmd) 2782 { 2783 transport_free_pages(cmd); 2784 2785 if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) 2786 core_tmr_release_req(cmd->se_tmr_req); 2787 if (cmd->t_task_cdb != cmd->__t_task_cdb) 2788 kfree(cmd->t_task_cdb); 2789 } 2790 2791 static void target_release_cmd_kref(struct kref *kref) 2792 { 2793 struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref); 2794 struct se_session *se_sess = se_cmd->se_sess; 2795 struct completion *free_compl = se_cmd->free_compl; 2796 struct completion *abrt_compl = se_cmd->abrt_compl; 2797 unsigned long flags; 2798 2799 if (se_sess) { 2800 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 2801 list_del_init(&se_cmd->se_cmd_list); 2802 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2803 } 2804 2805 target_free_cmd_mem(se_cmd); 2806 se_cmd->se_tfo->release_cmd(se_cmd); 2807 if (free_compl) 2808 complete(free_compl); 2809 if (abrt_compl) 2810 complete(abrt_compl); 2811 2812 percpu_ref_put(&se_sess->cmd_count); 2813 } 2814 2815 /** 2816 * target_put_sess_cmd - decrease the command reference count 2817 * @se_cmd: command to drop a reference from 2818 * 2819 * Returns 1 if and only if this target_put_sess_cmd() call caused the 2820 * refcount to drop to zero. Returns zero otherwise. 2821 */ 2822 int target_put_sess_cmd(struct se_cmd *se_cmd) 2823 { 2824 return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref); 2825 } 2826 EXPORT_SYMBOL(target_put_sess_cmd); 2827 2828 static const char *data_dir_name(enum dma_data_direction d) 2829 { 2830 switch (d) { 2831 case DMA_BIDIRECTIONAL: return "BIDI"; 2832 case DMA_TO_DEVICE: return "WRITE"; 2833 case DMA_FROM_DEVICE: return "READ"; 2834 case DMA_NONE: return "NONE"; 2835 } 2836 2837 return "(?)"; 2838 } 2839 2840 static const char *cmd_state_name(enum transport_state_table t) 2841 { 2842 switch (t) { 2843 case TRANSPORT_NO_STATE: return "NO_STATE"; 2844 case TRANSPORT_NEW_CMD: return "NEW_CMD"; 2845 case TRANSPORT_WRITE_PENDING: return "WRITE_PENDING"; 2846 case TRANSPORT_PROCESSING: return "PROCESSING"; 2847 case TRANSPORT_COMPLETE: return "COMPLETE"; 2848 case TRANSPORT_ISTATE_PROCESSING: 2849 return "ISTATE_PROCESSING"; 2850 case TRANSPORT_COMPLETE_QF_WP: return "COMPLETE_QF_WP"; 2851 case TRANSPORT_COMPLETE_QF_OK: return "COMPLETE_QF_OK"; 2852 case TRANSPORT_COMPLETE_QF_ERR: return "COMPLETE_QF_ERR"; 2853 } 2854 2855 return "(?)"; 2856 } 2857 2858 static void target_append_str(char **str, const char *txt) 2859 { 2860 char *prev = *str; 2861 2862 *str = *str ? kasprintf(GFP_ATOMIC, "%s,%s", *str, txt) : 2863 kstrdup(txt, GFP_ATOMIC); 2864 kfree(prev); 2865 } 2866 2867 /* 2868 * Convert a transport state bitmask into a string. The caller is 2869 * responsible for freeing the returned pointer. 
2870 */ 2871 static char *target_ts_to_str(u32 ts) 2872 { 2873 char *str = NULL; 2874 2875 if (ts & CMD_T_ABORTED) 2876 target_append_str(&str, "aborted"); 2877 if (ts & CMD_T_ACTIVE) 2878 target_append_str(&str, "active"); 2879 if (ts & CMD_T_COMPLETE) 2880 target_append_str(&str, "complete"); 2881 if (ts & CMD_T_SENT) 2882 target_append_str(&str, "sent"); 2883 if (ts & CMD_T_STOP) 2884 target_append_str(&str, "stop"); 2885 if (ts & CMD_T_FABRIC_STOP) 2886 target_append_str(&str, "fabric_stop"); 2887 2888 return str; 2889 } 2890 2891 static const char *target_tmf_name(enum tcm_tmreq_table tmf) 2892 { 2893 switch (tmf) { 2894 case TMR_ABORT_TASK: return "ABORT_TASK"; 2895 case TMR_ABORT_TASK_SET: return "ABORT_TASK_SET"; 2896 case TMR_CLEAR_ACA: return "CLEAR_ACA"; 2897 case TMR_CLEAR_TASK_SET: return "CLEAR_TASK_SET"; 2898 case TMR_LUN_RESET: return "LUN_RESET"; 2899 case TMR_TARGET_WARM_RESET: return "TARGET_WARM_RESET"; 2900 case TMR_TARGET_COLD_RESET: return "TARGET_COLD_RESET"; 2901 case TMR_UNKNOWN: break; 2902 } 2903 return "(?)"; 2904 } 2905 2906 void target_show_cmd(const char *pfx, struct se_cmd *cmd) 2907 { 2908 char *ts_str = target_ts_to_str(cmd->transport_state); 2909 const u8 *cdb = cmd->t_task_cdb; 2910 struct se_tmr_req *tmf = cmd->se_tmr_req; 2911 2912 if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) { 2913 pr_debug("%scmd %#02x:%#02x with tag %#llx dir %s i_state %d t_state %s len %d refcnt %d transport_state %s\n", 2914 pfx, cdb[0], cdb[1], cmd->tag, 2915 data_dir_name(cmd->data_direction), 2916 cmd->se_tfo->get_cmd_state(cmd), 2917 cmd_state_name(cmd->t_state), cmd->data_length, 2918 kref_read(&cmd->cmd_kref), ts_str); 2919 } else { 2920 pr_debug("%stmf %s with tag %#llx ref_task_tag %#llx i_state %d t_state %s refcnt %d transport_state %s\n", 2921 pfx, target_tmf_name(tmf->function), cmd->tag, 2922 tmf->ref_task_tag, cmd->se_tfo->get_cmd_state(cmd), 2923 cmd_state_name(cmd->t_state), 2924 kref_read(&cmd->cmd_kref), ts_str); 2925 } 2926 kfree(ts_str); 2927 } 2928 EXPORT_SYMBOL(target_show_cmd); 2929 2930 /** 2931 * target_sess_cmd_list_set_waiting - Set sess_tearing_down so no new commands are queued. 2932 * @se_sess: session to flag 2933 */ 2934 void target_sess_cmd_list_set_waiting(struct se_session *se_sess) 2935 { 2936 unsigned long flags; 2937 2938 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 2939 se_sess->sess_tearing_down = 1; 2940 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2941 2942 percpu_ref_kill(&se_sess->cmd_count); 2943 } 2944 EXPORT_SYMBOL(target_sess_cmd_list_set_waiting); 2945 2946 /** 2947 * target_wait_for_sess_cmds - Wait for outstanding commands 2948 * @se_sess: session to wait for active I/O 2949 */ 2950 void target_wait_for_sess_cmds(struct se_session *se_sess) 2951 { 2952 struct se_cmd *cmd; 2953 int ret; 2954 2955 WARN_ON_ONCE(!se_sess->sess_tearing_down); 2956 2957 do { 2958 ret = wait_event_timeout(se_sess->cmd_list_wq, 2959 percpu_ref_is_zero(&se_sess->cmd_count), 2960 180 * HZ); 2961 list_for_each_entry(cmd, &se_sess->sess_cmd_list, se_cmd_list) 2962 target_show_cmd("session shutdown: still waiting for ", 2963 cmd); 2964 } while (ret <= 0); 2965 } 2966 EXPORT_SYMBOL(target_wait_for_sess_cmds); 2967 2968 /* 2969 * Prevent that new percpu_ref_tryget_live() calls succeed and wait until 2970 * all references to the LUN have been released. Called during LUN shutdown. 
2971 */ 2972 void transport_clear_lun_ref(struct se_lun *lun) 2973 { 2974 percpu_ref_kill(&lun->lun_ref); 2975 wait_for_completion(&lun->lun_shutdown_comp); 2976 } 2977 2978 static bool 2979 __transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop, 2980 bool *aborted, bool *tas, unsigned long *flags) 2981 __releases(&cmd->t_state_lock) 2982 __acquires(&cmd->t_state_lock) 2983 { 2984 2985 assert_spin_locked(&cmd->t_state_lock); 2986 WARN_ON_ONCE(!irqs_disabled()); 2987 2988 if (fabric_stop) 2989 cmd->transport_state |= CMD_T_FABRIC_STOP; 2990 2991 if (cmd->transport_state & CMD_T_ABORTED) 2992 *aborted = true; 2993 2994 if (cmd->transport_state & CMD_T_TAS) 2995 *tas = true; 2996 2997 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && 2998 !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) 2999 return false; 3000 3001 if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && 3002 !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) 3003 return false; 3004 3005 if (!(cmd->transport_state & CMD_T_ACTIVE)) 3006 return false; 3007 3008 if (fabric_stop && *aborted) 3009 return false; 3010 3011 cmd->transport_state |= CMD_T_STOP; 3012 3013 target_show_cmd("wait_for_tasks: Stopping ", cmd); 3014 3015 spin_unlock_irqrestore(&cmd->t_state_lock, *flags); 3016 3017 while (!wait_for_completion_timeout(&cmd->t_transport_stop_comp, 3018 180 * HZ)) 3019 target_show_cmd("wait for tasks: ", cmd); 3020 3021 spin_lock_irqsave(&cmd->t_state_lock, *flags); 3022 cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP); 3023 3024 pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->" 3025 "t_transport_stop_comp) for ITT: 0x%08llx\n", cmd->tag); 3026 3027 return true; 3028 } 3029 3030 /** 3031 * transport_wait_for_tasks - set CMD_T_STOP and wait for t_transport_stop_comp 3032 * @cmd: command to wait on 3033 */ 3034 bool transport_wait_for_tasks(struct se_cmd *cmd) 3035 { 3036 unsigned long flags; 3037 bool ret, aborted = false, tas = false; 3038 3039 spin_lock_irqsave(&cmd->t_state_lock, flags); 3040 ret = __transport_wait_for_tasks(cmd, false, &aborted, &tas, &flags); 3041 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3042 3043 return ret; 3044 } 3045 EXPORT_SYMBOL(transport_wait_for_tasks); 3046 3047 struct sense_info { 3048 u8 key; 3049 u8 asc; 3050 u8 ascq; 3051 bool add_sector_info; 3052 }; 3053 3054 static const struct sense_info sense_info_table[] = { 3055 [TCM_NO_SENSE] = { 3056 .key = NOT_READY 3057 }, 3058 [TCM_NON_EXISTENT_LUN] = { 3059 .key = ILLEGAL_REQUEST, 3060 .asc = 0x25 /* LOGICAL UNIT NOT SUPPORTED */ 3061 }, 3062 [TCM_UNSUPPORTED_SCSI_OPCODE] = { 3063 .key = ILLEGAL_REQUEST, 3064 .asc = 0x20, /* INVALID COMMAND OPERATION CODE */ 3065 }, 3066 [TCM_SECTOR_COUNT_TOO_MANY] = { 3067 .key = ILLEGAL_REQUEST, 3068 .asc = 0x20, /* INVALID COMMAND OPERATION CODE */ 3069 }, 3070 [TCM_UNKNOWN_MODE_PAGE] = { 3071 .key = ILLEGAL_REQUEST, 3072 .asc = 0x24, /* INVALID FIELD IN CDB */ 3073 }, 3074 [TCM_CHECK_CONDITION_ABORT_CMD] = { 3075 .key = ABORTED_COMMAND, 3076 .asc = 0x29, /* BUS DEVICE RESET FUNCTION OCCURRED */ 3077 .ascq = 0x03, 3078 }, 3079 [TCM_INCORRECT_AMOUNT_OF_DATA] = { 3080 .key = ABORTED_COMMAND, 3081 .asc = 0x0c, /* WRITE ERROR */ 3082 .ascq = 0x0d, /* NOT ENOUGH UNSOLICITED DATA */ 3083 }, 3084 [TCM_INVALID_CDB_FIELD] = { 3085 .key = ILLEGAL_REQUEST, 3086 .asc = 0x24, /* INVALID FIELD IN CDB */ 3087 }, 3088 [TCM_INVALID_PARAMETER_LIST] = { 3089 .key = ILLEGAL_REQUEST, 3090 .asc = 0x26, /* INVALID FIELD IN PARAMETER LIST */ 3091 }, 3092 [TCM_TOO_MANY_TARGET_DESCS] = { 3093 .key = ILLEGAL_REQUEST, 3094 .asc = 
0x26,
3095		.ascq = 0x06, /* TOO MANY TARGET DESCRIPTORS */
3096	},
3097	[TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE] = {
3098		.key = ILLEGAL_REQUEST,
3099		.asc = 0x26,
3100		.ascq = 0x07, /* UNSUPPORTED TARGET DESCRIPTOR TYPE CODE */
3101	},
3102	[TCM_TOO_MANY_SEGMENT_DESCS] = {
3103		.key = ILLEGAL_REQUEST,
3104		.asc = 0x26,
3105		.ascq = 0x08, /* TOO MANY SEGMENT DESCRIPTORS */
3106	},
3107	[TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE] = {
3108		.key = ILLEGAL_REQUEST,
3109		.asc = 0x26,
3110		.ascq = 0x09, /* UNSUPPORTED SEGMENT DESCRIPTOR TYPE CODE */
3111	},
3112	[TCM_PARAMETER_LIST_LENGTH_ERROR] = {
3113		.key = ILLEGAL_REQUEST,
3114		.asc = 0x1a, /* PARAMETER LIST LENGTH ERROR */
3115	},
3116	[TCM_UNEXPECTED_UNSOLICITED_DATA] = {
3117		.key = ILLEGAL_REQUEST,
3118		.asc = 0x0c, /* WRITE ERROR */
3119		.ascq = 0x0c, /* UNEXPECTED_UNSOLICITED_DATA */
3120	},
3121	[TCM_SERVICE_CRC_ERROR] = {
3122		.key = ABORTED_COMMAND,
3123		.asc = 0x47, /* PROTOCOL SERVICE CRC ERROR */
3124		.ascq = 0x05, /* N/A */
3125	},
3126	[TCM_SNACK_REJECTED] = {
3127		.key = ABORTED_COMMAND,
3128		.asc = 0x11, /* READ ERROR */
3129		.ascq = 0x13, /* FAILED RETRANSMISSION REQUEST */
3130	},
3131	[TCM_WRITE_PROTECTED] = {
3132		.key = DATA_PROTECT,
3133		.asc = 0x27, /* WRITE PROTECTED */
3134	},
3135	[TCM_ADDRESS_OUT_OF_RANGE] = {
3136		.key = ILLEGAL_REQUEST,
3137		.asc = 0x21, /* LOGICAL BLOCK ADDRESS OUT OF RANGE */
3138	},
3139	[TCM_CHECK_CONDITION_UNIT_ATTENTION] = {
3140		.key = UNIT_ATTENTION,
3141	},
3142	[TCM_CHECK_CONDITION_NOT_READY] = {
3143		.key = NOT_READY,
3144	},
3145	[TCM_MISCOMPARE_VERIFY] = {
3146		.key = MISCOMPARE,
3147		.asc = 0x1d, /* MISCOMPARE DURING VERIFY OPERATION */
3148		.ascq = 0x00,
3149	},
3150	[TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED] = {
3151		.key = ABORTED_COMMAND,
3152		.asc = 0x10,
3153		.ascq = 0x01, /* LOGICAL BLOCK GUARD CHECK FAILED */
3154		.add_sector_info = true,
3155	},
3156	[TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED] = {
3157		.key = ABORTED_COMMAND,
3158		.asc = 0x10,
3159		.ascq = 0x02, /* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */
3160		.add_sector_info = true,
3161	},
3162	[TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED] = {
3163		.key = ABORTED_COMMAND,
3164		.asc = 0x10,
3165		.ascq = 0x03, /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */
3166		.add_sector_info = true,
3167	},
3168	[TCM_COPY_TARGET_DEVICE_NOT_REACHABLE] = {
3169		.key = COPY_ABORTED,
3170		.asc = 0x0d,
3171		.ascq = 0x02, /* COPY TARGET DEVICE NOT REACHABLE */
3172
3173	},
3174	[TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE] = {
3175		/*
3176		 * Returning ILLEGAL REQUEST would cause immediate IO errors on
3177		 * Solaris initiators. Returning NOT READY instead means the
3178		 * operations will be retried a finite number of times and we
3179		 * can survive intermittent errors.
3180		 */
3181		.key = NOT_READY,
3182		.asc = 0x08, /* LOGICAL UNIT COMMUNICATION FAILURE */
3183	},
3184	[TCM_INSUFFICIENT_REGISTRATION_RESOURCES] = {
3185		/*
3186		 * From spc4r22, sections 5.7.7 and 5.7.8:
3187		 * If a PERSISTENT RESERVE OUT command with a REGISTER service action
3188		 * or a REGISTER AND IGNORE EXISTING KEY service action or
3189		 * REGISTER AND MOVE service action is attempted,
3190		 * but there are insufficient device server resources to complete the
3191		 * operation, then the command shall be terminated with CHECK CONDITION
3192		 * status, with the sense key set to ILLEGAL REQUEST, and the additional
3193		 * sense code set to INSUFFICIENT REGISTRATION RESOURCES.
3194	 */
3195		.key = ILLEGAL_REQUEST,
3196		.asc = 0x55,
3197		.ascq = 0x04, /* INSUFFICIENT REGISTRATION RESOURCES */
3198	},
3199 };
3200
3201 /**
3202  * translate_sense_reason - translate a sense reason into T10 key, asc and ascq
3203  * @cmd: SCSI command in which the resulting sense buffer or SCSI status will
3204  *	 be stored.
3205  * @reason: LIO sense reason code. If this argument has the value
3206  * TCM_CHECK_CONDITION_UNIT_ATTENTION, try to dequeue a unit attention. If
3207  * dequeuing a unit attention fails due to multiple commands being processed
3208  * concurrently, set the command status to BUSY.
3209  *
3210  * The resulting sense data and SCSI status are stored directly in @cmd.
3211  */
3212 static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason)
3213 {
3214	const struct sense_info *si;
3215	u8 *buffer = cmd->sense_buffer;
3216	int r = (__force int)reason;
3217	u8 key, asc, ascq;
3218	bool desc_format = target_sense_desc_format(cmd->se_dev);
3219
3220	if (r < ARRAY_SIZE(sense_info_table) && sense_info_table[r].key)
3221		si = &sense_info_table[r];
3222	else
3223		si = &sense_info_table[(__force int)
3224				       TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE];
3225
3226	key = si->key;
3227	if (reason == TCM_CHECK_CONDITION_UNIT_ATTENTION) {
3228		if (!core_scsi3_ua_for_check_condition(cmd, &key, &asc,
3229						       &ascq)) {
3230			cmd->scsi_status = SAM_STAT_BUSY;
3231			return;
3232		}
3233	} else if (si->asc == 0) {
3234		WARN_ON_ONCE(cmd->scsi_asc == 0);
3235		asc = cmd->scsi_asc;
3236		ascq = cmd->scsi_ascq;
3237	} else {
3238		asc = si->asc;
3239		ascq = si->ascq;
3240	}
3241
3242	cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
3243	cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
3244	cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
3245	scsi_build_sense_buffer(desc_format, buffer, key, asc, ascq);
3246	if (si->add_sector_info)
3247		WARN_ON_ONCE(scsi_set_sense_information(buffer,
3248							cmd->scsi_sense_length,
3249							cmd->bad_sector) < 0);
3250 }
3251
3252 int
3253 transport_send_check_condition_and_sense(struct se_cmd *cmd,
3254		sense_reason_t reason, int from_transport)
3255 {
3256	unsigned long flags;
3257
3258	WARN_ON_ONCE(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB);
3259
3260	spin_lock_irqsave(&cmd->t_state_lock, flags);
3261	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
3262		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3263		return 0;
3264	}
3265	cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
3266	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3267
3268	if (!from_transport)
3269		translate_sense_reason(cmd, reason);
3270
3271	trace_target_cmd_complete(cmd);
3272	return cmd->se_tfo->queue_status(cmd);
3273 }
3274 EXPORT_SYMBOL(transport_send_check_condition_and_sense);
3275
3276 static void target_tmr_work(struct work_struct *work)
3277 {
3278	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
3279	struct se_device *dev = cmd->se_dev;
3280	struct se_tmr_req *tmr = cmd->se_tmr_req;
3281	int ret;
3282
3283	if (cmd->transport_state & CMD_T_ABORTED)
3284		goto aborted;
3285
3286	switch (tmr->function) {
3287	case TMR_ABORT_TASK:
3288		core_tmr_abort_task(dev, tmr, cmd->se_sess);
3289		break;
3290	case TMR_ABORT_TASK_SET:
3291	case TMR_CLEAR_ACA:
3292	case TMR_CLEAR_TASK_SET:
3293		tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
3294		break;
3295	case TMR_LUN_RESET:
3296		ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
3297		tmr->response = (!ret) ?
TMR_FUNCTION_COMPLETE : 3298 TMR_FUNCTION_REJECTED; 3299 if (tmr->response == TMR_FUNCTION_COMPLETE) { 3300 target_ua_allocate_lun(cmd->se_sess->se_node_acl, 3301 cmd->orig_fe_lun, 0x29, 3302 ASCQ_29H_BUS_DEVICE_RESET_FUNCTION_OCCURRED); 3303 } 3304 break; 3305 case TMR_TARGET_WARM_RESET: 3306 tmr->response = TMR_FUNCTION_REJECTED; 3307 break; 3308 case TMR_TARGET_COLD_RESET: 3309 tmr->response = TMR_FUNCTION_REJECTED; 3310 break; 3311 default: 3312 pr_err("Unknown TMR function: 0x%02x.\n", 3313 tmr->function); 3314 tmr->response = TMR_FUNCTION_REJECTED; 3315 break; 3316 } 3317 3318 if (cmd->transport_state & CMD_T_ABORTED) 3319 goto aborted; 3320 3321 cmd->se_tfo->queue_tm_rsp(cmd); 3322 3323 transport_cmd_check_stop_to_fabric(cmd); 3324 return; 3325 3326 aborted: 3327 target_handle_abort(cmd); 3328 } 3329 3330 int transport_generic_handle_tmr( 3331 struct se_cmd *cmd) 3332 { 3333 unsigned long flags; 3334 bool aborted = false; 3335 3336 spin_lock_irqsave(&cmd->t_state_lock, flags); 3337 if (cmd->transport_state & CMD_T_ABORTED) { 3338 aborted = true; 3339 } else { 3340 cmd->t_state = TRANSPORT_ISTATE_PROCESSING; 3341 cmd->transport_state |= CMD_T_ACTIVE; 3342 } 3343 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3344 3345 if (aborted) { 3346 pr_warn_ratelimited("handle_tmr caught CMD_T_ABORTED TMR %d ref_tag: %llu tag: %llu\n", 3347 cmd->se_tmr_req->function, 3348 cmd->se_tmr_req->ref_task_tag, cmd->tag); 3349 target_handle_abort(cmd); 3350 return 0; 3351 } 3352 3353 INIT_WORK(&cmd->work, target_tmr_work); 3354 schedule_work(&cmd->work); 3355 return 0; 3356 } 3357 EXPORT_SYMBOL(transport_generic_handle_tmr); 3358 3359 bool 3360 target_check_wce(struct se_device *dev) 3361 { 3362 bool wce = false; 3363 3364 if (dev->transport->get_write_cache) 3365 wce = dev->transport->get_write_cache(dev); 3366 else if (dev->dev_attrib.emulate_write_cache > 0) 3367 wce = true; 3368 3369 return wce; 3370 } 3371 3372 bool 3373 target_check_fua(struct se_device *dev) 3374 { 3375 return target_check_wce(dev) && dev->dev_attrib.emulate_fua_write > 0; 3376 } 3377
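
/*
 * Example: a hypothetical fabric module that embeds a struct se_cmd in its
 * own per-command structure might issue a LUN RESET through the TMR API
 * above roughly as sketched below. The my_fabric_cmd and
 * my_fabric_send_lun_reset names are placeholders for this sketch only and
 * do not exist in the tree.
 *
 *	struct my_fabric_cmd {
 *		struct se_cmd se_cmd;
 *		unsigned char sense[TRANSPORT_SENSE_BUFFER];
 *	};
 *
 *	static int my_fabric_send_lun_reset(struct my_fabric_cmd *fc,
 *					    struct se_session *se_sess,
 *					    u64 unpacked_lun)
 *	{
 *		return target_submit_tmr(&fc->se_cmd, se_sess, fc->sense,
 *					 unpacked_lun, fc, TMR_LUN_RESET,
 *					 GFP_KERNEL, 0, TARGET_SCF_ACK_KREF);
 *	}
 *
 * On success the TMR response is delivered asynchronously via the fabric's
 * ->queue_tm_rsp() callback; with TARGET_SCF_ACK_KREF the fabric then drops
 * its extra reference with target_put_sess_cmd().
 */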