1 /******************************************************************************* 2 * Filename: target_core_transport.c 3 * 4 * This file contains the Generic Target Engine Core. 5 * 6 * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc. 7 * Copyright (c) 2005, 2006, 2007 SBE, Inc. 8 * Copyright (c) 2007-2010 Rising Tide Systems 9 * Copyright (c) 2008-2010 Linux-iSCSI.org 10 * 11 * Nicholas A. Bellinger <nab@kernel.org> 12 * 13 * This program is free software; you can redistribute it and/or modify 14 * it under the terms of the GNU General Public License as published by 15 * the Free Software Foundation; either version 2 of the License, or 16 * (at your option) any later version. 17 * 18 * This program is distributed in the hope that it will be useful, 19 * but WITHOUT ANY WARRANTY; without even the implied warranty of 20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 21 * GNU General Public License for more details. 22 * 23 * You should have received a copy of the GNU General Public License 24 * along with this program; if not, write to the Free Software 25 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 26 * 27 ******************************************************************************/ 28 29 #include <linux/net.h> 30 #include <linux/delay.h> 31 #include <linux/string.h> 32 #include <linux/timer.h> 33 #include <linux/slab.h> 34 #include <linux/blkdev.h> 35 #include <linux/spinlock.h> 36 #include <linux/kthread.h> 37 #include <linux/in.h> 38 #include <linux/cdrom.h> 39 #include <linux/module.h> 40 #include <asm/unaligned.h> 41 #include <net/sock.h> 42 #include <net/tcp.h> 43 #include <scsi/scsi.h> 44 #include <scsi/scsi_cmnd.h> 45 #include <scsi/scsi_tcq.h> 46 47 #include <target/target_core_base.h> 48 #include <target/target_core_backend.h> 49 #include <target/target_core_fabric.h> 50 #include <target/target_core_configfs.h> 51 52 #include "target_core_internal.h" 53 #include "target_core_alua.h" 54 #include "target_core_pr.h" 55 #include "target_core_ua.h" 56 57 static int sub_api_initialized; 58 59 static struct workqueue_struct *target_completion_wq; 60 static struct kmem_cache *se_sess_cache; 61 struct kmem_cache *se_tmr_req_cache; 62 struct kmem_cache *se_ua_cache; 63 struct kmem_cache *t10_pr_reg_cache; 64 struct kmem_cache *t10_alua_lu_gp_cache; 65 struct kmem_cache *t10_alua_lu_gp_mem_cache; 66 struct kmem_cache *t10_alua_tg_pt_gp_cache; 67 struct kmem_cache *t10_alua_tg_pt_gp_mem_cache; 68 69 static int transport_generic_write_pending(struct se_cmd *); 70 static int transport_processing_thread(void *param); 71 static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *); 72 static void transport_complete_task_attr(struct se_cmd *cmd); 73 static void transport_handle_queue_full(struct se_cmd *cmd, 74 struct se_device *dev); 75 static void transport_free_dev_tasks(struct se_cmd *cmd); 76 static int transport_generic_get_mem(struct se_cmd *cmd); 77 static void transport_put_cmd(struct se_cmd *cmd); 78 static void transport_remove_cmd_from_queue(struct se_cmd *cmd); 79 static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq); 80 static void transport_generic_request_failure(struct se_cmd *); 81 static void target_complete_ok_work(struct work_struct *work); 82 83 int init_se_kmem_caches(void) 84 { 85 se_tmr_req_cache = kmem_cache_create("se_tmr_cache", 86 sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req), 87 0, NULL); 88 if (!se_tmr_req_cache) { 89 pr_err("kmem_cache_create() for struct 
se_tmr_req" 90 " failed\n"); 91 goto out; 92 } 93 se_sess_cache = kmem_cache_create("se_sess_cache", 94 sizeof(struct se_session), __alignof__(struct se_session), 95 0, NULL); 96 if (!se_sess_cache) { 97 pr_err("kmem_cache_create() for struct se_session" 98 " failed\n"); 99 goto out_free_tmr_req_cache; 100 } 101 se_ua_cache = kmem_cache_create("se_ua_cache", 102 sizeof(struct se_ua), __alignof__(struct se_ua), 103 0, NULL); 104 if (!se_ua_cache) { 105 pr_err("kmem_cache_create() for struct se_ua failed\n"); 106 goto out_free_sess_cache; 107 } 108 t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache", 109 sizeof(struct t10_pr_registration), 110 __alignof__(struct t10_pr_registration), 0, NULL); 111 if (!t10_pr_reg_cache) { 112 pr_err("kmem_cache_create() for struct t10_pr_registration" 113 " failed\n"); 114 goto out_free_ua_cache; 115 } 116 t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache", 117 sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp), 118 0, NULL); 119 if (!t10_alua_lu_gp_cache) { 120 pr_err("kmem_cache_create() for t10_alua_lu_gp_cache" 121 " failed\n"); 122 goto out_free_pr_reg_cache; 123 } 124 t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache", 125 sizeof(struct t10_alua_lu_gp_member), 126 __alignof__(struct t10_alua_lu_gp_member), 0, NULL); 127 if (!t10_alua_lu_gp_mem_cache) { 128 pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_" 129 "cache failed\n"); 130 goto out_free_lu_gp_cache; 131 } 132 t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache", 133 sizeof(struct t10_alua_tg_pt_gp), 134 __alignof__(struct t10_alua_tg_pt_gp), 0, NULL); 135 if (!t10_alua_tg_pt_gp_cache) { 136 pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_" 137 "cache failed\n"); 138 goto out_free_lu_gp_mem_cache; 139 } 140 t10_alua_tg_pt_gp_mem_cache = kmem_cache_create( 141 "t10_alua_tg_pt_gp_mem_cache", 142 sizeof(struct t10_alua_tg_pt_gp_member), 143 __alignof__(struct t10_alua_tg_pt_gp_member), 144 0, NULL); 145 if (!t10_alua_tg_pt_gp_mem_cache) { 146 pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_" 147 "mem_t failed\n"); 148 goto out_free_tg_pt_gp_cache; 149 } 150 151 target_completion_wq = alloc_workqueue("target_completion", 152 WQ_MEM_RECLAIM, 0); 153 if (!target_completion_wq) 154 goto out_free_tg_pt_gp_mem_cache; 155 156 return 0; 157 158 out_free_tg_pt_gp_mem_cache: 159 kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); 160 out_free_tg_pt_gp_cache: 161 kmem_cache_destroy(t10_alua_tg_pt_gp_cache); 162 out_free_lu_gp_mem_cache: 163 kmem_cache_destroy(t10_alua_lu_gp_mem_cache); 164 out_free_lu_gp_cache: 165 kmem_cache_destroy(t10_alua_lu_gp_cache); 166 out_free_pr_reg_cache: 167 kmem_cache_destroy(t10_pr_reg_cache); 168 out_free_ua_cache: 169 kmem_cache_destroy(se_ua_cache); 170 out_free_sess_cache: 171 kmem_cache_destroy(se_sess_cache); 172 out_free_tmr_req_cache: 173 kmem_cache_destroy(se_tmr_req_cache); 174 out: 175 return -ENOMEM; 176 } 177 178 void release_se_kmem_caches(void) 179 { 180 destroy_workqueue(target_completion_wq); 181 kmem_cache_destroy(se_tmr_req_cache); 182 kmem_cache_destroy(se_sess_cache); 183 kmem_cache_destroy(se_ua_cache); 184 kmem_cache_destroy(t10_pr_reg_cache); 185 kmem_cache_destroy(t10_alua_lu_gp_cache); 186 kmem_cache_destroy(t10_alua_lu_gp_mem_cache); 187 kmem_cache_destroy(t10_alua_tg_pt_gp_cache); 188 kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); 189 } 190 191 /* This code ensures unique mib indexes are handed out. 
*/ 192 static DEFINE_SPINLOCK(scsi_mib_index_lock); 193 static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX]; 194 195 /* 196 * Allocate a new row index for the entry type specified 197 */ 198 u32 scsi_get_new_index(scsi_index_t type) 199 { 200 u32 new_index; 201 202 BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)); 203 204 spin_lock(&scsi_mib_index_lock); 205 new_index = ++scsi_mib_index[type]; 206 spin_unlock(&scsi_mib_index_lock); 207 208 return new_index; 209 } 210 211 static void transport_init_queue_obj(struct se_queue_obj *qobj) 212 { 213 atomic_set(&qobj->queue_cnt, 0); 214 INIT_LIST_HEAD(&qobj->qobj_list); 215 init_waitqueue_head(&qobj->thread_wq); 216 spin_lock_init(&qobj->cmd_queue_lock); 217 } 218 219 void transport_subsystem_check_init(void) 220 { 221 int ret; 222 223 if (sub_api_initialized) 224 return; 225 226 ret = request_module("target_core_iblock"); 227 if (ret != 0) 228 pr_err("Unable to load target_core_iblock\n"); 229 230 ret = request_module("target_core_file"); 231 if (ret != 0) 232 pr_err("Unable to load target_core_file\n"); 233 234 ret = request_module("target_core_pscsi"); 235 if (ret != 0) 236 pr_err("Unable to load target_core_pscsi\n"); 237 238 ret = request_module("target_core_stgt"); 239 if (ret != 0) 240 pr_err("Unable to load target_core_stgt\n"); 241 242 sub_api_initialized = 1; 243 return; 244 } 245 246 struct se_session *transport_init_session(void) 247 { 248 struct se_session *se_sess; 249 250 se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL); 251 if (!se_sess) { 252 pr_err("Unable to allocate struct se_session from" 253 " se_sess_cache\n"); 254 return ERR_PTR(-ENOMEM); 255 } 256 INIT_LIST_HEAD(&se_sess->sess_list); 257 INIT_LIST_HEAD(&se_sess->sess_acl_list); 258 INIT_LIST_HEAD(&se_sess->sess_cmd_list); 259 INIT_LIST_HEAD(&se_sess->sess_wait_list); 260 spin_lock_init(&se_sess->sess_cmd_lock); 261 262 return se_sess; 263 } 264 EXPORT_SYMBOL(transport_init_session); 265 266 /* 267 * Called with spin_lock_bh(&struct se_portal_group->session_lock called. 268 */ 269 void __transport_register_session( 270 struct se_portal_group *se_tpg, 271 struct se_node_acl *se_nacl, 272 struct se_session *se_sess, 273 void *fabric_sess_ptr) 274 { 275 unsigned char buf[PR_REG_ISID_LEN]; 276 277 se_sess->se_tpg = se_tpg; 278 se_sess->fabric_sess_ptr = fabric_sess_ptr; 279 /* 280 * Used by struct se_node_acl's under ConfigFS to locate active se_session-t 281 * 282 * Only set for struct se_session's that will actually be moving I/O. 283 * eg: *NOT* discovery sessions. 284 */ 285 if (se_nacl) { 286 /* 287 * If the fabric module supports an ISID based TransportID, 288 * save this value in binary from the fabric I_T Nexus now. 289 */ 290 if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) { 291 memset(&buf[0], 0, PR_REG_ISID_LEN); 292 se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess, 293 &buf[0], PR_REG_ISID_LEN); 294 se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]); 295 } 296 spin_lock_irq(&se_nacl->nacl_sess_lock); 297 /* 298 * The se_nacl->nacl_sess pointer will be set to the 299 * last active I_T Nexus for each struct se_node_acl. 
300 */ 301 se_nacl->nacl_sess = se_sess; 302 303 list_add_tail(&se_sess->sess_acl_list, 304 &se_nacl->acl_sess_list); 305 spin_unlock_irq(&se_nacl->nacl_sess_lock); 306 } 307 list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list); 308 309 pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n", 310 se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr); 311 } 312 EXPORT_SYMBOL(__transport_register_session); 313 314 void transport_register_session( 315 struct se_portal_group *se_tpg, 316 struct se_node_acl *se_nacl, 317 struct se_session *se_sess, 318 void *fabric_sess_ptr) 319 { 320 spin_lock_bh(&se_tpg->session_lock); 321 __transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr); 322 spin_unlock_bh(&se_tpg->session_lock); 323 } 324 EXPORT_SYMBOL(transport_register_session); 325 326 void transport_deregister_session_configfs(struct se_session *se_sess) 327 { 328 struct se_node_acl *se_nacl; 329 unsigned long flags; 330 /* 331 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session 332 */ 333 se_nacl = se_sess->se_node_acl; 334 if (se_nacl) { 335 spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags); 336 list_del(&se_sess->sess_acl_list); 337 /* 338 * If the session list is empty, then clear the pointer. 339 * Otherwise, set the struct se_session pointer from the tail 340 * element of the per struct se_node_acl active session list. 341 */ 342 if (list_empty(&se_nacl->acl_sess_list)) 343 se_nacl->nacl_sess = NULL; 344 else { 345 se_nacl->nacl_sess = container_of( 346 se_nacl->acl_sess_list.prev, 347 struct se_session, sess_acl_list); 348 } 349 spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags); 350 } 351 } 352 EXPORT_SYMBOL(transport_deregister_session_configfs); 353 354 void transport_free_session(struct se_session *se_sess) 355 { 356 kmem_cache_free(se_sess_cache, se_sess); 357 } 358 EXPORT_SYMBOL(transport_free_session); 359 360 void transport_deregister_session(struct se_session *se_sess) 361 { 362 struct se_portal_group *se_tpg = se_sess->se_tpg; 363 struct se_node_acl *se_nacl; 364 unsigned long flags; 365 366 if (!se_tpg) { 367 transport_free_session(se_sess); 368 return; 369 } 370 371 spin_lock_irqsave(&se_tpg->session_lock, flags); 372 list_del(&se_sess->sess_list); 373 se_sess->se_tpg = NULL; 374 se_sess->fabric_sess_ptr = NULL; 375 spin_unlock_irqrestore(&se_tpg->session_lock, flags); 376 377 /* 378 * Determine if we need to do extra work for this initiator node's 379 * struct se_node_acl if it had been previously dynamically generated. 380 */ 381 se_nacl = se_sess->se_node_acl; 382 if (se_nacl) { 383 spin_lock_irqsave(&se_tpg->acl_node_lock, flags); 384 if (se_nacl->dynamic_node_acl) { 385 if (!se_tpg->se_tpg_tfo->tpg_check_demo_mode_cache( 386 se_tpg)) { 387 list_del(&se_nacl->acl_list); 388 se_tpg->num_node_acls--; 389 spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags); 390 391 core_tpg_wait_for_nacl_pr_ref(se_nacl); 392 core_free_device_list_for_node(se_nacl, se_tpg); 393 se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, 394 se_nacl); 395 spin_lock_irqsave(&se_tpg->acl_node_lock, flags); 396 } 397 } 398 spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags); 399 } 400 401 transport_free_session(se_sess); 402 403 pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n", 404 se_tpg->se_tpg_tfo->get_fabric_name()); 405 } 406 EXPORT_SYMBOL(transport_deregister_session); 407 408 /* 409 * Called with cmd->t_state_lock held. 
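 * Takes and drops dev->execute_task_lock for each task while clearing its device state.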
410 */ 411 static void transport_all_task_dev_remove_state(struct se_cmd *cmd) 412 { 413 struct se_device *dev = cmd->se_dev; 414 struct se_task *task; 415 unsigned long flags; 416 417 if (!dev) 418 return; 419 420 list_for_each_entry(task, &cmd->t_task_list, t_list) { 421 if (task->task_flags & TF_ACTIVE) 422 continue; 423 424 spin_lock_irqsave(&dev->execute_task_lock, flags); 425 if (task->t_state_active) { 426 pr_debug("Removed ITT: 0x%08x dev: %p task[%p]\n", 427 cmd->se_tfo->get_task_tag(cmd), dev, task); 428 429 list_del(&task->t_state_list); 430 atomic_dec(&cmd->t_task_cdbs_ex_left); 431 task->t_state_active = false; 432 } 433 spin_unlock_irqrestore(&dev->execute_task_lock, flags); 434 } 435 436 } 437 438 /* transport_cmd_check_stop(): 439 * 440 * 'transport_off = 1' determines if t_transport_active should be cleared. 441 * 'transport_off = 2' determines if task_dev_state should be removed. 442 * 443 * A non-zero u8 t_state sets cmd->t_state. 444 * Returns 1 when command is stopped, else 0. 445 */ 446 static int transport_cmd_check_stop( 447 struct se_cmd *cmd, 448 int transport_off, 449 u8 t_state) 450 { 451 unsigned long flags; 452 453 spin_lock_irqsave(&cmd->t_state_lock, flags); 454 /* 455 * Determine if IOCTL context caller in requesting the stopping of this 456 * command for LUN shutdown purposes. 457 */ 458 if (atomic_read(&cmd->transport_lun_stop)) { 459 pr_debug("%s:%d atomic_read(&cmd->transport_lun_stop)" 460 " == TRUE for ITT: 0x%08x\n", __func__, __LINE__, 461 cmd->se_tfo->get_task_tag(cmd)); 462 463 atomic_set(&cmd->t_transport_active, 0); 464 if (transport_off == 2) 465 transport_all_task_dev_remove_state(cmd); 466 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 467 468 complete(&cmd->transport_lun_stop_comp); 469 return 1; 470 } 471 /* 472 * Determine if frontend context caller is requesting the stopping of 473 * this command for frontend exceptions. 474 */ 475 if (atomic_read(&cmd->t_transport_stop)) { 476 pr_debug("%s:%d atomic_read(&cmd->t_transport_stop) ==" 477 " TRUE for ITT: 0x%08x\n", __func__, __LINE__, 478 cmd->se_tfo->get_task_tag(cmd)); 479 480 if (transport_off == 2) 481 transport_all_task_dev_remove_state(cmd); 482 483 /* 484 * Clear struct se_cmd->se_lun before the transport_off == 2 handoff 485 * to FE. 486 */ 487 if (transport_off == 2) 488 cmd->se_lun = NULL; 489 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 490 491 complete(&cmd->t_transport_stop_comp); 492 return 1; 493 } 494 if (transport_off) { 495 atomic_set(&cmd->t_transport_active, 0); 496 if (transport_off == 2) { 497 transport_all_task_dev_remove_state(cmd); 498 /* 499 * Clear struct se_cmd->se_lun before the transport_off == 2 500 * handoff to fabric module. 501 */ 502 cmd->se_lun = NULL; 503 /* 504 * Some fabric modules like tcm_loop can release 505 * their internally allocated I/O reference now and 506 * struct se_cmd now. 507 * 508 * Fabric modules are expected to return '1' here if the 509 * se_cmd being passed is released at this point, 510 * or zero if not being released. 
511 */ 512 if (cmd->se_tfo->check_stop_free != NULL) { 513 spin_unlock_irqrestore( 514 &cmd->t_state_lock, flags); 515 516 return cmd->se_tfo->check_stop_free(cmd); 517 } 518 } 519 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 520 521 return 0; 522 } else if (t_state) 523 cmd->t_state = t_state; 524 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 525 526 return 0; 527 } 528 529 static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd) 530 { 531 return transport_cmd_check_stop(cmd, 2, 0); 532 } 533 534 static void transport_lun_remove_cmd(struct se_cmd *cmd) 535 { 536 struct se_lun *lun = cmd->se_lun; 537 unsigned long flags; 538 539 if (!lun) 540 return; 541 542 spin_lock_irqsave(&cmd->t_state_lock, flags); 543 if (!atomic_read(&cmd->transport_dev_active)) { 544 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 545 goto check_lun; 546 } 547 atomic_set(&cmd->transport_dev_active, 0); 548 transport_all_task_dev_remove_state(cmd); 549 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 550 551 552 check_lun: 553 spin_lock_irqsave(&lun->lun_cmd_lock, flags); 554 if (atomic_read(&cmd->transport_lun_active)) { 555 list_del(&cmd->se_lun_node); 556 atomic_set(&cmd->transport_lun_active, 0); 557 #if 0 558 pr_debug("Removed ITT: 0x%08x from LUN LIST[%d]\n" 559 cmd->se_tfo->get_task_tag(cmd), lun->unpacked_lun); 560 #endif 561 } 562 spin_unlock_irqrestore(&lun->lun_cmd_lock, flags); 563 } 564 565 void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) 566 { 567 if (!cmd->se_tmr_req) 568 transport_lun_remove_cmd(cmd); 569 570 if (transport_cmd_check_stop_to_fabric(cmd)) 571 return; 572 if (remove) { 573 transport_remove_cmd_from_queue(cmd); 574 transport_put_cmd(cmd); 575 } 576 } 577 578 static void transport_add_cmd_to_queue(struct se_cmd *cmd, int t_state, 579 bool at_head) 580 { 581 struct se_device *dev = cmd->se_dev; 582 struct se_queue_obj *qobj = &dev->dev_queue_obj; 583 unsigned long flags; 584 585 if (t_state) { 586 spin_lock_irqsave(&cmd->t_state_lock, flags); 587 cmd->t_state = t_state; 588 atomic_set(&cmd->t_transport_active, 1); 589 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 590 } 591 592 spin_lock_irqsave(&qobj->cmd_queue_lock, flags); 593 594 /* If the cmd is already on the list, remove it before we add it */ 595 if (!list_empty(&cmd->se_queue_node)) 596 list_del(&cmd->se_queue_node); 597 else 598 atomic_inc(&qobj->queue_cnt); 599 600 if (at_head) 601 list_add(&cmd->se_queue_node, &qobj->qobj_list); 602 else 603 list_add_tail(&cmd->se_queue_node, &qobj->qobj_list); 604 atomic_set(&cmd->t_transport_queue_active, 1); 605 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); 606 607 wake_up_interruptible(&qobj->thread_wq); 608 } 609 610 static struct se_cmd * 611 transport_get_cmd_from_queue(struct se_queue_obj *qobj) 612 { 613 struct se_cmd *cmd; 614 unsigned long flags; 615 616 spin_lock_irqsave(&qobj->cmd_queue_lock, flags); 617 if (list_empty(&qobj->qobj_list)) { 618 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); 619 return NULL; 620 } 621 cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node); 622 623 atomic_set(&cmd->t_transport_queue_active, 0); 624 625 list_del_init(&cmd->se_queue_node); 626 atomic_dec(&qobj->queue_cnt); 627 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); 628 629 return cmd; 630 } 631 632 static void transport_remove_cmd_from_queue(struct se_cmd *cmd) 633 { 634 struct se_queue_obj *qobj = &cmd->se_dev->dev_queue_obj; 635 unsigned long flags; 636 637 spin_lock_irqsave(&qobj->cmd_queue_lock, flags); 638 if 
(!atomic_read(&cmd->t_transport_queue_active)) { 639 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); 640 return; 641 } 642 atomic_set(&cmd->t_transport_queue_active, 0); 643 atomic_dec(&qobj->queue_cnt); 644 list_del_init(&cmd->se_queue_node); 645 spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); 646 647 if (atomic_read(&cmd->t_transport_queue_active)) { 648 pr_err("ITT: 0x%08x t_transport_queue_active: %d\n", 649 cmd->se_tfo->get_task_tag(cmd), 650 atomic_read(&cmd->t_transport_queue_active)); 651 } 652 } 653 654 /* 655 * Completion function used by TCM subsystem plugins (such as FILEIO) 656 * for queueing up response from struct se_subsystem_api->do_task() 657 */ 658 void transport_complete_sync_cache(struct se_cmd *cmd, int good) 659 { 660 struct se_task *task = list_entry(cmd->t_task_list.next, 661 struct se_task, t_list); 662 663 if (good) { 664 cmd->scsi_status = SAM_STAT_GOOD; 665 task->task_scsi_status = GOOD; 666 } else { 667 task->task_scsi_status = SAM_STAT_CHECK_CONDITION; 668 task->task_se_cmd->scsi_sense_reason = 669 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 670 671 } 672 673 transport_complete_task(task, good); 674 } 675 EXPORT_SYMBOL(transport_complete_sync_cache); 676 677 static void target_complete_failure_work(struct work_struct *work) 678 { 679 struct se_cmd *cmd = container_of(work, struct se_cmd, work); 680 681 transport_generic_request_failure(cmd); 682 } 683 684 /* transport_complete_task(): 685 * 686 * Called from interrupt and non interrupt context depending 687 * on the transport plugin. 688 */ 689 void transport_complete_task(struct se_task *task, int success) 690 { 691 struct se_cmd *cmd = task->task_se_cmd; 692 struct se_device *dev = cmd->se_dev; 693 unsigned long flags; 694 695 spin_lock_irqsave(&cmd->t_state_lock, flags); 696 task->task_flags &= ~TF_ACTIVE; 697 698 /* 699 * See if any sense data exists, if so set the TASK_SENSE flag. 700 * Also check for any other post completion work that needs to be 701 * done by the plugins. 702 */ 703 if (dev && dev->transport->transport_complete) { 704 if (dev->transport->transport_complete(task) != 0) { 705 cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE; 706 task->task_flags |= TF_HAS_SENSE; 707 success = 1; 708 } 709 } 710 711 /* 712 * See if we are waiting for outstanding struct se_task 713 * to complete for an exception condition 714 */ 715 if (task->task_flags & TF_REQUEST_STOP) { 716 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 717 complete(&task->task_stop_comp); 718 return; 719 } 720 721 if (!success) 722 cmd->t_tasks_failed = 1; 723 724 /* 725 * Decrement the outstanding t_task_cdbs_left count. The last 726 * struct se_task from struct se_cmd will complete itself into the 727 * device queue depending upon int success. 
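	 * When the last outstanding task completes, the command is queued to
	 * target_completion_wq using either target_complete_ok_work or
	 * target_complete_failure_work.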
	 */
	if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return;
	}

	if (cmd->t_tasks_failed) {
		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		INIT_WORK(&cmd->work, target_complete_failure_work);
	} else {
		atomic_set(&cmd->t_transport_complete, 1);
		INIT_WORK(&cmd->work, target_complete_ok_work);
	}

	cmd->t_state = TRANSPORT_COMPLETE;
	atomic_set(&cmd->t_transport_active, 1);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	queue_work(target_completion_wq, &cmd->work);
}
EXPORT_SYMBOL(transport_complete_task);

/*
 * Called by transport_add_tasks_from_cmd() once a struct se_cmd's
 * struct se_task list is ready to be added to the active execution list
 * of the struct se_device.
 *
 * Called with se_dev_t->execute_task_lock held.
 */
static inline int transport_add_task_check_sam_attr(
	struct se_task *task,
	struct se_task *task_prev,
	struct se_device *dev)
{
	/*
	 * No SAM Task attribute emulation enabled, add to tail of
	 * execution queue
	 */
	if (dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) {
		list_add_tail(&task->t_execute_list, &dev->execute_task_list);
		return 0;
	}
	/*
	 * HEAD_OF_QUEUE attribute for received CDB, which means
	 * the first task that is associated with a struct se_cmd goes to
	 * head of the struct se_device->execute_task_list, and task_prev
	 * after that for each subsequent task
	 */
	if (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG) {
		list_add(&task->t_execute_list,
				(task_prev != NULL) ?
				&task_prev->t_execute_list :
				&dev->execute_task_list);

		pr_debug("Set HEAD_OF_QUEUE for task CDB: 0x%02x"
				" in execution queue\n",
				task->task_se_cmd->t_task_cdb[0]);
		return 1;
	}
	/*
	 * For ORDERED, SIMPLE or UNTAGGED attribute tasks once they have been
	 * transitioned from Dormant -> Active state, and are added to the end
	 * of the struct se_device->execute_task_list
	 */
	list_add_tail(&task->t_execute_list, &dev->execute_task_list);
	return 0;
}

/* __transport_add_task_to_execute_queue():
 *
 * Called with se_dev_t->execute_task_lock held.
 */
static void __transport_add_task_to_execute_queue(
	struct se_task *task,
	struct se_task *task_prev,
	struct se_device *dev)
{
	int head_of_queue;

	head_of_queue = transport_add_task_check_sam_attr(task, task_prev, dev);
	atomic_inc(&dev->execute_tasks);

	if (task->t_state_active)
		return;
	/*
	 * Determine if this task needs to go to HEAD_OF_QUEUE for the
	 * state list as well.  Running with SAM Task Attribute emulation
	 * will always return head_of_queue == 0 here
	 */
	if (head_of_queue)
		list_add(&task->t_state_list, (task_prev) ?
819 &task_prev->t_state_list : 820 &dev->state_task_list); 821 else 822 list_add_tail(&task->t_state_list, &dev->state_task_list); 823 824 task->t_state_active = true; 825 826 pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n", 827 task->task_se_cmd->se_tfo->get_task_tag(task->task_se_cmd), 828 task, dev); 829 } 830 831 static void transport_add_tasks_to_state_queue(struct se_cmd *cmd) 832 { 833 struct se_device *dev = cmd->se_dev; 834 struct se_task *task; 835 unsigned long flags; 836 837 spin_lock_irqsave(&cmd->t_state_lock, flags); 838 list_for_each_entry(task, &cmd->t_task_list, t_list) { 839 spin_lock(&dev->execute_task_lock); 840 if (!task->t_state_active) { 841 list_add_tail(&task->t_state_list, 842 &dev->state_task_list); 843 task->t_state_active = true; 844 845 pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n", 846 task->task_se_cmd->se_tfo->get_task_tag( 847 task->task_se_cmd), task, dev); 848 } 849 spin_unlock(&dev->execute_task_lock); 850 } 851 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 852 } 853 854 static void __transport_add_tasks_from_cmd(struct se_cmd *cmd) 855 { 856 struct se_device *dev = cmd->se_dev; 857 struct se_task *task, *task_prev = NULL; 858 859 list_for_each_entry(task, &cmd->t_task_list, t_list) { 860 if (!list_empty(&task->t_execute_list)) 861 continue; 862 /* 863 * __transport_add_task_to_execute_queue() handles the 864 * SAM Task Attribute emulation if enabled 865 */ 866 __transport_add_task_to_execute_queue(task, task_prev, dev); 867 task_prev = task; 868 } 869 } 870 871 static void transport_add_tasks_from_cmd(struct se_cmd *cmd) 872 { 873 unsigned long flags; 874 struct se_device *dev = cmd->se_dev; 875 876 spin_lock_irqsave(&dev->execute_task_lock, flags); 877 __transport_add_tasks_from_cmd(cmd); 878 spin_unlock_irqrestore(&dev->execute_task_lock, flags); 879 } 880 881 void __transport_remove_task_from_execute_queue(struct se_task *task, 882 struct se_device *dev) 883 { 884 list_del_init(&task->t_execute_list); 885 atomic_dec(&dev->execute_tasks); 886 } 887 888 static void transport_remove_task_from_execute_queue( 889 struct se_task *task, 890 struct se_device *dev) 891 { 892 unsigned long flags; 893 894 if (WARN_ON(list_empty(&task->t_execute_list))) 895 return; 896 897 spin_lock_irqsave(&dev->execute_task_lock, flags); 898 __transport_remove_task_from_execute_queue(task, dev); 899 spin_unlock_irqrestore(&dev->execute_task_lock, flags); 900 } 901 902 /* 903 * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status 904 */ 905 906 static void target_qf_do_work(struct work_struct *work) 907 { 908 struct se_device *dev = container_of(work, struct se_device, 909 qf_work_queue); 910 LIST_HEAD(qf_cmd_list); 911 struct se_cmd *cmd, *cmd_tmp; 912 913 spin_lock_irq(&dev->qf_cmd_lock); 914 list_splice_init(&dev->qf_cmd_list, &qf_cmd_list); 915 spin_unlock_irq(&dev->qf_cmd_lock); 916 917 list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) { 918 list_del(&cmd->se_qf_node); 919 atomic_dec(&dev->dev_qf_count); 920 smp_mb__after_atomic_dec(); 921 922 pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue" 923 " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd, 924 (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" : 925 (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? 
"WRITE_PENDING" 926 : "UNKNOWN"); 927 928 transport_add_cmd_to_queue(cmd, cmd->t_state, true); 929 } 930 } 931 932 unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd) 933 { 934 switch (cmd->data_direction) { 935 case DMA_NONE: 936 return "NONE"; 937 case DMA_FROM_DEVICE: 938 return "READ"; 939 case DMA_TO_DEVICE: 940 return "WRITE"; 941 case DMA_BIDIRECTIONAL: 942 return "BIDI"; 943 default: 944 break; 945 } 946 947 return "UNKNOWN"; 948 } 949 950 void transport_dump_dev_state( 951 struct se_device *dev, 952 char *b, 953 int *bl) 954 { 955 *bl += sprintf(b + *bl, "Status: "); 956 switch (dev->dev_status) { 957 case TRANSPORT_DEVICE_ACTIVATED: 958 *bl += sprintf(b + *bl, "ACTIVATED"); 959 break; 960 case TRANSPORT_DEVICE_DEACTIVATED: 961 *bl += sprintf(b + *bl, "DEACTIVATED"); 962 break; 963 case TRANSPORT_DEVICE_SHUTDOWN: 964 *bl += sprintf(b + *bl, "SHUTDOWN"); 965 break; 966 case TRANSPORT_DEVICE_OFFLINE_ACTIVATED: 967 case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED: 968 *bl += sprintf(b + *bl, "OFFLINE"); 969 break; 970 default: 971 *bl += sprintf(b + *bl, "UNKNOWN=%d", dev->dev_status); 972 break; 973 } 974 975 *bl += sprintf(b + *bl, " Execute/Max Queue Depth: %d/%d", 976 atomic_read(&dev->execute_tasks), dev->queue_depth); 977 *bl += sprintf(b + *bl, " SectorSize: %u MaxSectors: %u\n", 978 dev->se_sub_dev->se_dev_attrib.block_size, dev->se_sub_dev->se_dev_attrib.max_sectors); 979 *bl += sprintf(b + *bl, " "); 980 } 981 982 void transport_dump_vpd_proto_id( 983 struct t10_vpd *vpd, 984 unsigned char *p_buf, 985 int p_buf_len) 986 { 987 unsigned char buf[VPD_TMP_BUF_SIZE]; 988 int len; 989 990 memset(buf, 0, VPD_TMP_BUF_SIZE); 991 len = sprintf(buf, "T10 VPD Protocol Identifier: "); 992 993 switch (vpd->protocol_identifier) { 994 case 0x00: 995 sprintf(buf+len, "Fibre Channel\n"); 996 break; 997 case 0x10: 998 sprintf(buf+len, "Parallel SCSI\n"); 999 break; 1000 case 0x20: 1001 sprintf(buf+len, "SSA\n"); 1002 break; 1003 case 0x30: 1004 sprintf(buf+len, "IEEE 1394\n"); 1005 break; 1006 case 0x40: 1007 sprintf(buf+len, "SCSI Remote Direct Memory Access" 1008 " Protocol\n"); 1009 break; 1010 case 0x50: 1011 sprintf(buf+len, "Internet SCSI (iSCSI)\n"); 1012 break; 1013 case 0x60: 1014 sprintf(buf+len, "SAS Serial SCSI Protocol\n"); 1015 break; 1016 case 0x70: 1017 sprintf(buf+len, "Automation/Drive Interface Transport" 1018 " Protocol\n"); 1019 break; 1020 case 0x80: 1021 sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n"); 1022 break; 1023 default: 1024 sprintf(buf+len, "Unknown 0x%02x\n", 1025 vpd->protocol_identifier); 1026 break; 1027 } 1028 1029 if (p_buf) 1030 strncpy(p_buf, buf, p_buf_len); 1031 else 1032 pr_debug("%s", buf); 1033 } 1034 1035 void 1036 transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83) 1037 { 1038 /* 1039 * Check if the Protocol Identifier Valid (PIV) bit is set.. 
1040 * 1041 * from spc3r23.pdf section 7.5.1 1042 */ 1043 if (page_83[1] & 0x80) { 1044 vpd->protocol_identifier = (page_83[0] & 0xf0); 1045 vpd->protocol_identifier_set = 1; 1046 transport_dump_vpd_proto_id(vpd, NULL, 0); 1047 } 1048 } 1049 EXPORT_SYMBOL(transport_set_vpd_proto_id); 1050 1051 int transport_dump_vpd_assoc( 1052 struct t10_vpd *vpd, 1053 unsigned char *p_buf, 1054 int p_buf_len) 1055 { 1056 unsigned char buf[VPD_TMP_BUF_SIZE]; 1057 int ret = 0; 1058 int len; 1059 1060 memset(buf, 0, VPD_TMP_BUF_SIZE); 1061 len = sprintf(buf, "T10 VPD Identifier Association: "); 1062 1063 switch (vpd->association) { 1064 case 0x00: 1065 sprintf(buf+len, "addressed logical unit\n"); 1066 break; 1067 case 0x10: 1068 sprintf(buf+len, "target port\n"); 1069 break; 1070 case 0x20: 1071 sprintf(buf+len, "SCSI target device\n"); 1072 break; 1073 default: 1074 sprintf(buf+len, "Unknown 0x%02x\n", vpd->association); 1075 ret = -EINVAL; 1076 break; 1077 } 1078 1079 if (p_buf) 1080 strncpy(p_buf, buf, p_buf_len); 1081 else 1082 pr_debug("%s", buf); 1083 1084 return ret; 1085 } 1086 1087 int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83) 1088 { 1089 /* 1090 * The VPD identification association.. 1091 * 1092 * from spc3r23.pdf Section 7.6.3.1 Table 297 1093 */ 1094 vpd->association = (page_83[1] & 0x30); 1095 return transport_dump_vpd_assoc(vpd, NULL, 0); 1096 } 1097 EXPORT_SYMBOL(transport_set_vpd_assoc); 1098 1099 int transport_dump_vpd_ident_type( 1100 struct t10_vpd *vpd, 1101 unsigned char *p_buf, 1102 int p_buf_len) 1103 { 1104 unsigned char buf[VPD_TMP_BUF_SIZE]; 1105 int ret = 0; 1106 int len; 1107 1108 memset(buf, 0, VPD_TMP_BUF_SIZE); 1109 len = sprintf(buf, "T10 VPD Identifier Type: "); 1110 1111 switch (vpd->device_identifier_type) { 1112 case 0x00: 1113 sprintf(buf+len, "Vendor specific\n"); 1114 break; 1115 case 0x01: 1116 sprintf(buf+len, "T10 Vendor ID based\n"); 1117 break; 1118 case 0x02: 1119 sprintf(buf+len, "EUI-64 based\n"); 1120 break; 1121 case 0x03: 1122 sprintf(buf+len, "NAA\n"); 1123 break; 1124 case 0x04: 1125 sprintf(buf+len, "Relative target port identifier\n"); 1126 break; 1127 case 0x08: 1128 sprintf(buf+len, "SCSI name string\n"); 1129 break; 1130 default: 1131 sprintf(buf+len, "Unsupported: 0x%02x\n", 1132 vpd->device_identifier_type); 1133 ret = -EINVAL; 1134 break; 1135 } 1136 1137 if (p_buf) { 1138 if (p_buf_len < strlen(buf)+1) 1139 return -EINVAL; 1140 strncpy(p_buf, buf, p_buf_len); 1141 } else { 1142 pr_debug("%s", buf); 1143 } 1144 1145 return ret; 1146 } 1147 1148 int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83) 1149 { 1150 /* 1151 * The VPD identifier type.. 
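	 * The type is taken from the low nibble of page_83[1] below.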
1152 * 1153 * from spc3r23.pdf Section 7.6.3.1 Table 298 1154 */ 1155 vpd->device_identifier_type = (page_83[1] & 0x0f); 1156 return transport_dump_vpd_ident_type(vpd, NULL, 0); 1157 } 1158 EXPORT_SYMBOL(transport_set_vpd_ident_type); 1159 1160 int transport_dump_vpd_ident( 1161 struct t10_vpd *vpd, 1162 unsigned char *p_buf, 1163 int p_buf_len) 1164 { 1165 unsigned char buf[VPD_TMP_BUF_SIZE]; 1166 int ret = 0; 1167 1168 memset(buf, 0, VPD_TMP_BUF_SIZE); 1169 1170 switch (vpd->device_identifier_code_set) { 1171 case 0x01: /* Binary */ 1172 sprintf(buf, "T10 VPD Binary Device Identifier: %s\n", 1173 &vpd->device_identifier[0]); 1174 break; 1175 case 0x02: /* ASCII */ 1176 sprintf(buf, "T10 VPD ASCII Device Identifier: %s\n", 1177 &vpd->device_identifier[0]); 1178 break; 1179 case 0x03: /* UTF-8 */ 1180 sprintf(buf, "T10 VPD UTF-8 Device Identifier: %s\n", 1181 &vpd->device_identifier[0]); 1182 break; 1183 default: 1184 sprintf(buf, "T10 VPD Device Identifier encoding unsupported:" 1185 " 0x%02x", vpd->device_identifier_code_set); 1186 ret = -EINVAL; 1187 break; 1188 } 1189 1190 if (p_buf) 1191 strncpy(p_buf, buf, p_buf_len); 1192 else 1193 pr_debug("%s", buf); 1194 1195 return ret; 1196 } 1197 1198 int 1199 transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83) 1200 { 1201 static const char hex_str[] = "0123456789abcdef"; 1202 int j = 0, i = 4; /* offset to start of the identifer */ 1203 1204 /* 1205 * The VPD Code Set (encoding) 1206 * 1207 * from spc3r23.pdf Section 7.6.3.1 Table 296 1208 */ 1209 vpd->device_identifier_code_set = (page_83[0] & 0x0f); 1210 switch (vpd->device_identifier_code_set) { 1211 case 0x01: /* Binary */ 1212 vpd->device_identifier[j++] = 1213 hex_str[vpd->device_identifier_type]; 1214 while (i < (4 + page_83[3])) { 1215 vpd->device_identifier[j++] = 1216 hex_str[(page_83[i] & 0xf0) >> 4]; 1217 vpd->device_identifier[j++] = 1218 hex_str[page_83[i] & 0x0f]; 1219 i++; 1220 } 1221 break; 1222 case 0x02: /* ASCII */ 1223 case 0x03: /* UTF-8 */ 1224 while (i < (4 + page_83[3])) 1225 vpd->device_identifier[j++] = page_83[i++]; 1226 break; 1227 default: 1228 break; 1229 } 1230 1231 return transport_dump_vpd_ident(vpd, NULL, 0); 1232 } 1233 EXPORT_SYMBOL(transport_set_vpd_ident); 1234 1235 static void core_setup_task_attr_emulation(struct se_device *dev) 1236 { 1237 /* 1238 * If this device is from Target_Core_Mod/pSCSI, disable the 1239 * SAM Task Attribute emulation. 1240 * 1241 * This is currently not available in upsream Linux/SCSI Target 1242 * mode code, and is assumed to be disabled while using TCM/pSCSI. 
1243 */ 1244 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { 1245 dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH; 1246 return; 1247 } 1248 1249 dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED; 1250 pr_debug("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x" 1251 " device\n", dev->transport->name, 1252 dev->transport->get_device_rev(dev)); 1253 } 1254 1255 static void scsi_dump_inquiry(struct se_device *dev) 1256 { 1257 struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn; 1258 char buf[17]; 1259 int i, device_type; 1260 /* 1261 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer 1262 */ 1263 for (i = 0; i < 8; i++) 1264 if (wwn->vendor[i] >= 0x20) 1265 buf[i] = wwn->vendor[i]; 1266 else 1267 buf[i] = ' '; 1268 buf[i] = '\0'; 1269 pr_debug(" Vendor: %s\n", buf); 1270 1271 for (i = 0; i < 16; i++) 1272 if (wwn->model[i] >= 0x20) 1273 buf[i] = wwn->model[i]; 1274 else 1275 buf[i] = ' '; 1276 buf[i] = '\0'; 1277 pr_debug(" Model: %s\n", buf); 1278 1279 for (i = 0; i < 4; i++) 1280 if (wwn->revision[i] >= 0x20) 1281 buf[i] = wwn->revision[i]; 1282 else 1283 buf[i] = ' '; 1284 buf[i] = '\0'; 1285 pr_debug(" Revision: %s\n", buf); 1286 1287 device_type = dev->transport->get_device_type(dev); 1288 pr_debug(" Type: %s ", scsi_device_type(device_type)); 1289 pr_debug(" ANSI SCSI revision: %02x\n", 1290 dev->transport->get_device_rev(dev)); 1291 } 1292 1293 struct se_device *transport_add_device_to_core_hba( 1294 struct se_hba *hba, 1295 struct se_subsystem_api *transport, 1296 struct se_subsystem_dev *se_dev, 1297 u32 device_flags, 1298 void *transport_dev, 1299 struct se_dev_limits *dev_limits, 1300 const char *inquiry_prod, 1301 const char *inquiry_rev) 1302 { 1303 int force_pt; 1304 struct se_device *dev; 1305 1306 dev = kzalloc(sizeof(struct se_device), GFP_KERNEL); 1307 if (!dev) { 1308 pr_err("Unable to allocate memory for se_dev_t\n"); 1309 return NULL; 1310 } 1311 1312 transport_init_queue_obj(&dev->dev_queue_obj); 1313 dev->dev_flags = device_flags; 1314 dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED; 1315 dev->dev_ptr = transport_dev; 1316 dev->se_hba = hba; 1317 dev->se_sub_dev = se_dev; 1318 dev->transport = transport; 1319 INIT_LIST_HEAD(&dev->dev_list); 1320 INIT_LIST_HEAD(&dev->dev_sep_list); 1321 INIT_LIST_HEAD(&dev->dev_tmr_list); 1322 INIT_LIST_HEAD(&dev->execute_task_list); 1323 INIT_LIST_HEAD(&dev->delayed_cmd_list); 1324 INIT_LIST_HEAD(&dev->state_task_list); 1325 INIT_LIST_HEAD(&dev->qf_cmd_list); 1326 spin_lock_init(&dev->execute_task_lock); 1327 spin_lock_init(&dev->delayed_cmd_lock); 1328 spin_lock_init(&dev->dev_reservation_lock); 1329 spin_lock_init(&dev->dev_status_lock); 1330 spin_lock_init(&dev->se_port_lock); 1331 spin_lock_init(&dev->se_tmr_lock); 1332 spin_lock_init(&dev->qf_cmd_lock); 1333 atomic_set(&dev->dev_ordered_id, 0); 1334 1335 se_dev_set_default_attribs(dev, dev_limits); 1336 1337 dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX); 1338 dev->creation_time = get_jiffies_64(); 1339 spin_lock_init(&dev->stats_lock); 1340 1341 spin_lock(&hba->device_lock); 1342 list_add_tail(&dev->dev_list, &hba->hba_dev_list); 1343 hba->dev_count++; 1344 spin_unlock(&hba->device_lock); 1345 /* 1346 * Setup the SAM Task Attribute emulation for struct se_device 1347 */ 1348 core_setup_task_attr_emulation(dev); 1349 /* 1350 * Force PR and ALUA passthrough emulation with internal object use. 
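	 * (i.e. when the HBA has HBA_FLAGS_INTERNAL_USE set).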
1351 */ 1352 force_pt = (hba->hba_flags & HBA_FLAGS_INTERNAL_USE); 1353 /* 1354 * Setup the Reservations infrastructure for struct se_device 1355 */ 1356 core_setup_reservations(dev, force_pt); 1357 /* 1358 * Setup the Asymmetric Logical Unit Assignment for struct se_device 1359 */ 1360 if (core_setup_alua(dev, force_pt) < 0) 1361 goto out; 1362 1363 /* 1364 * Startup the struct se_device processing thread 1365 */ 1366 dev->process_thread = kthread_run(transport_processing_thread, dev, 1367 "LIO_%s", dev->transport->name); 1368 if (IS_ERR(dev->process_thread)) { 1369 pr_err("Unable to create kthread: LIO_%s\n", 1370 dev->transport->name); 1371 goto out; 1372 } 1373 /* 1374 * Setup work_queue for QUEUE_FULL 1375 */ 1376 INIT_WORK(&dev->qf_work_queue, target_qf_do_work); 1377 /* 1378 * Preload the initial INQUIRY const values if we are doing 1379 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI 1380 * passthrough because this is being provided by the backend LLD. 1381 * This is required so that transport_get_inquiry() copies these 1382 * originals once back into DEV_T10_WWN(dev) for the virtual device 1383 * setup. 1384 */ 1385 if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) { 1386 if (!inquiry_prod || !inquiry_rev) { 1387 pr_err("All non TCM/pSCSI plugins require" 1388 " INQUIRY consts\n"); 1389 goto out; 1390 } 1391 1392 strncpy(&dev->se_sub_dev->t10_wwn.vendor[0], "LIO-ORG", 8); 1393 strncpy(&dev->se_sub_dev->t10_wwn.model[0], inquiry_prod, 16); 1394 strncpy(&dev->se_sub_dev->t10_wwn.revision[0], inquiry_rev, 4); 1395 } 1396 scsi_dump_inquiry(dev); 1397 1398 return dev; 1399 out: 1400 kthread_stop(dev->process_thread); 1401 1402 spin_lock(&hba->device_lock); 1403 list_del(&dev->dev_list); 1404 hba->dev_count--; 1405 spin_unlock(&hba->device_lock); 1406 1407 se_release_vpd_for_dev(dev); 1408 1409 kfree(dev); 1410 1411 return NULL; 1412 } 1413 EXPORT_SYMBOL(transport_add_device_to_core_hba); 1414 1415 /* transport_generic_prepare_cdb(): 1416 * 1417 * Since the Initiator sees iSCSI devices as LUNs, the SCSI CDB will 1418 * contain the iSCSI LUN in bits 7-5 of byte 1 as per SAM-2. 1419 * The point of this is since we are mapping iSCSI LUNs to 1420 * SCSI Target IDs having a non-zero LUN in the CDB will throw the 1421 * devices and HBAs for a loop. 
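 * Only the opcodes listed below keep byte 1 intact (RDProtect/VRProtect/
 * SELF-TEST bits); every other CDB has cdb[1] &= 0x1f applied.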
1422 */ 1423 static inline void transport_generic_prepare_cdb( 1424 unsigned char *cdb) 1425 { 1426 switch (cdb[0]) { 1427 case READ_10: /* SBC - RDProtect */ 1428 case READ_12: /* SBC - RDProtect */ 1429 case READ_16: /* SBC - RDProtect */ 1430 case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */ 1431 case VERIFY: /* SBC - VRProtect */ 1432 case VERIFY_16: /* SBC - VRProtect */ 1433 case WRITE_VERIFY: /* SBC - VRProtect */ 1434 case WRITE_VERIFY_12: /* SBC - VRProtect */ 1435 break; 1436 default: 1437 cdb[1] &= 0x1f; /* clear logical unit number */ 1438 break; 1439 } 1440 } 1441 1442 static struct se_task * 1443 transport_generic_get_task(struct se_cmd *cmd, 1444 enum dma_data_direction data_direction) 1445 { 1446 struct se_task *task; 1447 struct se_device *dev = cmd->se_dev; 1448 1449 task = dev->transport->alloc_task(cmd->t_task_cdb); 1450 if (!task) { 1451 pr_err("Unable to allocate struct se_task\n"); 1452 return NULL; 1453 } 1454 1455 INIT_LIST_HEAD(&task->t_list); 1456 INIT_LIST_HEAD(&task->t_execute_list); 1457 INIT_LIST_HEAD(&task->t_state_list); 1458 init_completion(&task->task_stop_comp); 1459 task->task_se_cmd = cmd; 1460 task->task_data_direction = data_direction; 1461 1462 return task; 1463 } 1464 1465 static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *); 1466 1467 /* 1468 * Used by fabric modules containing a local struct se_cmd within their 1469 * fabric dependent per I/O descriptor. 1470 */ 1471 void transport_init_se_cmd( 1472 struct se_cmd *cmd, 1473 struct target_core_fabric_ops *tfo, 1474 struct se_session *se_sess, 1475 u32 data_length, 1476 int data_direction, 1477 int task_attr, 1478 unsigned char *sense_buffer) 1479 { 1480 INIT_LIST_HEAD(&cmd->se_lun_node); 1481 INIT_LIST_HEAD(&cmd->se_delayed_node); 1482 INIT_LIST_HEAD(&cmd->se_qf_node); 1483 INIT_LIST_HEAD(&cmd->se_queue_node); 1484 INIT_LIST_HEAD(&cmd->se_cmd_list); 1485 INIT_LIST_HEAD(&cmd->t_task_list); 1486 init_completion(&cmd->transport_lun_fe_stop_comp); 1487 init_completion(&cmd->transport_lun_stop_comp); 1488 init_completion(&cmd->t_transport_stop_comp); 1489 init_completion(&cmd->cmd_wait_comp); 1490 spin_lock_init(&cmd->t_state_lock); 1491 atomic_set(&cmd->transport_dev_active, 1); 1492 1493 cmd->se_tfo = tfo; 1494 cmd->se_sess = se_sess; 1495 cmd->data_length = data_length; 1496 cmd->data_direction = data_direction; 1497 cmd->sam_task_attr = task_attr; 1498 cmd->sense_buffer = sense_buffer; 1499 } 1500 EXPORT_SYMBOL(transport_init_se_cmd); 1501 1502 static int transport_check_alloc_task_attr(struct se_cmd *cmd) 1503 { 1504 /* 1505 * Check if SAM Task Attribute emulation is enabled for this 1506 * struct se_device storage object 1507 */ 1508 if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) 1509 return 0; 1510 1511 if (cmd->sam_task_attr == MSG_ACA_TAG) { 1512 pr_debug("SAM Task Attribute ACA" 1513 " emulation is not supported\n"); 1514 return -EINVAL; 1515 } 1516 /* 1517 * Used to determine when ORDERED commands should go from 1518 * Dormant to Active status. 1519 */ 1520 cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id); 1521 smp_mb__after_atomic_inc(); 1522 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n", 1523 cmd->se_ordered_id, cmd->sam_task_attr, 1524 cmd->se_dev->transport->name); 1525 return 0; 1526 } 1527 1528 /* transport_generic_allocate_tasks(): 1529 * 1530 * Called from fabric RX Thread. 
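 * Returns 0 on success or a negative errno on failure.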
 */
int transport_generic_allocate_tasks(
	struct se_cmd *cmd,
	unsigned char *cdb)
{
	int ret;

	transport_generic_prepare_cdb(cdb);
	/*
	 * Ensure that the received CDB is less than the max (252 + 8) bytes
	 * for VARIABLE_LENGTH_CMD
	 */
	if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
		pr_err("Received SCSI CDB with command_size: %d that"
			" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
			scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
		return -EINVAL;
	}
	/*
	 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
	 * allocate the additional extended CDB buffer now..  Otherwise
	 * setup the pointer from __t_task_cdb to t_task_cdb.
	 */
	if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
		cmd->t_task_cdb = kzalloc(scsi_command_size(cdb),
						GFP_KERNEL);
		if (!cmd->t_task_cdb) {
			pr_err("Unable to allocate cmd->t_task_cdb"
				" %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
				scsi_command_size(cdb),
				(unsigned long)sizeof(cmd->__t_task_cdb));
			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			cmd->scsi_sense_reason =
					TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
			return -ENOMEM;
		}
	} else
		cmd->t_task_cdb = &cmd->__t_task_cdb[0];
	/*
	 * Copy the original CDB into cmd->t_task_cdb.
	 */
	memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));
	/*
	 * Setup the received CDB based on SCSI defined opcodes and
	 * perform unit attention, persistent reservations and ALUA
	 * checks for virtual device backends.  The cmd->t_task_cdb
	 * pointer is expected to be setup before we reach this point.
	 */
	ret = transport_generic_cmd_sequencer(cmd, cdb);
	if (ret < 0)
		return ret;
	/*
	 * Check for SAM Task Attribute Emulation
	 */
	if (transport_check_alloc_task_attr(cmd) < 0) {
		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
		return -EINVAL;
	}
	spin_lock(&cmd->se_lun->lun_sep_lock);
	if (cmd->se_lun->lun_sep)
		cmd->se_lun->lun_sep->sep_stats.cmd_pdus++;
	spin_unlock(&cmd->se_lun->lun_sep_lock);
	return 0;
}
EXPORT_SYMBOL(transport_generic_allocate_tasks);

/*
 * Used by fabric module frontends to queue tasks directly.
 * May only be used from process context.
 */
int transport_handle_cdb_direct(
	struct se_cmd *cmd)
{
	int ret;

	if (!cmd->se_lun) {
		dump_stack();
		pr_err("cmd->se_lun is NULL\n");
		return -EINVAL;
	}
	if (in_interrupt()) {
		dump_stack();
		pr_err("transport_generic_handle_cdb cannot be called"
				" from interrupt context\n");
		return -EINVAL;
	}
	/*
	 * Set TRANSPORT_NEW_CMD state and cmd->t_transport_active=1 following
	 * transport_generic_handle_cdb*() -> transport_add_cmd_to_queue()
	 * in existing usage to ensure that outstanding descriptors are handled
	 * correctly during shutdown via transport_wait_for_tasks()
	 *
	 * Also, we don't take cmd->t_state_lock here as we only expect
	 * this to be called for initial descriptor submission.
	 */
	cmd->t_state = TRANSPORT_NEW_CMD;
	atomic_set(&cmd->t_transport_active, 1);
	/*
	 * transport_generic_new_cmd() is already handling QUEUE_FULL,
	 * so follow TRANSPORT_NEW_CMD processing thread context usage
	 * and call transport_generic_request_failure() if necessary..
	 */
	ret = transport_generic_new_cmd(cmd);
	if (ret < 0)
		transport_generic_request_failure(cmd);

	return 0;
}
EXPORT_SYMBOL(transport_handle_cdb_direct);

/**
 * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @cdb: pointer to SCSI CDB
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @data_length: fabric expected data transfer length
 * @task_attr: SAM task attribute
 * @data_dir: DMA data direction
 * @flags: flags for command submission from target_sc_flags_tables
 *
 * This may only be called from process context, and also currently
 * assumes internal allocation of fabric payload buffer by target-core.
 **/
void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *cdb, unsigned char *sense, u32 unpacked_lun,
		u32 data_length, int task_attr, int data_dir, int flags)
{
	struct se_portal_group *se_tpg;
	int rc;

	se_tpg = se_sess->se_tpg;
	BUG_ON(!se_tpg);
	BUG_ON(se_cmd->se_tfo || se_cmd->se_sess);
	BUG_ON(in_interrupt());
	/*
	 * Initialize se_cmd for target operation.  From this point
	 * exceptions are handled by sending exception status via
	 * target_core_fabric_ops->queue_status() callback
	 */
	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
				data_length, data_dir, task_attr, sense);
	/*
	 * Obtain struct se_cmd->cmd_kref reference and add new cmd to
	 * se_sess->sess_cmd_list.  A second kref_get here is necessary
	 * for fabrics using TARGET_SCF_ACK_KREF that expect a second
	 * kref_put() to happen during fabric packet acknowledgement.
	 */
	target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
	/*
	 * Signal bidirectional data payloads to target-core
	 */
	if (flags & TARGET_SCF_BIDI_OP)
		se_cmd->se_cmd_flags |= SCF_BIDI;
	/*
	 * Locate se_lun pointer and attach it to struct se_cmd
	 */
	if (transport_lookup_cmd_lun(se_cmd, unpacked_lun) < 0) {
		transport_send_check_condition_and_sense(se_cmd,
				se_cmd->scsi_sense_reason, 0);
		target_put_sess_cmd(se_sess, se_cmd);
		return;
	}
	/*
	 * Sanitize CDBs via transport_generic_cmd_sequencer() and
	 * allocate the necessary tasks to complete the received CDB+data
	 */
	rc = transport_generic_allocate_tasks(se_cmd, cdb);
	if (rc != 0) {
		transport_generic_request_failure(se_cmd);
		return;
	}
	/*
	 * Dispatch se_cmd descriptor to se_lun->lun_se_dev backend
	 * for immediate execution of READs, otherwise wait for
	 * transport_generic_handle_data() to be called for WRITEs
	 * when fabric has filled the incoming buffer.
1713 */ 1714 transport_handle_cdb_direct(se_cmd); 1715 return; 1716 } 1717 EXPORT_SYMBOL(target_submit_cmd); 1718 1719 /* 1720 * Used by fabric module frontends defining a TFO->new_cmd_map() caller 1721 * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to 1722 * complete setup in TCM process context w/ TFO->new_cmd_map(). 1723 */ 1724 int transport_generic_handle_cdb_map( 1725 struct se_cmd *cmd) 1726 { 1727 if (!cmd->se_lun) { 1728 dump_stack(); 1729 pr_err("cmd->se_lun is NULL\n"); 1730 return -EINVAL; 1731 } 1732 1733 transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP, false); 1734 return 0; 1735 } 1736 EXPORT_SYMBOL(transport_generic_handle_cdb_map); 1737 1738 /* transport_generic_handle_data(): 1739 * 1740 * 1741 */ 1742 int transport_generic_handle_data( 1743 struct se_cmd *cmd) 1744 { 1745 /* 1746 * For the software fabric case, then we assume the nexus is being 1747 * failed/shutdown when signals are pending from the kthread context 1748 * caller, so we return a failure. For the HW target mode case running 1749 * in interrupt code, the signal_pending() check is skipped. 1750 */ 1751 if (!in_interrupt() && signal_pending(current)) 1752 return -EPERM; 1753 /* 1754 * If the received CDB has aleady been ABORTED by the generic 1755 * target engine, we now call transport_check_aborted_status() 1756 * to queue any delated TASK_ABORTED status for the received CDB to the 1757 * fabric module as we are expecting no further incoming DATA OUT 1758 * sequences at this point. 1759 */ 1760 if (transport_check_aborted_status(cmd, 1) != 0) 1761 return 0; 1762 1763 transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE, false); 1764 return 0; 1765 } 1766 EXPORT_SYMBOL(transport_generic_handle_data); 1767 1768 /* transport_generic_handle_tmr(): 1769 * 1770 * 1771 */ 1772 int transport_generic_handle_tmr( 1773 struct se_cmd *cmd) 1774 { 1775 transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR, false); 1776 return 0; 1777 } 1778 EXPORT_SYMBOL(transport_generic_handle_tmr); 1779 1780 /* 1781 * If the task is active, request it to be stopped and sleep until it 1782 * has completed. 1783 */ 1784 bool target_stop_task(struct se_task *task, unsigned long *flags) 1785 { 1786 struct se_cmd *cmd = task->task_se_cmd; 1787 bool was_active = false; 1788 1789 if (task->task_flags & TF_ACTIVE) { 1790 task->task_flags |= TF_REQUEST_STOP; 1791 spin_unlock_irqrestore(&cmd->t_state_lock, *flags); 1792 1793 pr_debug("Task %p waiting to complete\n", task); 1794 wait_for_completion(&task->task_stop_comp); 1795 pr_debug("Task %p stopped successfully\n", task); 1796 1797 spin_lock_irqsave(&cmd->t_state_lock, *flags); 1798 atomic_dec(&cmd->t_task_cdbs_left); 1799 task->task_flags &= ~(TF_ACTIVE | TF_REQUEST_STOP); 1800 was_active = true; 1801 } 1802 1803 return was_active; 1804 } 1805 1806 static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) 1807 { 1808 struct se_task *task, *task_tmp; 1809 unsigned long flags; 1810 int ret = 0; 1811 1812 pr_debug("ITT[0x%08x] - Stopping tasks\n", 1813 cmd->se_tfo->get_task_tag(cmd)); 1814 1815 /* 1816 * No tasks remain in the execution queue 1817 */ 1818 spin_lock_irqsave(&cmd->t_state_lock, flags); 1819 list_for_each_entry_safe(task, task_tmp, 1820 &cmd->t_task_list, t_list) { 1821 pr_debug("Processing task %p\n", task); 1822 /* 1823 * If the struct se_task has not been sent and is not active, 1824 * remove the struct se_task from the execution queue. 
1825 */ 1826 if (!(task->task_flags & (TF_ACTIVE | TF_SENT))) { 1827 spin_unlock_irqrestore(&cmd->t_state_lock, 1828 flags); 1829 transport_remove_task_from_execute_queue(task, 1830 cmd->se_dev); 1831 1832 pr_debug("Task %p removed from execute queue\n", task); 1833 spin_lock_irqsave(&cmd->t_state_lock, flags); 1834 continue; 1835 } 1836 1837 if (!target_stop_task(task, &flags)) { 1838 pr_debug("Task %p - did nothing\n", task); 1839 ret++; 1840 } 1841 } 1842 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 1843 1844 return ret; 1845 } 1846 1847 /* 1848 * Handle SAM-esque emulation for generic transport request failures. 1849 */ 1850 static void transport_generic_request_failure(struct se_cmd *cmd) 1851 { 1852 int ret = 0; 1853 1854 pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x" 1855 " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd), 1856 cmd->t_task_cdb[0]); 1857 pr_debug("-----[ i_state: %d t_state: %d scsi_sense_reason: %d\n", 1858 cmd->se_tfo->get_cmd_state(cmd), 1859 cmd->t_state, cmd->scsi_sense_reason); 1860 pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d" 1861 " t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --" 1862 " t_transport_active: %d t_transport_stop: %d" 1863 " t_transport_sent: %d\n", cmd->t_task_list_num, 1864 atomic_read(&cmd->t_task_cdbs_left), 1865 atomic_read(&cmd->t_task_cdbs_sent), 1866 atomic_read(&cmd->t_task_cdbs_ex_left), 1867 atomic_read(&cmd->t_transport_active), 1868 atomic_read(&cmd->t_transport_stop), 1869 atomic_read(&cmd->t_transport_sent)); 1870 1871 /* 1872 * For SAM Task Attribute emulation for failed struct se_cmd 1873 */ 1874 if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) 1875 transport_complete_task_attr(cmd); 1876 1877 switch (cmd->scsi_sense_reason) { 1878 case TCM_NON_EXISTENT_LUN: 1879 case TCM_UNSUPPORTED_SCSI_OPCODE: 1880 case TCM_INVALID_CDB_FIELD: 1881 case TCM_INVALID_PARAMETER_LIST: 1882 case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE: 1883 case TCM_UNKNOWN_MODE_PAGE: 1884 case TCM_WRITE_PROTECTED: 1885 case TCM_CHECK_CONDITION_ABORT_CMD: 1886 case TCM_CHECK_CONDITION_UNIT_ATTENTION: 1887 case TCM_CHECK_CONDITION_NOT_READY: 1888 break; 1889 case TCM_RESERVATION_CONFLICT: 1890 /* 1891 * No SENSE Data payload for this case, set SCSI Status 1892 * and queue the response to $FABRIC_MOD. 1893 * 1894 * Uses linux/include/scsi/scsi.h SAM status codes defs 1895 */ 1896 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; 1897 /* 1898 * For UA Interlock Code 11b, a RESERVATION CONFLICT will 1899 * establish a UNIT ATTENTION with PREVIOUS RESERVATION 1900 * CONFLICT STATUS. 1901 * 1902 * See spc4r17, section 7.4.6 Control Mode Page, Table 349 1903 */ 1904 if (cmd->se_sess && 1905 cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2) 1906 core_scsi3_ua_allocate(cmd->se_sess->se_node_acl, 1907 cmd->orig_fe_lun, 0x2C, 1908 ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); 1909 1910 ret = cmd->se_tfo->queue_status(cmd); 1911 if (ret == -EAGAIN || ret == -ENOMEM) 1912 goto queue_full; 1913 goto check_stop; 1914 default: 1915 pr_err("Unknown transport error for CDB 0x%02x: %d\n", 1916 cmd->t_task_cdb[0], cmd->scsi_sense_reason); 1917 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; 1918 break; 1919 } 1920 /* 1921 * If a fabric does not define a cmd->se_tfo->new_cmd_map caller, 1922 * make the call to transport_send_check_condition_and_sense() 1923 * directly. 
Otherwise expect the fabric to make the call to 1924 * transport_send_check_condition_and_sense() after handling 1925 * possible unsoliticied write data payloads. 1926 */ 1927 ret = transport_send_check_condition_and_sense(cmd, 1928 cmd->scsi_sense_reason, 0); 1929 if (ret == -EAGAIN || ret == -ENOMEM) 1930 goto queue_full; 1931 1932 check_stop: 1933 transport_lun_remove_cmd(cmd); 1934 if (!transport_cmd_check_stop_to_fabric(cmd)) 1935 ; 1936 return; 1937 1938 queue_full: 1939 cmd->t_state = TRANSPORT_COMPLETE_QF_OK; 1940 transport_handle_queue_full(cmd, cmd->se_dev); 1941 } 1942 1943 static inline u32 transport_lba_21(unsigned char *cdb) 1944 { 1945 return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3]; 1946 } 1947 1948 static inline u32 transport_lba_32(unsigned char *cdb) 1949 { 1950 return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5]; 1951 } 1952 1953 static inline unsigned long long transport_lba_64(unsigned char *cdb) 1954 { 1955 unsigned int __v1, __v2; 1956 1957 __v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5]; 1958 __v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; 1959 1960 return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32; 1961 } 1962 1963 /* 1964 * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs 1965 */ 1966 static inline unsigned long long transport_lba_64_ext(unsigned char *cdb) 1967 { 1968 unsigned int __v1, __v2; 1969 1970 __v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15]; 1971 __v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19]; 1972 1973 return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32; 1974 } 1975 1976 static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd) 1977 { 1978 unsigned long flags; 1979 1980 spin_lock_irqsave(&se_cmd->t_state_lock, flags); 1981 se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE; 1982 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); 1983 } 1984 1985 /* 1986 * Called from Fabric Module context from transport_execute_tasks() 1987 * 1988 * The return of this function determins if the tasks from struct se_cmd 1989 * get added to the execution queue in transport_execute_tasks(), 1990 * or are added to the delayed or ordered lists here. 1991 */ 1992 static inline int transport_execute_task_attr(struct se_cmd *cmd) 1993 { 1994 if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) 1995 return 1; 1996 /* 1997 * Check for the existence of HEAD_OF_QUEUE, and if true return 1 1998 * to allow the passed struct se_cmd list of tasks to the front of the list. 1999 */ 2000 if (cmd->sam_task_attr == MSG_HEAD_TAG) { 2001 pr_debug("Added HEAD_OF_QUEUE for CDB:" 2002 " 0x%02x, se_ordered_id: %u\n", 2003 cmd->t_task_cdb[0], 2004 cmd->se_ordered_id); 2005 return 1; 2006 } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) { 2007 atomic_inc(&cmd->se_dev->dev_ordered_sync); 2008 smp_mb__after_atomic_inc(); 2009 2010 pr_debug("Added ORDERED for CDB: 0x%02x to ordered" 2011 " list, se_ordered_id: %u\n", 2012 cmd->t_task_cdb[0], 2013 cmd->se_ordered_id); 2014 /* 2015 * Add ORDERED command to tail of execution queue if 2016 * no other older commands exist that need to be 2017 * completed first. 
2018 */ 2019 if (!atomic_read(&cmd->se_dev->simple_cmds)) 2020 return 1; 2021 } else { 2022 /* 2023 * For SIMPLE and UNTAGGED Task Attribute commands 2024 */ 2025 atomic_inc(&cmd->se_dev->simple_cmds); 2026 smp_mb__after_atomic_inc(); 2027 } 2028 /* 2029 * Otherwise if one or more outstanding ORDERED task attribute exist, 2030 * add the dormant task(s) built for the passed struct se_cmd to the 2031 * execution queue and become in Active state for this struct se_device. 2032 */ 2033 if (atomic_read(&cmd->se_dev->dev_ordered_sync) != 0) { 2034 /* 2035 * Otherwise, add cmd w/ tasks to delayed cmd queue that 2036 * will be drained upon completion of HEAD_OF_QUEUE task. 2037 */ 2038 spin_lock(&cmd->se_dev->delayed_cmd_lock); 2039 cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR; 2040 list_add_tail(&cmd->se_delayed_node, 2041 &cmd->se_dev->delayed_cmd_list); 2042 spin_unlock(&cmd->se_dev->delayed_cmd_lock); 2043 2044 pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to" 2045 " delayed CMD list, se_ordered_id: %u\n", 2046 cmd->t_task_cdb[0], cmd->sam_task_attr, 2047 cmd->se_ordered_id); 2048 /* 2049 * Return zero to let transport_execute_tasks() know 2050 * not to add the delayed tasks to the execution list. 2051 */ 2052 return 0; 2053 } 2054 /* 2055 * Otherwise, no ORDERED task attributes exist.. 2056 */ 2057 return 1; 2058 } 2059 2060 /* 2061 * Called from fabric module context in transport_generic_new_cmd() and 2062 * transport_generic_process_write() 2063 */ 2064 static int transport_execute_tasks(struct se_cmd *cmd) 2065 { 2066 int add_tasks; 2067 struct se_device *se_dev = cmd->se_dev; 2068 /* 2069 * Call transport_cmd_check_stop() to see if a fabric exception 2070 * has occurred that prevents execution. 2071 */ 2072 if (!transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING)) { 2073 /* 2074 * Check for SAM Task Attribute emulation and HEAD_OF_QUEUE 2075 * attribute for the tasks of the received struct se_cmd CDB 2076 */ 2077 add_tasks = transport_execute_task_attr(cmd); 2078 if (!add_tasks) 2079 goto execute_tasks; 2080 /* 2081 * __transport_execute_tasks() -> __transport_add_tasks_from_cmd() 2082 * adds associated se_tasks while holding dev->execute_task_lock 2083 * before I/O dispath to avoid a double spinlock access. 
2084 */ 2085 __transport_execute_tasks(se_dev, cmd); 2086 return 0; 2087 } 2088 2089 execute_tasks: 2090 __transport_execute_tasks(se_dev, NULL); 2091 return 0; 2092 } 2093 2094 /* 2095 * Called to check struct se_device tcq depth window, and once open pull struct se_task 2096 * from struct se_device->execute_task_list and 2097 * 2098 * Called from transport_processing_thread() 2099 */ 2100 static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *new_cmd) 2101 { 2102 int error; 2103 struct se_cmd *cmd = NULL; 2104 struct se_task *task = NULL; 2105 unsigned long flags; 2106 2107 check_depth: 2108 spin_lock_irq(&dev->execute_task_lock); 2109 if (new_cmd != NULL) 2110 __transport_add_tasks_from_cmd(new_cmd); 2111 2112 if (list_empty(&dev->execute_task_list)) { 2113 spin_unlock_irq(&dev->execute_task_lock); 2114 return 0; 2115 } 2116 task = list_first_entry(&dev->execute_task_list, 2117 struct se_task, t_execute_list); 2118 __transport_remove_task_from_execute_queue(task, dev); 2119 spin_unlock_irq(&dev->execute_task_lock); 2120 2121 cmd = task->task_se_cmd; 2122 spin_lock_irqsave(&cmd->t_state_lock, flags); 2123 task->task_flags |= (TF_ACTIVE | TF_SENT); 2124 atomic_inc(&cmd->t_task_cdbs_sent); 2125 2126 if (atomic_read(&cmd->t_task_cdbs_sent) == 2127 cmd->t_task_list_num) 2128 atomic_set(&cmd->t_transport_sent, 1); 2129 2130 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2131 2132 if (cmd->execute_task) 2133 error = cmd->execute_task(task); 2134 else 2135 error = dev->transport->do_task(task); 2136 if (error != 0) { 2137 spin_lock_irqsave(&cmd->t_state_lock, flags); 2138 task->task_flags &= ~TF_ACTIVE; 2139 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2140 atomic_set(&cmd->t_transport_sent, 0); 2141 transport_stop_tasks_for_cmd(cmd); 2142 transport_generic_request_failure(cmd); 2143 } 2144 2145 new_cmd = NULL; 2146 goto check_depth; 2147 2148 return 0; 2149 } 2150 2151 static inline u32 transport_get_sectors_6( 2152 unsigned char *cdb, 2153 struct se_cmd *cmd, 2154 int *ret) 2155 { 2156 struct se_device *dev = cmd->se_dev; 2157 2158 /* 2159 * Assume TYPE_DISK for non struct se_device objects. 2160 * Use 8-bit sector value. 2161 */ 2162 if (!dev) 2163 goto type_disk; 2164 2165 /* 2166 * Use 24-bit allocation length for TYPE_TAPE. 2167 */ 2168 if (dev->transport->get_device_type(dev) == TYPE_TAPE) 2169 return (u32)(cdb[2] << 16) + (cdb[3] << 8) + cdb[4]; 2170 2171 /* 2172 * Everything else assume TYPE_DISK Sector CDB location. 2173 * Use 8-bit sector value. SBC-3 says: 2174 * 2175 * A TRANSFER LENGTH field set to zero specifies that 256 2176 * logical blocks shall be written. Any other value 2177 * specifies the number of logical blocks that shall be 2178 * written. 2179 */ 2180 type_disk: 2181 return cdb[4] ? : 256; 2182 } 2183 2184 static inline u32 transport_get_sectors_10( 2185 unsigned char *cdb, 2186 struct se_cmd *cmd, 2187 int *ret) 2188 { 2189 struct se_device *dev = cmd->se_dev; 2190 2191 /* 2192 * Assume TYPE_DISK for non struct se_device objects. 2193 * Use 16-bit sector value. 2194 */ 2195 if (!dev) 2196 goto type_disk; 2197 2198 /* 2199 * XXX_10 is not defined in SSC, throw an exception 2200 */ 2201 if (dev->transport->get_device_type(dev) == TYPE_TAPE) { 2202 *ret = -EINVAL; 2203 return 0; 2204 } 2205 2206 /* 2207 * Everything else assume TYPE_DISK Sector CDB location. 2208 * Use 16-bit sector value. 
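	 * (For reference, the open-coded shift below is the same big-endian
	 * extraction that get_unaligned_be16(&cdb[7]) would perform on the
	 * TRANSFER LENGTH field of the 10-byte CDB.)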
2209 */ 2210 type_disk: 2211 return (u32)(cdb[7] << 8) + cdb[8]; 2212 } 2213 2214 static inline u32 transport_get_sectors_12( 2215 unsigned char *cdb, 2216 struct se_cmd *cmd, 2217 int *ret) 2218 { 2219 struct se_device *dev = cmd->se_dev; 2220 2221 /* 2222 * Assume TYPE_DISK for non struct se_device objects. 2223 * Use 32-bit sector value. 2224 */ 2225 if (!dev) 2226 goto type_disk; 2227 2228 /* 2229 * XXX_12 is not defined in SSC, throw an exception 2230 */ 2231 if (dev->transport->get_device_type(dev) == TYPE_TAPE) { 2232 *ret = -EINVAL; 2233 return 0; 2234 } 2235 2236 /* 2237 * Everything else assume TYPE_DISK Sector CDB location. 2238 * Use 32-bit sector value. 2239 */ 2240 type_disk: 2241 return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9]; 2242 } 2243 2244 static inline u32 transport_get_sectors_16( 2245 unsigned char *cdb, 2246 struct se_cmd *cmd, 2247 int *ret) 2248 { 2249 struct se_device *dev = cmd->se_dev; 2250 2251 /* 2252 * Assume TYPE_DISK for non struct se_device objects. 2253 * Use 32-bit sector value. 2254 */ 2255 if (!dev) 2256 goto type_disk; 2257 2258 /* 2259 * Use 24-bit allocation length for TYPE_TAPE. 2260 */ 2261 if (dev->transport->get_device_type(dev) == TYPE_TAPE) 2262 return (u32)(cdb[12] << 16) + (cdb[13] << 8) + cdb[14]; 2263 2264 type_disk: 2265 return (u32)(cdb[10] << 24) + (cdb[11] << 16) + 2266 (cdb[12] << 8) + cdb[13]; 2267 } 2268 2269 /* 2270 * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants 2271 */ 2272 static inline u32 transport_get_sectors_32( 2273 unsigned char *cdb, 2274 struct se_cmd *cmd, 2275 int *ret) 2276 { 2277 /* 2278 * Assume TYPE_DISK for non struct se_device objects. 2279 * Use 32-bit sector value. 2280 */ 2281 return (u32)(cdb[28] << 24) + (cdb[29] << 16) + 2282 (cdb[30] << 8) + cdb[31]; 2283 2284 } 2285 2286 static inline u32 transport_get_size( 2287 u32 sectors, 2288 unsigned char *cdb, 2289 struct se_cmd *cmd) 2290 { 2291 struct se_device *dev = cmd->se_dev; 2292 2293 if (dev->transport->get_device_type(dev) == TYPE_TAPE) { 2294 if (cdb[1] & 1) { /* sectors */ 2295 return dev->se_sub_dev->se_dev_attrib.block_size * sectors; 2296 } else /* bytes */ 2297 return sectors; 2298 } 2299 #if 0 2300 pr_debug("Returning block_size: %u, sectors: %u == %u for" 2301 " %s object\n", dev->se_sub_dev->se_dev_attrib.block_size, sectors, 2302 dev->se_sub_dev->se_dev_attrib.block_size * sectors, 2303 dev->transport->name); 2304 #endif 2305 return dev->se_sub_dev->se_dev_attrib.block_size * sectors; 2306 } 2307 2308 static void transport_xor_callback(struct se_cmd *cmd) 2309 { 2310 unsigned char *buf, *addr; 2311 struct scatterlist *sg; 2312 unsigned int offset; 2313 int i; 2314 int count; 2315 /* 2316 * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command 2317 * 2318 * 1) read the specified logical block(s); 2319 * 2) transfer logical blocks from the data-out buffer; 2320 * 3) XOR the logical blocks transferred from the data-out buffer with 2321 * the logical blocks read, storing the resulting XOR data in a buffer; 2322 * 4) if the DISABLE WRITE bit is set to zero, then write the logical 2323 * blocks transferred from the data-out buffer; and 2324 * 5) transfer the resulting XOR data to the data-in buffer. 
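	 *
	 * Only the XOR portion (steps 3 and 5) is emulated below: the
	 * data-out payload is copied into a temporary buffer and XORed into
	 * the BIDI read scatterlist, which is then returned to the initiator
	 * as the data-in payload.  The read and write themselves are carried
	 * out as normal backend task I/O.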
2325 */ 2326 buf = kmalloc(cmd->data_length, GFP_KERNEL); 2327 if (!buf) { 2328 pr_err("Unable to allocate xor_callback buf\n"); 2329 return; 2330 } 2331 /* 2332 * Copy the scatterlist WRITE buffer located at cmd->t_data_sg 2333 * into the locally allocated *buf 2334 */ 2335 sg_copy_to_buffer(cmd->t_data_sg, 2336 cmd->t_data_nents, 2337 buf, 2338 cmd->data_length); 2339 2340 /* 2341 * Now perform the XOR against the BIDI read memory located at 2342 * cmd->t_mem_bidi_list 2343 */ 2344 2345 offset = 0; 2346 for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) { 2347 addr = kmap_atomic(sg_page(sg), KM_USER0); 2348 if (!addr) 2349 goto out; 2350 2351 for (i = 0; i < sg->length; i++) 2352 *(addr + sg->offset + i) ^= *(buf + offset + i); 2353 2354 offset += sg->length; 2355 kunmap_atomic(addr, KM_USER0); 2356 } 2357 2358 out: 2359 kfree(buf); 2360 } 2361 2362 /* 2363 * Used to obtain Sense Data from underlying Linux/SCSI struct scsi_cmnd 2364 */ 2365 static int transport_get_sense_data(struct se_cmd *cmd) 2366 { 2367 unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL; 2368 struct se_device *dev = cmd->se_dev; 2369 struct se_task *task = NULL, *task_tmp; 2370 unsigned long flags; 2371 u32 offset = 0; 2372 2373 WARN_ON(!cmd->se_lun); 2374 2375 if (!dev) 2376 return 0; 2377 2378 spin_lock_irqsave(&cmd->t_state_lock, flags); 2379 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { 2380 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2381 return 0; 2382 } 2383 2384 list_for_each_entry_safe(task, task_tmp, 2385 &cmd->t_task_list, t_list) { 2386 if (!(task->task_flags & TF_HAS_SENSE)) 2387 continue; 2388 2389 if (!dev->transport->get_sense_buffer) { 2390 pr_err("dev->transport->get_sense_buffer" 2391 " is NULL\n"); 2392 continue; 2393 } 2394 2395 sense_buffer = dev->transport->get_sense_buffer(task); 2396 if (!sense_buffer) { 2397 pr_err("ITT[0x%08x]_TASK[%p]: Unable to locate" 2398 " sense buffer for task with sense\n", 2399 cmd->se_tfo->get_task_tag(cmd), task); 2400 continue; 2401 } 2402 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2403 2404 offset = cmd->se_tfo->set_fabric_sense_len(cmd, 2405 TRANSPORT_SENSE_BUFFER); 2406 2407 memcpy(&buffer[offset], sense_buffer, 2408 TRANSPORT_SENSE_BUFFER); 2409 cmd->scsi_status = task->task_scsi_status; 2410 /* Automatically padded */ 2411 cmd->scsi_sense_length = 2412 (TRANSPORT_SENSE_BUFFER + offset); 2413 2414 pr_debug("HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x" 2415 " and sense\n", 2416 dev->se_hba->hba_id, dev->transport->name, 2417 cmd->scsi_status); 2418 return 0; 2419 } 2420 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2421 2422 return -1; 2423 } 2424 2425 static inline long long transport_dev_end_lba(struct se_device *dev) 2426 { 2427 return dev->transport->get_blocks(dev) + 1; 2428 } 2429 2430 static int transport_cmd_get_valid_sectors(struct se_cmd *cmd) 2431 { 2432 struct se_device *dev = cmd->se_dev; 2433 u32 sectors; 2434 2435 if (dev->transport->get_device_type(dev) != TYPE_DISK) 2436 return 0; 2437 2438 sectors = (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size); 2439 2440 if ((cmd->t_task_lba + sectors) > transport_dev_end_lba(dev)) { 2441 pr_err("LBA: %llu Sectors: %u exceeds" 2442 " transport_dev_end_lba(): %llu\n", 2443 cmd->t_task_lba, sectors, 2444 transport_dev_end_lba(dev)); 2445 return -EINVAL; 2446 } 2447 2448 return 0; 2449 } 2450 2451 static int target_check_write_same_discard(unsigned char *flags, struct se_device *dev) 2452 { 2453 /* 2454 * Determine if the received WRITE_SAME 
is used for direct
	 * passthrough into Linux/SCSI with struct request via TCM/pSCSI
	 * or we are signaling the use of internal WRITE_SAME + UNMAP=1
	 * emulation for -> Linux/BLOCK discard with TCM/IBLOCK code.
	 */
	int passthrough = (dev->transport->transport_type ==
				TRANSPORT_PLUGIN_PHBA_PDEV);

	if (!passthrough) {
		if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
			pr_err("WRITE_SAME PBDATA and LBDATA"
				" bits not supported for Block Discard"
				" Emulation\n");
			return -ENOSYS;
		}
		/*
		 * Currently for the emulated case we only accept
		 * tpws with the UNMAP=1 bit set.
		 */
		if (!(flags[0] & 0x08)) {
			pr_err("WRITE_SAME w/o UNMAP bit not"
				" supported for Block Discard Emulation\n");
			return -ENOSYS;
		}
	}

	return 0;
}

/* transport_generic_cmd_sequencer():
 *
 *	Generic Command Sequencer that should work for most DAS transport
 *	drivers.
 *
 *	Called from transport_generic_allocate_tasks() in the $FABRIC_MOD
 *	RX Thread.
 *
 * FIXME: Need to support other SCSI OPCODES as well.
 */
static int transport_generic_cmd_sequencer(
	struct se_cmd *cmd,
	unsigned char *cdb)
{
	struct se_device *dev = cmd->se_dev;
	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
	int ret = 0, sector_ret = 0, passthrough;
	u32 sectors = 0, size = 0, pr_reg_type = 0;
	u16 service_action;
	u8 alua_ascq = 0;
	/*
	 * Check for an existing UNIT ATTENTION condition
	 */
	if (core_scsi3_ua_check(cmd, cdb) < 0) {
		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION;
		return -EINVAL;
	}
	/*
	 * Check status of Asymmetric Logical Unit Assignment port
	 */
	ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq);
	if (ret != 0) {
		/*
		 * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
		 * The ALUA additional sense code qualifier (ASCQ) is determined
		 * by the ALUA primary or secondary access state.
		 */
		if (ret > 0) {
#if 0
			pr_debug("[%s]: ALUA TG Port not available,"
				" SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n",
				cmd->se_tfo->get_fabric_name(), alua_ascq);
#endif
			transport_set_sense_codes(cmd, 0x04, alua_ascq);
			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY;
			return -EINVAL;
		}
		goto out_invalid_cdb_field;
	}
	/*
	 * Check status for SPC-3 Persistent Reservations
	 */
	if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type) != 0) {
		if (su_dev->t10_pr.pr_ops.t10_seq_non_holder(
					cmd, cdb, pr_reg_type) != 0) {
			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
			cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
			return -EBUSY;
		}
		/*
		 * This means the CDB is allowed for the SCSI Initiator port
		 * when said port is *NOT* holding the legacy SPC-2 or
		 * SPC-3 Persistent Reservation.
		 */
	}

	/*
	 * If we operate in passthrough mode we skip most CDB emulation and
	 * instead hand the commands down to the physical SCSI device.
	 */
	passthrough =
		(dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV);

	switch (cdb[0]) {
	case READ_6:
		sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case READ_10:
		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case READ_12:
		sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case READ_16:
		sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_64(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case WRITE_6:
		sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case WRITE_10:
		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_32(cdb);
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case WRITE_12:
		sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_32(cdb);
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case WRITE_16:
		sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_64(cdb);
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case XDWRITEREAD_10:
		if ((cmd->data_direction != DMA_TO_DEVICE) ||
		    !(cmd->se_cmd_flags & SCF_BIDI))
			goto out_invalid_cdb_field;
		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;

		/*
		 * Do not allow BIDI commands for passthrough mode.
		 */
		if (passthrough)
			goto out_unsupported_cdb;

		/*
		 * Setup BIDI XOR callback to be run after I/O completion.
		 */
		cmd->transport_complete_callback = &transport_xor_callback;
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		break;
	case VARIABLE_LENGTH_CMD:
		service_action = get_unaligned_be16(&cdb[8]);
		switch (service_action) {
		case XDWRITEREAD_32:
			sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
			if (sector_ret)
				goto out_unsupported_cdb;
			size = transport_get_size(sectors, cdb, cmd);
			/*
			 * Use WRITE_32 and READ_32 opcodes for the emulated
			 * XDWRITE_READ_32 logic.
			 */
			cmd->t_task_lba = transport_lba_64_ext(cdb);
			cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;

			/*
			 * Do not allow BIDI commands for passthrough mode.
			 */
			if (passthrough)
				goto out_unsupported_cdb;

			/*
			 * Setup BIDI XOR callback to be run after I/O
			 * completion.
			 */
			cmd->transport_complete_callback = &transport_xor_callback;
			if (cdb[1] & 0x8)
				cmd->se_cmd_flags |= SCF_FUA;
			break;
		case WRITE_SAME_32:
			sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
			if (sector_ret)
				goto out_unsupported_cdb;

			if (sectors)
				size = transport_get_size(1, cdb, cmd);
			else {
				pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
				       " supported\n");
				goto out_invalid_cdb_field;
			}

			cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
			cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;

			if (target_check_write_same_discard(&cdb[10], dev) < 0)
				goto out_unsupported_cdb;
			if (!passthrough)
				cmd->execute_task = target_emulate_write_same;
			break;
		default:
			pr_err("VARIABLE_LENGTH_CMD service action"
				" 0x%04x not supported\n", service_action);
			goto out_unsupported_cdb;
		}
		break;
	case MAINTENANCE_IN:
		if (dev->transport->get_device_type(dev) != TYPE_ROM) {
			/* MAINTENANCE_IN from SCC-2 */
			/*
			 * Check for emulated MI_REPORT_TARGET_PGS.
2715 */ 2716 if (cdb[1] == MI_REPORT_TARGET_PGS && 2717 su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) { 2718 cmd->execute_task = 2719 target_emulate_report_target_port_groups; 2720 } 2721 size = (cdb[6] << 24) | (cdb[7] << 16) | 2722 (cdb[8] << 8) | cdb[9]; 2723 } else { 2724 /* GPCMD_SEND_KEY from multi media commands */ 2725 size = (cdb[8] << 8) + cdb[9]; 2726 } 2727 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2728 break; 2729 case MODE_SELECT: 2730 size = cdb[4]; 2731 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2732 break; 2733 case MODE_SELECT_10: 2734 size = (cdb[7] << 8) + cdb[8]; 2735 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2736 break; 2737 case MODE_SENSE: 2738 size = cdb[4]; 2739 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2740 if (!passthrough) 2741 cmd->execute_task = target_emulate_modesense; 2742 break; 2743 case MODE_SENSE_10: 2744 size = (cdb[7] << 8) + cdb[8]; 2745 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2746 if (!passthrough) 2747 cmd->execute_task = target_emulate_modesense; 2748 break; 2749 case GPCMD_READ_BUFFER_CAPACITY: 2750 case GPCMD_SEND_OPC: 2751 case LOG_SELECT: 2752 case LOG_SENSE: 2753 size = (cdb[7] << 8) + cdb[8]; 2754 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2755 break; 2756 case READ_BLOCK_LIMITS: 2757 size = READ_BLOCK_LEN; 2758 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2759 break; 2760 case GPCMD_GET_CONFIGURATION: 2761 case GPCMD_READ_FORMAT_CAPACITIES: 2762 case GPCMD_READ_DISC_INFO: 2763 case GPCMD_READ_TRACK_RZONE_INFO: 2764 size = (cdb[7] << 8) + cdb[8]; 2765 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2766 break; 2767 case PERSISTENT_RESERVE_IN: 2768 if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS) 2769 cmd->execute_task = target_scsi3_emulate_pr_in; 2770 size = (cdb[7] << 8) + cdb[8]; 2771 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2772 break; 2773 case PERSISTENT_RESERVE_OUT: 2774 if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS) 2775 cmd->execute_task = target_scsi3_emulate_pr_out; 2776 size = (cdb[7] << 8) + cdb[8]; 2777 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2778 break; 2779 case GPCMD_MECHANISM_STATUS: 2780 case GPCMD_READ_DVD_STRUCTURE: 2781 size = (cdb[8] << 8) + cdb[9]; 2782 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2783 break; 2784 case READ_POSITION: 2785 size = READ_POSITION_LEN; 2786 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2787 break; 2788 case MAINTENANCE_OUT: 2789 if (dev->transport->get_device_type(dev) != TYPE_ROM) { 2790 /* MAINTENANCE_OUT from SCC-2 2791 * 2792 * Check for emulated MO_SET_TARGET_PGS. 2793 */ 2794 if (cdb[1] == MO_SET_TARGET_PGS && 2795 su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) { 2796 cmd->execute_task = 2797 target_emulate_set_target_port_groups; 2798 } 2799 2800 size = (cdb[6] << 24) | (cdb[7] << 16) | 2801 (cdb[8] << 8) | cdb[9]; 2802 } else { 2803 /* GPCMD_REPORT_KEY from multi media commands */ 2804 size = (cdb[8] << 8) + cdb[9]; 2805 } 2806 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2807 break; 2808 case INQUIRY: 2809 size = (cdb[3] << 8) + cdb[4]; 2810 /* 2811 * Do implict HEAD_OF_QUEUE processing for INQUIRY. 
2812 * See spc4r17 section 5.3 2813 */ 2814 if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) 2815 cmd->sam_task_attr = MSG_HEAD_TAG; 2816 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2817 if (!passthrough) 2818 cmd->execute_task = target_emulate_inquiry; 2819 break; 2820 case READ_BUFFER: 2821 size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; 2822 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2823 break; 2824 case READ_CAPACITY: 2825 size = READ_CAP_LEN; 2826 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2827 if (!passthrough) 2828 cmd->execute_task = target_emulate_readcapacity; 2829 break; 2830 case READ_MEDIA_SERIAL_NUMBER: 2831 case SECURITY_PROTOCOL_IN: 2832 case SECURITY_PROTOCOL_OUT: 2833 size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; 2834 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2835 break; 2836 case SERVICE_ACTION_IN: 2837 switch (cmd->t_task_cdb[1] & 0x1f) { 2838 case SAI_READ_CAPACITY_16: 2839 if (!passthrough) 2840 cmd->execute_task = 2841 target_emulate_readcapacity_16; 2842 break; 2843 default: 2844 if (passthrough) 2845 break; 2846 2847 pr_err("Unsupported SA: 0x%02x\n", 2848 cmd->t_task_cdb[1] & 0x1f); 2849 goto out_unsupported_cdb; 2850 } 2851 /*FALLTHROUGH*/ 2852 case ACCESS_CONTROL_IN: 2853 case ACCESS_CONTROL_OUT: 2854 case EXTENDED_COPY: 2855 case READ_ATTRIBUTE: 2856 case RECEIVE_COPY_RESULTS: 2857 case WRITE_ATTRIBUTE: 2858 size = (cdb[10] << 24) | (cdb[11] << 16) | 2859 (cdb[12] << 8) | cdb[13]; 2860 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2861 break; 2862 case RECEIVE_DIAGNOSTIC: 2863 case SEND_DIAGNOSTIC: 2864 size = (cdb[3] << 8) | cdb[4]; 2865 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2866 break; 2867 /* #warning FIXME: Figure out correct GPCMD_READ_CD blocksize. */ 2868 #if 0 2869 case GPCMD_READ_CD: 2870 sectors = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; 2871 size = (2336 * sectors); 2872 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2873 break; 2874 #endif 2875 case READ_TOC: 2876 size = cdb[8]; 2877 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2878 break; 2879 case REQUEST_SENSE: 2880 size = cdb[4]; 2881 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2882 if (!passthrough) 2883 cmd->execute_task = target_emulate_request_sense; 2884 break; 2885 case READ_ELEMENT_STATUS: 2886 size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9]; 2887 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2888 break; 2889 case WRITE_BUFFER: 2890 size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; 2891 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 2892 break; 2893 case RESERVE: 2894 case RESERVE_10: 2895 /* 2896 * The SPC-2 RESERVE does not contain a size in the SCSI CDB. 2897 * Assume the passthrough or $FABRIC_MOD will tell us about it. 2898 */ 2899 if (cdb[0] == RESERVE_10) 2900 size = (cdb[7] << 8) | cdb[8]; 2901 else 2902 size = cmd->data_length; 2903 2904 /* 2905 * Setup the legacy emulated handler for SPC-2 and 2906 * >= SPC-3 compatible reservation handling (CRH=1) 2907 * Otherwise, we assume the underlying SCSI logic is 2908 * is running in SPC_PASSTHROUGH, and wants reservations 2909 * emulation disabled. 2910 */ 2911 if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH) 2912 cmd->execute_task = target_scsi2_reservation_reserve; 2913 cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB; 2914 break; 2915 case RELEASE: 2916 case RELEASE_10: 2917 /* 2918 * The SPC-2 RELEASE does not contain a size in the SCSI CDB. 2919 * Assume the passthrough or $FABRIC_MOD will tell us about it. 
		 */
		if (cdb[0] == RELEASE_10)
			size = (cdb[7] << 8) | cdb[8];
		else
			size = cmd->data_length;

		if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
			cmd->execute_task = target_scsi2_reservation_release;
		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
		break;
	case SYNCHRONIZE_CACHE:
	case 0x91: /* SYNCHRONIZE_CACHE_16: */
		/*
		 * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
		 */
		if (cdb[0] == SYNCHRONIZE_CACHE) {
			sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
			cmd->t_task_lba = transport_lba_32(cdb);
		} else {
			sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
			cmd->t_task_lba = transport_lba_64(cdb);
		}
		if (sector_ret)
			goto out_unsupported_cdb;

		size = transport_get_size(sectors, cdb, cmd);
		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;

		if (passthrough)
			break;

		/*
		 * Check to ensure that LBA + Range does not exceed past end of
		 * device for IBLOCK and FILEIO ->do_sync_cache() backend calls
		 */
		if ((cmd->t_task_lba != 0) || (sectors != 0)) {
			if (transport_cmd_get_valid_sectors(cmd) < 0)
				goto out_invalid_cdb_field;
		}
		cmd->execute_task = target_emulate_synchronize_cache;
		break;
	case UNMAP:
		size = get_unaligned_be16(&cdb[7]);
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		if (!passthrough)
			cmd->execute_task = target_emulate_unmap;
		break;
	case WRITE_SAME_16:
		sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;

		if (sectors)
			size = transport_get_size(1, cdb, cmd);
		else {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			goto out_invalid_cdb_field;
		}

		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;

		if (target_check_write_same_discard(&cdb[1], dev) < 0)
			goto out_unsupported_cdb;
		if (!passthrough)
			cmd->execute_task = target_emulate_write_same;
		break;
	case WRITE_SAME:
		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;

		if (sectors)
			size = transport_get_size(1, cdb, cmd);
		else {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			goto out_invalid_cdb_field;
		}

		cmd->t_task_lba = get_unaligned_be32(&cdb[2]);
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		/*
		 * Follow sbcr26 with WRITE_SAME (10) and check for the existence
		 * of byte 1 bit 3 UNMAP instead of original reserved field
		 */
		if (target_check_write_same_discard(&cdb[1], dev) < 0)
			goto out_unsupported_cdb;
		if (!passthrough)
			cmd->execute_task = target_emulate_write_same;
		break;
	case ALLOW_MEDIUM_REMOVAL:
	case ERASE:
	case REZERO_UNIT:
	case SEEK_10:
	case SPACE:
	case START_STOP:
	case TEST_UNIT_READY:
	case VERIFY:
	case WRITE_FILEMARKS:
		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
		if (!passthrough)
			cmd->execute_task = target_emulate_noop;
		break;
	case GPCMD_CLOSE_TRACK:
	case INITIALIZE_ELEMENT_STATUS:
	case GPCMD_LOAD_UNLOAD:
	case GPCMD_SET_SPEED:
	case MOVE_MEDIUM:
		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
		break;
	case REPORT_LUNS:
		cmd->execute_task = target_report_luns;
		size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
		/*
		 * Do
implict HEAD_OF_QUEUE processing for REPORT_LUNS 3035 * See spc4r17 section 5.3 3036 */ 3037 if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) 3038 cmd->sam_task_attr = MSG_HEAD_TAG; 3039 cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; 3040 break; 3041 default: 3042 pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode" 3043 " 0x%02x, sending CHECK_CONDITION.\n", 3044 cmd->se_tfo->get_fabric_name(), cdb[0]); 3045 goto out_unsupported_cdb; 3046 } 3047 3048 if (size != cmd->data_length) { 3049 pr_warn("TARGET_CORE[%s]: Expected Transfer Length:" 3050 " %u does not match SCSI CDB Length: %u for SAM Opcode:" 3051 " 0x%02x\n", cmd->se_tfo->get_fabric_name(), 3052 cmd->data_length, size, cdb[0]); 3053 3054 cmd->cmd_spdtl = size; 3055 3056 if (cmd->data_direction == DMA_TO_DEVICE) { 3057 pr_err("Rejecting underflow/overflow" 3058 " WRITE data\n"); 3059 goto out_invalid_cdb_field; 3060 } 3061 /* 3062 * Reject READ_* or WRITE_* with overflow/underflow for 3063 * type SCF_SCSI_DATA_SG_IO_CDB. 3064 */ 3065 if (!ret && (dev->se_sub_dev->se_dev_attrib.block_size != 512)) { 3066 pr_err("Failing OVERFLOW/UNDERFLOW for LBA op" 3067 " CDB on non 512-byte sector setup subsystem" 3068 " plugin: %s\n", dev->transport->name); 3069 /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */ 3070 goto out_invalid_cdb_field; 3071 } 3072 3073 if (size > cmd->data_length) { 3074 cmd->se_cmd_flags |= SCF_OVERFLOW_BIT; 3075 cmd->residual_count = (size - cmd->data_length); 3076 } else { 3077 cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; 3078 cmd->residual_count = (cmd->data_length - size); 3079 } 3080 cmd->data_length = size; 3081 } 3082 3083 /* reject any command that we don't have a handler for */ 3084 if (!(passthrough || cmd->execute_task || 3085 (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB))) 3086 goto out_unsupported_cdb; 3087 3088 transport_set_supported_SAM_opcode(cmd); 3089 return ret; 3090 3091 out_unsupported_cdb: 3092 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 3093 cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; 3094 return -EINVAL; 3095 out_invalid_cdb_field: 3096 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 3097 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; 3098 return -EINVAL; 3099 } 3100 3101 /* 3102 * Called from I/O completion to determine which dormant/delayed 3103 * and ordered cmds need to have their tasks added to the execution queue. 
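 * The walk below drains dev->delayed_cmd_list up to and including the
 * next ORDERED command, which re-establishes the task attribute
 * blocking boundary.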
3104 */ 3105 static void transport_complete_task_attr(struct se_cmd *cmd) 3106 { 3107 struct se_device *dev = cmd->se_dev; 3108 struct se_cmd *cmd_p, *cmd_tmp; 3109 int new_active_tasks = 0; 3110 3111 if (cmd->sam_task_attr == MSG_SIMPLE_TAG) { 3112 atomic_dec(&dev->simple_cmds); 3113 smp_mb__after_atomic_dec(); 3114 dev->dev_cur_ordered_id++; 3115 pr_debug("Incremented dev->dev_cur_ordered_id: %u for" 3116 " SIMPLE: %u\n", dev->dev_cur_ordered_id, 3117 cmd->se_ordered_id); 3118 } else if (cmd->sam_task_attr == MSG_HEAD_TAG) { 3119 dev->dev_cur_ordered_id++; 3120 pr_debug("Incremented dev_cur_ordered_id: %u for" 3121 " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id, 3122 cmd->se_ordered_id); 3123 } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) { 3124 atomic_dec(&dev->dev_ordered_sync); 3125 smp_mb__after_atomic_dec(); 3126 3127 dev->dev_cur_ordered_id++; 3128 pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:" 3129 " %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id); 3130 } 3131 /* 3132 * Process all commands up to the last received 3133 * ORDERED task attribute which requires another blocking 3134 * boundary 3135 */ 3136 spin_lock(&dev->delayed_cmd_lock); 3137 list_for_each_entry_safe(cmd_p, cmd_tmp, 3138 &dev->delayed_cmd_list, se_delayed_node) { 3139 3140 list_del(&cmd_p->se_delayed_node); 3141 spin_unlock(&dev->delayed_cmd_lock); 3142 3143 pr_debug("Calling add_tasks() for" 3144 " cmd_p: 0x%02x Task Attr: 0x%02x" 3145 " Dormant -> Active, se_ordered_id: %u\n", 3146 cmd_p->t_task_cdb[0], 3147 cmd_p->sam_task_attr, cmd_p->se_ordered_id); 3148 3149 transport_add_tasks_from_cmd(cmd_p); 3150 new_active_tasks++; 3151 3152 spin_lock(&dev->delayed_cmd_lock); 3153 if (cmd_p->sam_task_attr == MSG_ORDERED_TAG) 3154 break; 3155 } 3156 spin_unlock(&dev->delayed_cmd_lock); 3157 /* 3158 * If new tasks have become active, wake up the transport thread 3159 * to do the processing of the Active tasks. 
3160 */ 3161 if (new_active_tasks != 0) 3162 wake_up_interruptible(&dev->dev_queue_obj.thread_wq); 3163 } 3164 3165 static void transport_complete_qf(struct se_cmd *cmd) 3166 { 3167 int ret = 0; 3168 3169 if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) 3170 transport_complete_task_attr(cmd); 3171 3172 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { 3173 ret = cmd->se_tfo->queue_status(cmd); 3174 if (ret) 3175 goto out; 3176 } 3177 3178 switch (cmd->data_direction) { 3179 case DMA_FROM_DEVICE: 3180 ret = cmd->se_tfo->queue_data_in(cmd); 3181 break; 3182 case DMA_TO_DEVICE: 3183 if (cmd->t_bidi_data_sg) { 3184 ret = cmd->se_tfo->queue_data_in(cmd); 3185 if (ret < 0) 3186 break; 3187 } 3188 /* Fall through for DMA_TO_DEVICE */ 3189 case DMA_NONE: 3190 ret = cmd->se_tfo->queue_status(cmd); 3191 break; 3192 default: 3193 break; 3194 } 3195 3196 out: 3197 if (ret < 0) { 3198 transport_handle_queue_full(cmd, cmd->se_dev); 3199 return; 3200 } 3201 transport_lun_remove_cmd(cmd); 3202 transport_cmd_check_stop_to_fabric(cmd); 3203 } 3204 3205 static void transport_handle_queue_full( 3206 struct se_cmd *cmd, 3207 struct se_device *dev) 3208 { 3209 spin_lock_irq(&dev->qf_cmd_lock); 3210 list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list); 3211 atomic_inc(&dev->dev_qf_count); 3212 smp_mb__after_atomic_inc(); 3213 spin_unlock_irq(&cmd->se_dev->qf_cmd_lock); 3214 3215 schedule_work(&cmd->se_dev->qf_work_queue); 3216 } 3217 3218 static void target_complete_ok_work(struct work_struct *work) 3219 { 3220 struct se_cmd *cmd = container_of(work, struct se_cmd, work); 3221 int reason = 0, ret; 3222 3223 /* 3224 * Check if we need to move delayed/dormant tasks from cmds on the 3225 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task 3226 * Attribute. 3227 */ 3228 if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) 3229 transport_complete_task_attr(cmd); 3230 /* 3231 * Check to schedule QUEUE_FULL work, or execute an existing 3232 * cmd->transport_qf_callback() 3233 */ 3234 if (atomic_read(&cmd->se_dev->dev_qf_count) != 0) 3235 schedule_work(&cmd->se_dev->qf_work_queue); 3236 3237 /* 3238 * Check if we need to retrieve a sense buffer from 3239 * the struct se_cmd in question. 3240 */ 3241 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { 3242 if (transport_get_sense_data(cmd) < 0) 3243 reason = TCM_NON_EXISTENT_LUN; 3244 3245 /* 3246 * Only set when an struct se_task->task_scsi_status returned 3247 * a non GOOD status. 3248 */ 3249 if (cmd->scsi_status) { 3250 ret = transport_send_check_condition_and_sense( 3251 cmd, reason, 1); 3252 if (ret == -EAGAIN || ret == -ENOMEM) 3253 goto queue_full; 3254 3255 transport_lun_remove_cmd(cmd); 3256 transport_cmd_check_stop_to_fabric(cmd); 3257 return; 3258 } 3259 } 3260 /* 3261 * Check for a callback, used by amongst other things 3262 * XDWRITE_READ_10 emulation. 
3263 */ 3264 if (cmd->transport_complete_callback) 3265 cmd->transport_complete_callback(cmd); 3266 3267 switch (cmd->data_direction) { 3268 case DMA_FROM_DEVICE: 3269 spin_lock(&cmd->se_lun->lun_sep_lock); 3270 if (cmd->se_lun->lun_sep) { 3271 cmd->se_lun->lun_sep->sep_stats.tx_data_octets += 3272 cmd->data_length; 3273 } 3274 spin_unlock(&cmd->se_lun->lun_sep_lock); 3275 3276 ret = cmd->se_tfo->queue_data_in(cmd); 3277 if (ret == -EAGAIN || ret == -ENOMEM) 3278 goto queue_full; 3279 break; 3280 case DMA_TO_DEVICE: 3281 spin_lock(&cmd->se_lun->lun_sep_lock); 3282 if (cmd->se_lun->lun_sep) { 3283 cmd->se_lun->lun_sep->sep_stats.rx_data_octets += 3284 cmd->data_length; 3285 } 3286 spin_unlock(&cmd->se_lun->lun_sep_lock); 3287 /* 3288 * Check if we need to send READ payload for BIDI-COMMAND 3289 */ 3290 if (cmd->t_bidi_data_sg) { 3291 spin_lock(&cmd->se_lun->lun_sep_lock); 3292 if (cmd->se_lun->lun_sep) { 3293 cmd->se_lun->lun_sep->sep_stats.tx_data_octets += 3294 cmd->data_length; 3295 } 3296 spin_unlock(&cmd->se_lun->lun_sep_lock); 3297 ret = cmd->se_tfo->queue_data_in(cmd); 3298 if (ret == -EAGAIN || ret == -ENOMEM) 3299 goto queue_full; 3300 break; 3301 } 3302 /* Fall through for DMA_TO_DEVICE */ 3303 case DMA_NONE: 3304 ret = cmd->se_tfo->queue_status(cmd); 3305 if (ret == -EAGAIN || ret == -ENOMEM) 3306 goto queue_full; 3307 break; 3308 default: 3309 break; 3310 } 3311 3312 transport_lun_remove_cmd(cmd); 3313 transport_cmd_check_stop_to_fabric(cmd); 3314 return; 3315 3316 queue_full: 3317 pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p," 3318 " data_direction: %d\n", cmd, cmd->data_direction); 3319 cmd->t_state = TRANSPORT_COMPLETE_QF_OK; 3320 transport_handle_queue_full(cmd, cmd->se_dev); 3321 } 3322 3323 static void transport_free_dev_tasks(struct se_cmd *cmd) 3324 { 3325 struct se_task *task, *task_tmp; 3326 unsigned long flags; 3327 LIST_HEAD(dispose_list); 3328 3329 spin_lock_irqsave(&cmd->t_state_lock, flags); 3330 list_for_each_entry_safe(task, task_tmp, 3331 &cmd->t_task_list, t_list) { 3332 if (!(task->task_flags & TF_ACTIVE)) 3333 list_move_tail(&task->t_list, &dispose_list); 3334 } 3335 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3336 3337 while (!list_empty(&dispose_list)) { 3338 task = list_first_entry(&dispose_list, struct se_task, t_list); 3339 3340 if (task->task_sg != cmd->t_data_sg && 3341 task->task_sg != cmd->t_bidi_data_sg) 3342 kfree(task->task_sg); 3343 3344 list_del(&task->t_list); 3345 3346 cmd->se_dev->transport->free_task(task); 3347 } 3348 } 3349 3350 static inline void transport_free_sgl(struct scatterlist *sgl, int nents) 3351 { 3352 struct scatterlist *sg; 3353 int count; 3354 3355 for_each_sg(sgl, sg, nents, count) 3356 __free_page(sg_page(sg)); 3357 3358 kfree(sgl); 3359 } 3360 3361 static inline void transport_free_pages(struct se_cmd *cmd) 3362 { 3363 if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) 3364 return; 3365 3366 transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents); 3367 cmd->t_data_sg = NULL; 3368 cmd->t_data_nents = 0; 3369 3370 transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents); 3371 cmd->t_bidi_data_sg = NULL; 3372 cmd->t_bidi_data_nents = 0; 3373 } 3374 3375 /** 3376 * transport_release_cmd - free a command 3377 * @cmd: command to free 3378 * 3379 * This routine unconditionally frees a command, and reference counting 3380 * or list removal must be done in the caller. 
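 * The reference-counted path is transport_put_cmd() below, which drops
 * the t_fe_count/t_se_count references and frees pages and tasks before
 * calling this routine.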
3381 */ 3382 static void transport_release_cmd(struct se_cmd *cmd) 3383 { 3384 BUG_ON(!cmd->se_tfo); 3385 3386 if (cmd->se_tmr_req) 3387 core_tmr_release_req(cmd->se_tmr_req); 3388 if (cmd->t_task_cdb != cmd->__t_task_cdb) 3389 kfree(cmd->t_task_cdb); 3390 /* 3391 * If this cmd has been setup with target_get_sess_cmd(), drop 3392 * the kref and call ->release_cmd() in kref callback. 3393 */ 3394 if (cmd->check_release != 0) { 3395 target_put_sess_cmd(cmd->se_sess, cmd); 3396 return; 3397 } 3398 cmd->se_tfo->release_cmd(cmd); 3399 } 3400 3401 /** 3402 * transport_put_cmd - release a reference to a command 3403 * @cmd: command to release 3404 * 3405 * This routine releases our reference to the command and frees it if possible. 3406 */ 3407 static void transport_put_cmd(struct se_cmd *cmd) 3408 { 3409 unsigned long flags; 3410 int free_tasks = 0; 3411 3412 spin_lock_irqsave(&cmd->t_state_lock, flags); 3413 if (atomic_read(&cmd->t_fe_count)) { 3414 if (!atomic_dec_and_test(&cmd->t_fe_count)) 3415 goto out_busy; 3416 } 3417 3418 if (atomic_read(&cmd->t_se_count)) { 3419 if (!atomic_dec_and_test(&cmd->t_se_count)) 3420 goto out_busy; 3421 } 3422 3423 if (atomic_read(&cmd->transport_dev_active)) { 3424 atomic_set(&cmd->transport_dev_active, 0); 3425 transport_all_task_dev_remove_state(cmd); 3426 free_tasks = 1; 3427 } 3428 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3429 3430 if (free_tasks != 0) 3431 transport_free_dev_tasks(cmd); 3432 3433 transport_free_pages(cmd); 3434 transport_release_cmd(cmd); 3435 return; 3436 out_busy: 3437 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3438 } 3439 3440 /* 3441 * transport_generic_map_mem_to_cmd - Use fabric-alloced pages instead of 3442 * allocating in the core. 3443 * @cmd: Associated se_cmd descriptor 3444 * @mem: SGL style memory for TCM WRITE / READ 3445 * @sg_mem_num: Number of SGL elements 3446 * @mem_bidi_in: SGL style memory for TCM BIDI READ 3447 * @sg_mem_bidi_num: Number of BIDI READ SGL elements 3448 * 3449 * Return: nonzero return cmd was rejected for -ENOMEM or inproper usage 3450 * of parameters. 3451 */ 3452 int transport_generic_map_mem_to_cmd( 3453 struct se_cmd *cmd, 3454 struct scatterlist *sgl, 3455 u32 sgl_count, 3456 struct scatterlist *sgl_bidi, 3457 u32 sgl_bidi_count) 3458 { 3459 if (!sgl || !sgl_count) 3460 return 0; 3461 3462 if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) || 3463 (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) { 3464 /* 3465 * Reject SCSI data overflow with map_mem_to_cmd() as incoming 3466 * scatterlists already have been set to follow what the fabric 3467 * passes for the original expected data transfer length. 
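		 *
		 * A minimal, hypothetical fabric-side usage sketch (the sgl,
		 * sgl_nents and rc names below are illustrative only): build
		 * the scatterlist describing the fabric's own payload buffer
		 * and hand it to the core before kicking off processing:
		 */
#if 0
		rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_nents,
						      NULL, 0);
		if (rc < 0)
			transport_send_check_condition_and_sense(se_cmd,
					se_cmd->scsi_sense_reason, 0);
#endif
		/*
		 * Overflow of the fabric-provided length is rejected here: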
3468 */ 3469 if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { 3470 pr_warn("Rejecting SCSI DATA overflow for fabric using" 3471 " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n"); 3472 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 3473 cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; 3474 return -EINVAL; 3475 } 3476 3477 cmd->t_data_sg = sgl; 3478 cmd->t_data_nents = sgl_count; 3479 3480 if (sgl_bidi && sgl_bidi_count) { 3481 cmd->t_bidi_data_sg = sgl_bidi; 3482 cmd->t_bidi_data_nents = sgl_bidi_count; 3483 } 3484 cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; 3485 } 3486 3487 return 0; 3488 } 3489 EXPORT_SYMBOL(transport_generic_map_mem_to_cmd); 3490 3491 void *transport_kmap_data_sg(struct se_cmd *cmd) 3492 { 3493 struct scatterlist *sg = cmd->t_data_sg; 3494 struct page **pages; 3495 int i; 3496 3497 BUG_ON(!sg); 3498 /* 3499 * We need to take into account a possible offset here for fabrics like 3500 * tcm_loop who may be using a contig buffer from the SCSI midlayer for 3501 * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd() 3502 */ 3503 if (!cmd->t_data_nents) 3504 return NULL; 3505 else if (cmd->t_data_nents == 1) 3506 return kmap(sg_page(sg)) + sg->offset; 3507 3508 /* >1 page. use vmap */ 3509 pages = kmalloc(sizeof(*pages) * cmd->t_data_nents, GFP_KERNEL); 3510 if (!pages) 3511 return NULL; 3512 3513 /* convert sg[] to pages[] */ 3514 for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) { 3515 pages[i] = sg_page(sg); 3516 } 3517 3518 cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL); 3519 kfree(pages); 3520 if (!cmd->t_data_vmap) 3521 return NULL; 3522 3523 return cmd->t_data_vmap + cmd->t_data_sg[0].offset; 3524 } 3525 EXPORT_SYMBOL(transport_kmap_data_sg); 3526 3527 void transport_kunmap_data_sg(struct se_cmd *cmd) 3528 { 3529 if (!cmd->t_data_nents) 3530 return; 3531 else if (cmd->t_data_nents == 1) 3532 kunmap(sg_page(cmd->t_data_sg)); 3533 3534 vunmap(cmd->t_data_vmap); 3535 cmd->t_data_vmap = NULL; 3536 } 3537 EXPORT_SYMBOL(transport_kunmap_data_sg); 3538 3539 static int 3540 transport_generic_get_mem(struct se_cmd *cmd) 3541 { 3542 u32 length = cmd->data_length; 3543 unsigned int nents; 3544 struct page *page; 3545 gfp_t zero_flag; 3546 int i = 0; 3547 3548 nents = DIV_ROUND_UP(length, PAGE_SIZE); 3549 cmd->t_data_sg = kmalloc(sizeof(struct scatterlist) * nents, GFP_KERNEL); 3550 if (!cmd->t_data_sg) 3551 return -ENOMEM; 3552 3553 cmd->t_data_nents = nents; 3554 sg_init_table(cmd->t_data_sg, nents); 3555 3556 zero_flag = cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB ? 
0 : __GFP_ZERO; 3557 3558 while (length) { 3559 u32 page_len = min_t(u32, length, PAGE_SIZE); 3560 page = alloc_page(GFP_KERNEL | zero_flag); 3561 if (!page) 3562 goto out; 3563 3564 sg_set_page(&cmd->t_data_sg[i], page, page_len, 0); 3565 length -= page_len; 3566 i++; 3567 } 3568 return 0; 3569 3570 out: 3571 while (i >= 0) { 3572 __free_page(sg_page(&cmd->t_data_sg[i])); 3573 i--; 3574 } 3575 kfree(cmd->t_data_sg); 3576 cmd->t_data_sg = NULL; 3577 return -ENOMEM; 3578 } 3579 3580 /* Reduce sectors if they are too long for the device */ 3581 static inline sector_t transport_limit_task_sectors( 3582 struct se_device *dev, 3583 unsigned long long lba, 3584 sector_t sectors) 3585 { 3586 sectors = min_t(sector_t, sectors, dev->se_sub_dev->se_dev_attrib.max_sectors); 3587 3588 if (dev->transport->get_device_type(dev) == TYPE_DISK) 3589 if ((lba + sectors) > transport_dev_end_lba(dev)) 3590 sectors = ((transport_dev_end_lba(dev) - lba) + 1); 3591 3592 return sectors; 3593 } 3594 3595 3596 /* 3597 * This function can be used by HW target mode drivers to create a linked 3598 * scatterlist from all contiguously allocated struct se_task->task_sg[]. 3599 * This is intended to be called during the completion path by TCM Core 3600 * when struct target_core_fabric_ops->check_task_sg_chaining is enabled. 3601 */ 3602 void transport_do_task_sg_chain(struct se_cmd *cmd) 3603 { 3604 struct scatterlist *sg_first = NULL; 3605 struct scatterlist *sg_prev = NULL; 3606 int sg_prev_nents = 0; 3607 struct scatterlist *sg; 3608 struct se_task *task; 3609 u32 chained_nents = 0; 3610 int i; 3611 3612 BUG_ON(!cmd->se_tfo->task_sg_chaining); 3613 3614 /* 3615 * Walk the struct se_task list and setup scatterlist chains 3616 * for each contiguously allocated struct se_task->task_sg[]. 3617 */ 3618 list_for_each_entry(task, &cmd->t_task_list, t_list) { 3619 if (!task->task_sg) 3620 continue; 3621 3622 if (!sg_first) { 3623 sg_first = task->task_sg; 3624 chained_nents = task->task_sg_nents; 3625 } else { 3626 sg_chain(sg_prev, sg_prev_nents, task->task_sg); 3627 chained_nents += task->task_sg_nents; 3628 } 3629 /* 3630 * For the padded tasks, use the extra SGL vector allocated 3631 * in transport_allocate_data_tasks() for the sg_prev_nents 3632 * offset into sg_chain() above. 3633 * 3634 * We do not need the padding for the last task (or a single 3635 * task), but in that case we will never use the sg_prev_nents 3636 * value below which would be incorrect. 3637 */ 3638 sg_prev_nents = (task->task_sg_nents + 1); 3639 sg_prev = task->task_sg; 3640 } 3641 /* 3642 * Setup the starting pointer and total t_tasks_sg_linked_no including 3643 * padding SGs for linking and to mark the end. 
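	 *
	 * A HW fabric driver that set ->task_sg_chaining can then walk the
	 * whole command with a single for_each_sg() over
	 * cmd->t_tasks_sg_chained, as the debug loop below illustrates.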
3644 */ 3645 cmd->t_tasks_sg_chained = sg_first; 3646 cmd->t_tasks_sg_chained_no = chained_nents; 3647 3648 pr_debug("Setup cmd: %p cmd->t_tasks_sg_chained: %p and" 3649 " t_tasks_sg_chained_no: %u\n", cmd, cmd->t_tasks_sg_chained, 3650 cmd->t_tasks_sg_chained_no); 3651 3652 for_each_sg(cmd->t_tasks_sg_chained, sg, 3653 cmd->t_tasks_sg_chained_no, i) { 3654 3655 pr_debug("SG[%d]: %p page: %p length: %d offset: %d\n", 3656 i, sg, sg_page(sg), sg->length, sg->offset); 3657 if (sg_is_chain(sg)) 3658 pr_debug("SG: %p sg_is_chain=1\n", sg); 3659 if (sg_is_last(sg)) 3660 pr_debug("SG: %p sg_is_last=1\n", sg); 3661 } 3662 } 3663 EXPORT_SYMBOL(transport_do_task_sg_chain); 3664 3665 /* 3666 * Break up cmd into chunks transport can handle 3667 */ 3668 static int 3669 transport_allocate_data_tasks(struct se_cmd *cmd, 3670 enum dma_data_direction data_direction, 3671 struct scatterlist *cmd_sg, unsigned int sgl_nents) 3672 { 3673 struct se_device *dev = cmd->se_dev; 3674 int task_count, i; 3675 unsigned long long lba; 3676 sector_t sectors, dev_max_sectors; 3677 u32 sector_size; 3678 3679 if (transport_cmd_get_valid_sectors(cmd) < 0) 3680 return -EINVAL; 3681 3682 dev_max_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors; 3683 sector_size = dev->se_sub_dev->se_dev_attrib.block_size; 3684 3685 WARN_ON(cmd->data_length % sector_size); 3686 3687 lba = cmd->t_task_lba; 3688 sectors = DIV_ROUND_UP(cmd->data_length, sector_size); 3689 task_count = DIV_ROUND_UP_SECTOR_T(sectors, dev_max_sectors); 3690 3691 /* 3692 * If we need just a single task reuse the SG list in the command 3693 * and avoid a lot of work. 3694 */ 3695 if (task_count == 1) { 3696 struct se_task *task; 3697 unsigned long flags; 3698 3699 task = transport_generic_get_task(cmd, data_direction); 3700 if (!task) 3701 return -ENOMEM; 3702 3703 task->task_sg = cmd_sg; 3704 task->task_sg_nents = sgl_nents; 3705 3706 task->task_lba = lba; 3707 task->task_sectors = sectors; 3708 task->task_size = task->task_sectors * sector_size; 3709 3710 spin_lock_irqsave(&cmd->t_state_lock, flags); 3711 list_add_tail(&task->t_list, &cmd->t_task_list); 3712 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3713 3714 return task_count; 3715 } 3716 3717 for (i = 0; i < task_count; i++) { 3718 struct se_task *task; 3719 unsigned int task_size, task_sg_nents_padded; 3720 struct scatterlist *sg; 3721 unsigned long flags; 3722 int count; 3723 3724 task = transport_generic_get_task(cmd, data_direction); 3725 if (!task) 3726 return -ENOMEM; 3727 3728 task->task_lba = lba; 3729 task->task_sectors = min(sectors, dev_max_sectors); 3730 task->task_size = task->task_sectors * sector_size; 3731 3732 /* 3733 * This now assumes that passed sg_ents are in PAGE_SIZE chunks 3734 * in order to calculate the number per task SGL entries 3735 */ 3736 task->task_sg_nents = DIV_ROUND_UP(task->task_size, PAGE_SIZE); 3737 /* 3738 * Check if the fabric module driver is requesting that all 3739 * struct se_task->task_sg[] be chained together.. If so, 3740 * then allocate an extra padding SG entry for linking and 3741 * marking the end of the chained SGL for every task except 3742 * the last one for (task_count > 1) operation, or skipping 3743 * the extra padding for the (task_count == 1) case. 
		 */
		if (cmd->se_tfo->task_sg_chaining && (i < (task_count - 1))) {
			task_sg_nents_padded = (task->task_sg_nents + 1);
		} else
			task_sg_nents_padded = task->task_sg_nents;

		task->task_sg = kmalloc(sizeof(struct scatterlist) *
					task_sg_nents_padded, GFP_KERNEL);
		if (!task->task_sg) {
			cmd->se_dev->transport->free_task(task);
			return -ENOMEM;
		}

		sg_init_table(task->task_sg, task_sg_nents_padded);

		task_size = task->task_size;

		/* Build new sgl, only up to task_size */
		for_each_sg(task->task_sg, sg, task->task_sg_nents, count) {
			if (cmd_sg->length > task_size)
				break;

			*sg = *cmd_sg;
			task_size -= cmd_sg->length;
			cmd_sg = sg_next(cmd_sg);
		}

		lba += task->task_sectors;
		sectors -= task->task_sectors;

		spin_lock_irqsave(&cmd->t_state_lock, flags);
		list_add_tail(&task->t_list, &cmd->t_task_list);
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
	}

	return task_count;
}

static int
transport_allocate_control_task(struct se_cmd *cmd)
{
	struct se_task *task;
	unsigned long flags;

	/* Workaround for handling zero-length control CDBs */
	if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) &&
	    !cmd->data_length)
		return 0;

	task = transport_generic_get_task(cmd, cmd->data_direction);
	if (!task)
		return -ENOMEM;

	task->task_sg = cmd->t_data_sg;
	task->task_size = cmd->data_length;
	task->task_sg_nents = cmd->t_data_nents;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	list_add_tail(&task->t_list, &cmd->t_task_list);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	/* Success! Return number of tasks allocated */
	return 1;
}

/*
 * Allocate any required resources to execute the command, and place it
 * on the execution queue if possible.  For writes we might not have the
 * payload yet, thus notify the fabric via a call to ->write_pending instead.
 */
int transport_generic_new_cmd(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	int task_cdbs, task_cdbs_bidi = 0;
	int set_counts = 1;
	int ret = 0;

	/*
	 * Determine if the TCM fabric module has already allocated physical
	 * memory, and is directly calling transport_generic_map_mem_to_cmd()
	 * beforehand.
	 */
	if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
	    cmd->data_length) {
		ret = transport_generic_get_mem(cmd);
		if (ret < 0)
			goto out_fail;
	}

	/*
	 * For BIDI commands set up the read tasks first.
3835 */
3836 if (cmd->t_bidi_data_sg &&
3837 dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
3838 BUG_ON(!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB));
3839
3840 task_cdbs_bidi = transport_allocate_data_tasks(cmd,
3841 DMA_FROM_DEVICE, cmd->t_bidi_data_sg,
3842 cmd->t_bidi_data_nents);
3843 if (task_cdbs_bidi <= 0)
3844 goto out_fail;
3845
3846 atomic_inc(&cmd->t_fe_count);
3847 atomic_inc(&cmd->t_se_count);
3848 set_counts = 0;
3849 }
3850
3851 if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
3852 task_cdbs = transport_allocate_data_tasks(cmd,
3853 cmd->data_direction, cmd->t_data_sg,
3854 cmd->t_data_nents);
3855 } else {
3856 task_cdbs = transport_allocate_control_task(cmd);
3857 }
3858
3859 if (task_cdbs < 0)
3860 goto out_fail;
3861 else if (!task_cdbs && (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) {
3862 cmd->t_state = TRANSPORT_COMPLETE;
3863 atomic_set(&cmd->t_transport_active, 1);
3864
3865 if (cmd->t_task_cdb[0] == REQUEST_SENSE) {
3866 u8 ua_asc = 0, ua_ascq = 0;
3867
3868 core_scsi3_ua_clear_for_request_sense(cmd,
3869 &ua_asc, &ua_ascq);
3870 }
3871
3872 INIT_WORK(&cmd->work, target_complete_ok_work);
3873 queue_work(target_completion_wq, &cmd->work);
3874 return 0;
3875 }
3876
3877 if (set_counts) {
3878 atomic_inc(&cmd->t_fe_count);
3879 atomic_inc(&cmd->t_se_count);
3880 }
3881
3882 cmd->t_task_list_num = (task_cdbs + task_cdbs_bidi);
3883 atomic_set(&cmd->t_task_cdbs_left, cmd->t_task_list_num);
3884 atomic_set(&cmd->t_task_cdbs_ex_left, cmd->t_task_list_num);
3885
3886 /*
3887 * For WRITEs, let the fabric know its buffer is ready.
3888 * This WRITE struct se_cmd (and all of its associated struct se_task's)
3889 * will be added to the struct se_device execution queue after its WRITE
3890 * data has arrived (i.e., it gets handled by the transport processing
3891 * thread a second time).
3892 */
3893 if (cmd->data_direction == DMA_TO_DEVICE) {
3894 transport_add_tasks_to_state_queue(cmd);
3895 return transport_generic_write_pending(cmd);
3896 }
3897 /*
3898 * For everything other than a WRITE, add the struct se_cmd's
3899 * struct se_task's to the execution queue.
3900 */
3901 transport_execute_tasks(cmd);
3902 return 0;
3903
3904 out_fail:
3905 cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
3906 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
3907 return -EINVAL;
3908 }
3909 EXPORT_SYMBOL(transport_generic_new_cmd);
3910
3911 /* transport_generic_process_write():
3912 *
3913 *
3914 */
3915 void transport_generic_process_write(struct se_cmd *cmd)
3916 {
3917 transport_execute_tasks(cmd);
3918 }
3919 EXPORT_SYMBOL(transport_generic_process_write);
3920
3921 static void transport_write_pending_qf(struct se_cmd *cmd)
3922 {
3923 int ret;
3924
3925 ret = cmd->se_tfo->write_pending(cmd);
3926 if (ret == -EAGAIN || ret == -ENOMEM) {
3927 pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n",
3928 cmd);
3929 transport_handle_queue_full(cmd, cmd->se_dev);
3930 }
3931 }
3932
3933 static int transport_generic_write_pending(struct se_cmd *cmd)
3934 {
3935 unsigned long flags;
3936 int ret;
3937
3938 spin_lock_irqsave(&cmd->t_state_lock, flags);
3939 cmd->t_state = TRANSPORT_WRITE_PENDING;
3940 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3941
3942 /*
3943 * Clear the se_cmd for WRITE_PENDING status in order to set
3944 * cmd->t_transport_active=0, so that transport_generic_handle_data
3945 * can be called from HW target mode interrupt code.
3946 * It is safe to call this with transport_off = 1 before cmd->se_tfo->write_pending()
3947 * because the se_cmd->se_lun pointer is not being cleared.
3948 */
3949 transport_cmd_check_stop(cmd, 1, 0);
3950
3951 /*
3952 * Call the fabric write_pending function here to let the
3953 * frontend know that WRITE buffers are ready.
3954 */
3955 ret = cmd->se_tfo->write_pending(cmd);
3956 if (ret == -EAGAIN || ret == -ENOMEM)
3957 goto queue_full;
3958 else if (ret < 0)
3959 return ret;
3960
3961 return 1;
3962
3963 queue_full:
3964 pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", cmd);
3965 cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
3966 transport_handle_queue_full(cmd, cmd->se_dev);
3967 return 0;
3968 }
3969
3970 void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
3971 {
3972 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
3973 if (wait_for_tasks && cmd->se_tmr_req)
3974 transport_wait_for_tasks(cmd);
3975
3976 transport_release_cmd(cmd);
3977 } else {
3978 if (wait_for_tasks)
3979 transport_wait_for_tasks(cmd);
3980
3981 core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd);
3982
3983 if (cmd->se_lun)
3984 transport_lun_remove_cmd(cmd);
3985
3986 transport_free_dev_tasks(cmd);
3987
3988 transport_put_cmd(cmd);
3989 }
3990 }
3991 EXPORT_SYMBOL(transport_generic_free_cmd);
3992
3993 /* target_get_sess_cmd - Add command to active ->sess_cmd_list
3994 * @se_sess: session to reference
3995 * @se_cmd: command descriptor to add
3996 * @ack_kref: Signal that the fabric will perform an acknowledging target_put_sess_cmd()
3997 */
3998 void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
3999 bool ack_kref)
4000 {
4001 unsigned long flags;
4002
4003 kref_init(&se_cmd->cmd_kref);
4004 /*
4005 * Add a second kref if the fabric caller is expecting to handle
4006 * fabric acknowledgement that requires two target_put_sess_cmd()
4007 * invocations before the se_cmd descriptor is released.
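 * For example (illustrative usage only, not mandated by this file): a fabric
 * module that acks its responses would pass ack_kref = true here when the
 * command is submitted, call target_put_sess_cmd() once when the response
 * has been queued, and call it a second time when the initiator's
 * acknowledgement arrives; only then does target_release_cmd_kref() hand
 * the descriptor back via se_tfo->release_cmd().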
4008 */ 4009 if (ack_kref == true) 4010 kref_get(&se_cmd->cmd_kref); 4011 4012 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 4013 list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list); 4014 se_cmd->check_release = 1; 4015 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 4016 } 4017 EXPORT_SYMBOL(target_get_sess_cmd); 4018 4019 static void target_release_cmd_kref(struct kref *kref) 4020 { 4021 struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref); 4022 struct se_session *se_sess = se_cmd->se_sess; 4023 unsigned long flags; 4024 4025 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 4026 if (list_empty(&se_cmd->se_cmd_list)) { 4027 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 4028 WARN_ON(1); 4029 return; 4030 } 4031 if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) { 4032 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 4033 complete(&se_cmd->cmd_wait_comp); 4034 return; 4035 } 4036 list_del(&se_cmd->se_cmd_list); 4037 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 4038 4039 se_cmd->se_tfo->release_cmd(se_cmd); 4040 } 4041 4042 /* target_put_sess_cmd - Check for active I/O shutdown via kref_put 4043 * @se_sess: session to reference 4044 * @se_cmd: command descriptor to drop 4045 */ 4046 int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd) 4047 { 4048 return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref); 4049 } 4050 EXPORT_SYMBOL(target_put_sess_cmd); 4051 4052 /* target_splice_sess_cmd_list - Split active cmds into sess_wait_list 4053 * @se_sess: session to split 4054 */ 4055 void target_splice_sess_cmd_list(struct se_session *se_sess) 4056 { 4057 struct se_cmd *se_cmd; 4058 unsigned long flags; 4059 4060 WARN_ON(!list_empty(&se_sess->sess_wait_list)); 4061 INIT_LIST_HEAD(&se_sess->sess_wait_list); 4062 4063 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 4064 se_sess->sess_tearing_down = 1; 4065 4066 list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list); 4067 4068 list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list) 4069 se_cmd->cmd_wait_set = 1; 4070 4071 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 4072 } 4073 EXPORT_SYMBOL(target_splice_sess_cmd_list); 4074 4075 /* target_wait_for_sess_cmds - Wait for outstanding descriptors 4076 * @se_sess: session to wait for active I/O 4077 * @wait_for_tasks: Make extra transport_wait_for_tasks call 4078 */ 4079 void target_wait_for_sess_cmds( 4080 struct se_session *se_sess, 4081 int wait_for_tasks) 4082 { 4083 struct se_cmd *se_cmd, *tmp_cmd; 4084 bool rc = false; 4085 4086 list_for_each_entry_safe(se_cmd, tmp_cmd, 4087 &se_sess->sess_wait_list, se_cmd_list) { 4088 list_del(&se_cmd->se_cmd_list); 4089 4090 pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:" 4091 " %d\n", se_cmd, se_cmd->t_state, 4092 se_cmd->se_tfo->get_cmd_state(se_cmd)); 4093 4094 if (wait_for_tasks) { 4095 pr_debug("Calling transport_wait_for_tasks se_cmd: %p t_state: %d," 4096 " fabric state: %d\n", se_cmd, se_cmd->t_state, 4097 se_cmd->se_tfo->get_cmd_state(se_cmd)); 4098 4099 rc = transport_wait_for_tasks(se_cmd); 4100 4101 pr_debug("After transport_wait_for_tasks se_cmd: %p t_state: %d," 4102 " fabric state: %d\n", se_cmd, se_cmd->t_state, 4103 se_cmd->se_tfo->get_cmd_state(se_cmd)); 4104 } 4105 4106 if (!rc) { 4107 wait_for_completion(&se_cmd->cmd_wait_comp); 4108 pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d" 4109 " fabric state: %d\n", se_cmd, se_cmd->t_state, 4110 se_cmd->se_tfo->get_cmd_state(se_cmd)); 4111 } 4112 
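/*
 * All waiting for this descriptor is done; hand it back to the
 * fabric module for final release via se_tfo->release_cmd().
 */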
4113 se_cmd->se_tfo->release_cmd(se_cmd);
4114 }
4115 }
4116 EXPORT_SYMBOL(target_wait_for_sess_cmds);
4117
4118 /* transport_lun_wait_for_tasks():
4119 *
4120 * Called from ConfigFS context to stop the passed struct se_cmd so that a
4121 * struct se_lun can be shut down successfully.
4122 */
4123 static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
4124 {
4125 unsigned long flags;
4126 int ret;
4127 /*
4128 * If the frontend has already requested this struct se_cmd to
4129 * be stopped, we can safely ignore this struct se_cmd.
4130 */
4131 spin_lock_irqsave(&cmd->t_state_lock, flags);
4132 if (atomic_read(&cmd->t_transport_stop)) {
4133 atomic_set(&cmd->transport_lun_stop, 0);
4134 pr_debug("ConfigFS ITT[0x%08x] - t_transport_stop =="
4135 " TRUE, skipping\n", cmd->se_tfo->get_task_tag(cmd));
4136 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4137 transport_cmd_check_stop(cmd, 1, 0);
4138 return -EPERM;
4139 }
4140 atomic_set(&cmd->transport_lun_fe_stop, 1);
4141 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4142
4143 wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);
4144
4145 ret = transport_stop_tasks_for_cmd(cmd);
4146
4147 pr_debug("ConfigFS: cmd: %p t_tasks: %d stop tasks ret:"
4148 " %d\n", cmd, cmd->t_task_list_num, ret);
4149 if (!ret) {
4150 pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
4151 cmd->se_tfo->get_task_tag(cmd));
4152 wait_for_completion(&cmd->transport_lun_stop_comp);
4153 pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
4154 cmd->se_tfo->get_task_tag(cmd));
4155 }
4156 transport_remove_cmd_from_queue(cmd);
4157
4158 return 0;
4159 }
4160
4161 static void __transport_clear_lun_from_sessions(struct se_lun *lun)
4162 {
4163 struct se_cmd *cmd = NULL;
4164 unsigned long lun_flags, cmd_flags;
4165 /*
4166 * Do exception processing and return CHECK_CONDITION status to the
4167 * Initiator Port.
4168 */
4169 spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
4170 while (!list_empty(&lun->lun_cmd_list)) {
4171 cmd = list_first_entry(&lun->lun_cmd_list,
4172 struct se_cmd, se_lun_node);
4173 list_del(&cmd->se_lun_node);
4174
4175 atomic_set(&cmd->transport_lun_active, 0);
4176 /*
4177 * This will notify iscsi_target_transport.c:
4178 * transport_cmd_check_stop() that a LUN shutdown is in
4179 * progress for the iscsi_cmd_t.
4180 */
4181 spin_lock(&cmd->t_state_lock);
4182 pr_debug("SE_LUN[%d] - Setting cmd->transport"
4183 "_lun_stop for ITT: 0x%08x\n",
4184 cmd->se_lun->unpacked_lun,
4185 cmd->se_tfo->get_task_tag(cmd));
4186 atomic_set(&cmd->transport_lun_stop, 1);
4187 spin_unlock(&cmd->t_state_lock);
4188
4189 spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
4190
4191 if (!cmd->se_lun) {
4192 pr_err("ITT: 0x%08x, [i,t]_state: %u/%u\n",
4193 cmd->se_tfo->get_task_tag(cmd),
4194 cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
4195 BUG();
4196 }
4197 /*
4198 * If the Storage engine still owns the iscsi_cmd_t, determine
4199 * whether its context needs to be stopped and stop it if so.
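 * transport_lun_wait_for_tasks() below returns -EPERM when the frontend
 * has already asked for this command to be stopped, in which case the
 * command is simply skipped and the loop moves on to the next entry.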
4200 */
4201 pr_debug("SE_LUN[%d] - ITT: 0x%08x before transport"
4202 "_lun_wait_for_tasks()\n", cmd->se_lun->unpacked_lun,
4203 cmd->se_tfo->get_task_tag(cmd));
4204
4205 if (transport_lun_wait_for_tasks(cmd, cmd->se_lun) < 0) {
4206 spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
4207 continue;
4208 }
4209
4210 pr_debug("SE_LUN[%d] - ITT: 0x%08x after transport_lun"
4211 "_wait_for_tasks(): SUCCESS\n",
4212 cmd->se_lun->unpacked_lun,
4213 cmd->se_tfo->get_task_tag(cmd));
4214
4215 spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
4216 if (!atomic_read(&cmd->transport_dev_active)) {
4217 spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
4218 goto check_cond;
4219 }
4220 atomic_set(&cmd->transport_dev_active, 0);
4221 transport_all_task_dev_remove_state(cmd);
4222 spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
4223
4224 transport_free_dev_tasks(cmd);
4225 /*
4226 * The Storage engine stopped this struct se_cmd before it was
4227 * sent to the fabric frontend for delivery back to the
4228 * Initiator Node. Return this SCSI CDB back with a
4229 * CHECK_CONDITION status.
4230 */
4231 check_cond:
4232 transport_send_check_condition_and_sense(cmd,
4233 TCM_NON_EXISTENT_LUN, 0);
4234 /*
4235 * If the fabric frontend is waiting for this iscsi_cmd_t to
4236 * be released, notify the waiting thread now that the LU has
4237 * finished accessing it.
4238 */
4239 spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
4240 if (atomic_read(&cmd->transport_lun_fe_stop)) {
4241 pr_debug("SE_LUN[%d] - Detected FE stop for"
4242 " struct se_cmd: %p ITT: 0x%08x\n",
4243 lun->unpacked_lun,
4244 cmd, cmd->se_tfo->get_task_tag(cmd));
4245
4246 spin_unlock_irqrestore(&cmd->t_state_lock,
4247 cmd_flags);
4248 transport_cmd_check_stop(cmd, 1, 0);
4249 complete(&cmd->transport_lun_fe_stop_comp);
4250 spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
4251 continue;
4252 }
4253 pr_debug("SE_LUN[%d] - ITT: 0x%08x finished processing\n",
4254 lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd));
4255
4256 spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
4257 spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
4258 }
4259 spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
4260 }
4261
4262 static int transport_clear_lun_thread(void *p)
4263 {
4264 struct se_lun *lun = p;
4265
4266 __transport_clear_lun_from_sessions(lun);
4267 complete(&lun->lun_shutdown_comp);
4268
4269 return 0;
4270 }
4271
4272 int transport_clear_lun_from_sessions(struct se_lun *lun)
4273 {
4274 struct task_struct *kt;
4275
4276 kt = kthread_run(transport_clear_lun_thread, lun,
4277 "tcm_cl_%u", lun->unpacked_lun);
4278 if (IS_ERR(kt)) {
4279 pr_err("Unable to start clear_lun thread\n");
4280 return PTR_ERR(kt);
4281 }
4282 wait_for_completion(&lun->lun_shutdown_comp);
4283
4284 return 0;
4285 }
4286
4287 /**
4288 * transport_wait_for_tasks - wait for completion to occur
4289 * @cmd: command to wait on
4290 *
4291 * Called from frontend fabric context to wait for the storage engine
4292 * to pause and/or release a frontend-generated struct se_cmd.
4293 */
4294 bool transport_wait_for_tasks(struct se_cmd *cmd)
4295 {
4296 unsigned long flags;
4297
4298 spin_lock_irqsave(&cmd->t_state_lock, flags);
4299 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req)) {
4300 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4301 return false;
4302 }
4303 /*
4304 * Only perform a possible wait_for_tasks if SCF_SUPPORTED_SAM_OPCODE
4305 * has been set in transport_set_supported_SAM_opcode().
4306 */
4307 if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && !cmd->se_tmr_req) {
4308 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4309 return false;
4310 }
4311 /*
4312 * If we are already stopped due to an external event (i.e., LUN shutdown),
4313 * sleep until the connection can have the passed struct se_cmd back.
4314 * The cmd->transport_lun_fe_stop_comp completion will be signalled by
4315 * transport_clear_lun_from_sessions() once the ConfigFS context caller
4316 * has completed its operation on the struct se_cmd.
4317 */
4318 if (atomic_read(&cmd->transport_lun_stop)) {
4319
4320 pr_debug("wait_for_tasks: Stopping"
4321 " wait_for_completion(&cmd->t_tasktransport_lun_fe"
4322 "_stop_comp); for ITT: 0x%08x\n",
4323 cmd->se_tfo->get_task_tag(cmd));
4324 /*
4325 * There is a special case for WRITEs where a FE exception +
4326 * LUN shutdown means the ConfigFS context is still sleeping on
4327 * transport_lun_stop_comp in transport_lun_wait_for_tasks().
4328 * We go ahead and up transport_lun_stop_comp just to be sure
4329 * here.
4330 */
4331 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4332 complete(&cmd->transport_lun_stop_comp);
4333 wait_for_completion(&cmd->transport_lun_fe_stop_comp);
4334 spin_lock_irqsave(&cmd->t_state_lock, flags);
4335
4336 transport_all_task_dev_remove_state(cmd);
4337 /*
4338 * At this point, the frontend who was the originator of this
4339 * struct se_cmd now owns the structure, and it can be released through
4340 * normal means below.
4341 */
4342 pr_debug("wait_for_tasks: Stopped"
4343 " wait_for_completion(&cmd->t_tasktransport_lun_fe_"
4344 "stop_comp); for ITT: 0x%08x\n",
4345 cmd->se_tfo->get_task_tag(cmd));
4346
4347 atomic_set(&cmd->transport_lun_stop, 0);
4348 }
4349 if (!atomic_read(&cmd->t_transport_active) ||
4350 atomic_read(&cmd->t_transport_aborted)) {
4351 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4352 return false;
4353 }
4354
4355 atomic_set(&cmd->t_transport_stop, 1);
4356
4357 pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x"
4358 " i_state: %d, t_state: %d, t_transport_stop = TRUE\n",
4359 cmd, cmd->se_tfo->get_task_tag(cmd),
4360 cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
4361
4362 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4363
4364 wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);
4365
4366 wait_for_completion(&cmd->t_transport_stop_comp);
4367
4368 spin_lock_irqsave(&cmd->t_state_lock, flags);
4369 atomic_set(&cmd->t_transport_active, 0);
4370 atomic_set(&cmd->t_transport_stop, 0);
4371
4372 pr_debug("wait_for_tasks: Stopped wait_for_completion("
4373 "&cmd->t_transport_stop_comp) for ITT: 0x%08x\n",
4374 cmd->se_tfo->get_task_tag(cmd));
4375
4376 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4377
4378 return true;
4379 }
4380 EXPORT_SYMBOL(transport_wait_for_tasks);
4381
4382 static int transport_get_sense_codes(
4383 struct se_cmd *cmd,
4384 u8 *asc,
4385 u8 *ascq)
4386 {
4387 *asc = cmd->scsi_asc;
4388 *ascq = cmd->scsi_ascq;
4389
4390 return 0;
4391 }
4392
4393 static int transport_set_sense_codes(
4394 struct se_cmd *cmd,
4395 u8 asc,
4396 u8 ascq)
4397 {
4398 cmd->scsi_asc = asc;
4399 cmd->scsi_ascq = ascq;
4400
4401 return 0;
4402 }
4403
4404 int transport_send_check_condition_and_sense(
4405 struct se_cmd *cmd,
4406 u8 reason,
4407 int from_transport)
4408 {
4409 unsigned char *buffer = cmd->sense_buffer;
4410 unsigned long flags;
4411 int offset;
4412 u8 asc = 0, ascq = 0;
4413
4414 spin_lock_irqsave(&cmd->t_state_lock, flags);
4415 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
4416
spin_unlock_irqrestore(&cmd->t_state_lock, flags); 4417 return 0; 4418 } 4419 cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION; 4420 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 4421 4422 if (!reason && from_transport) 4423 goto after_reason; 4424 4425 if (!from_transport) 4426 cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE; 4427 /* 4428 * Data Segment and SenseLength of the fabric response PDU. 4429 * 4430 * TRANSPORT_SENSE_BUFFER is now set to SCSI_SENSE_BUFFERSIZE 4431 * from include/scsi/scsi_cmnd.h 4432 */ 4433 offset = cmd->se_tfo->set_fabric_sense_len(cmd, 4434 TRANSPORT_SENSE_BUFFER); 4435 /* 4436 * Actual SENSE DATA, see SPC-3 7.23.2 SPC_SENSE_KEY_OFFSET uses 4437 * SENSE KEY values from include/scsi/scsi.h 4438 */ 4439 switch (reason) { 4440 case TCM_NON_EXISTENT_LUN: 4441 /* CURRENT ERROR */ 4442 buffer[offset] = 0x70; 4443 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; 4444 /* ILLEGAL REQUEST */ 4445 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; 4446 /* LOGICAL UNIT NOT SUPPORTED */ 4447 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x25; 4448 break; 4449 case TCM_UNSUPPORTED_SCSI_OPCODE: 4450 case TCM_SECTOR_COUNT_TOO_MANY: 4451 /* CURRENT ERROR */ 4452 buffer[offset] = 0x70; 4453 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; 4454 /* ILLEGAL REQUEST */ 4455 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; 4456 /* INVALID COMMAND OPERATION CODE */ 4457 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x20; 4458 break; 4459 case TCM_UNKNOWN_MODE_PAGE: 4460 /* CURRENT ERROR */ 4461 buffer[offset] = 0x70; 4462 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; 4463 /* ILLEGAL REQUEST */ 4464 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; 4465 /* INVALID FIELD IN CDB */ 4466 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24; 4467 break; 4468 case TCM_CHECK_CONDITION_ABORT_CMD: 4469 /* CURRENT ERROR */ 4470 buffer[offset] = 0x70; 4471 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; 4472 /* ABORTED COMMAND */ 4473 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; 4474 /* BUS DEVICE RESET FUNCTION OCCURRED */ 4475 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x29; 4476 buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x03; 4477 break; 4478 case TCM_INCORRECT_AMOUNT_OF_DATA: 4479 /* CURRENT ERROR */ 4480 buffer[offset] = 0x70; 4481 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; 4482 /* ABORTED COMMAND */ 4483 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; 4484 /* WRITE ERROR */ 4485 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c; 4486 /* NOT ENOUGH UNSOLICITED DATA */ 4487 buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0d; 4488 break; 4489 case TCM_INVALID_CDB_FIELD: 4490 /* CURRENT ERROR */ 4491 buffer[offset] = 0x70; 4492 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; 4493 /* ILLEGAL REQUEST */ 4494 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; 4495 /* INVALID FIELD IN CDB */ 4496 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24; 4497 break; 4498 case TCM_INVALID_PARAMETER_LIST: 4499 /* CURRENT ERROR */ 4500 buffer[offset] = 0x70; 4501 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; 4502 /* ILLEGAL REQUEST */ 4503 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; 4504 /* INVALID FIELD IN PARAMETER LIST */ 4505 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x26; 4506 break; 4507 case TCM_UNEXPECTED_UNSOLICITED_DATA: 4508 /* CURRENT ERROR */ 4509 buffer[offset] = 0x70; 4510 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; 4511 /* ABORTED COMMAND */ 4512 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; 4513 /* WRITE ERROR */ 4514 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c; 4515 /* UNEXPECTED_UNSOLICITED_DATA */ 4516 
buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0c; 4517 break; 4518 case TCM_SERVICE_CRC_ERROR: 4519 /* CURRENT ERROR */ 4520 buffer[offset] = 0x70; 4521 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; 4522 /* ABORTED COMMAND */ 4523 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; 4524 /* PROTOCOL SERVICE CRC ERROR */ 4525 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x47; 4526 /* N/A */ 4527 buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x05; 4528 break; 4529 case TCM_SNACK_REJECTED: 4530 /* CURRENT ERROR */ 4531 buffer[offset] = 0x70; 4532 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; 4533 /* ABORTED COMMAND */ 4534 buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; 4535 /* READ ERROR */ 4536 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x11; 4537 /* FAILED RETRANSMISSION REQUEST */ 4538 buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x13; 4539 break; 4540 case TCM_WRITE_PROTECTED: 4541 /* CURRENT ERROR */ 4542 buffer[offset] = 0x70; 4543 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; 4544 /* DATA PROTECT */ 4545 buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT; 4546 /* WRITE PROTECTED */ 4547 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x27; 4548 break; 4549 case TCM_CHECK_CONDITION_UNIT_ATTENTION: 4550 /* CURRENT ERROR */ 4551 buffer[offset] = 0x70; 4552 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; 4553 /* UNIT ATTENTION */ 4554 buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION; 4555 core_scsi3_ua_for_check_condition(cmd, &asc, &ascq); 4556 buffer[offset+SPC_ASC_KEY_OFFSET] = asc; 4557 buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq; 4558 break; 4559 case TCM_CHECK_CONDITION_NOT_READY: 4560 /* CURRENT ERROR */ 4561 buffer[offset] = 0x70; 4562 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; 4563 /* Not Ready */ 4564 buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY; 4565 transport_get_sense_codes(cmd, &asc, &ascq); 4566 buffer[offset+SPC_ASC_KEY_OFFSET] = asc; 4567 buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq; 4568 break; 4569 case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE: 4570 default: 4571 /* CURRENT ERROR */ 4572 buffer[offset] = 0x70; 4573 buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; 4574 /* ILLEGAL REQUEST */ 4575 buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; 4576 /* LOGICAL UNIT COMMUNICATION FAILURE */ 4577 buffer[offset+SPC_ASC_KEY_OFFSET] = 0x80; 4578 break; 4579 } 4580 /* 4581 * This code uses linux/include/scsi/scsi.h SAM status codes! 4582 */ 4583 cmd->scsi_status = SAM_STAT_CHECK_CONDITION; 4584 /* 4585 * Automatically padded, this value is encoded in the fabric's 4586 * data_length response PDU containing the SCSI defined sense data. 
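 * For example, for the TCM_NON_EXISTENT_LUN case built above this covers the
 * fixed-format sense data 0x70 (CURRENT ERROR), sense key ILLEGAL_REQUEST,
 * ASC 0x25 (LOGICAL UNIT NOT SUPPORTED), with the additional sense length
 * byte set to 10, plus any fabric-specific offset returned by
 * ->set_fabric_sense_len().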
4587 */
4588 cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset;
4589
4590 after_reason:
4591 return cmd->se_tfo->queue_status(cmd);
4592 }
4593 EXPORT_SYMBOL(transport_send_check_condition_and_sense);
4594
4595 int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
4596 {
4597 int ret = 0;
4598
4599 if (atomic_read(&cmd->t_transport_aborted) != 0) {
4600 if (!send_status ||
4601 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
4602 return 1;
4603 #if 0
4604 pr_debug("Sending delayed SAM_STAT_TASK_ABORTED"
4605 " status for CDB: 0x%02x ITT: 0x%08x\n",
4606 cmd->t_task_cdb[0],
4607 cmd->se_tfo->get_task_tag(cmd));
4608 #endif
4609 cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
4610 cmd->se_tfo->queue_status(cmd);
4611 ret = 1;
4612 }
4613 return ret;
4614 }
4615 EXPORT_SYMBOL(transport_check_aborted_status);
4616
4617 void transport_send_task_abort(struct se_cmd *cmd)
4618 {
4619 unsigned long flags;
4620
4621 spin_lock_irqsave(&cmd->t_state_lock, flags);
4622 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
4623 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4624 return;
4625 }
4626 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
4627
4628 /*
4629 * If there are still expected incoming fabric WRITEs, we wait
4630 * until they have completed before sending a TASK_ABORTED
4631 * response. This response with TASK_ABORTED status will be
4632 * queued back to the fabric module by transport_check_aborted_status().
4633 */
4634 if (cmd->data_direction == DMA_TO_DEVICE) {
4635 if (cmd->se_tfo->write_pending_status(cmd) != 0) {
4636 atomic_inc(&cmd->t_transport_aborted);
4637 smp_mb__after_atomic_inc();
4638 }
4639 }
4640 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
4641 #if 0
4642 pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
4643 " ITT: 0x%08x\n", cmd->t_task_cdb[0],
4644 cmd->se_tfo->get_task_tag(cmd));
4645 #endif
4646 cmd->se_tfo->queue_status(cmd);
4647 }
4648
4649 static int transport_generic_do_tmr(struct se_cmd *cmd)
4650 {
4651 struct se_device *dev = cmd->se_dev;
4652 struct se_tmr_req *tmr = cmd->se_tmr_req;
4653 int ret;
4654
4655 switch (tmr->function) {
4656 case TMR_ABORT_TASK:
4657 tmr->response = TMR_FUNCTION_REJECTED;
4658 break;
4659 case TMR_ABORT_TASK_SET:
4660 case TMR_CLEAR_ACA:
4661 case TMR_CLEAR_TASK_SET:
4662 tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
4663 break;
4664 case TMR_LUN_RESET:
4665 ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
4666 tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
4667 TMR_FUNCTION_REJECTED;
4668 break;
4669 case TMR_TARGET_WARM_RESET:
4670 tmr->response = TMR_FUNCTION_REJECTED;
4671 break;
4672 case TMR_TARGET_COLD_RESET:
4673 tmr->response = TMR_FUNCTION_REJECTED;
4674 break;
4675 default:
4676 pr_err("Unknown TMR function: 0x%02x.\n",
4677 tmr->function);
4678 tmr->response = TMR_FUNCTION_REJECTED;
4679 break;
4680 }
4681
4682 cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
4683 cmd->se_tfo->queue_tm_rsp(cmd);
4684
4685 transport_cmd_check_stop_to_fabric(cmd);
4686 return 0;
4687 }
4688
4689 /* transport_processing_thread():
4690 *
4691 *
4692 */
4693 static int transport_processing_thread(void *param)
4694 {
4695 int ret;
4696 struct se_cmd *cmd;
4697 struct se_device *dev = param;
4698
4699 while (!kthread_should_stop()) {
4700 ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq,
4701 atomic_read(&dev->dev_queue_obj.queue_cnt) ||
4702 kthread_should_stop());
4703 if (ret < 0)
4704 goto out;
4705
4706 get_cmd:
4707 cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj);
4708 if (!cmd)
4709 continue;
4710
4711 switch (cmd->t_state) {
4712 case TRANSPORT_NEW_CMD:
4713 BUG();
4714 break;
4715 case TRANSPORT_NEW_CMD_MAP:
4716 if (!cmd->se_tfo->new_cmd_map) {
4717 pr_err("cmd->se_tfo->new_cmd_map is"
4718 " NULL for TRANSPORT_NEW_CMD_MAP\n");
4719 BUG();
4720 }
4721 ret = cmd->se_tfo->new_cmd_map(cmd);
4722 if (ret < 0) {
4723 transport_generic_request_failure(cmd);
4724 break;
4725 }
4726 ret = transport_generic_new_cmd(cmd);
4727 if (ret < 0) {
4728 transport_generic_request_failure(cmd);
4729 break;
4730 }
4731 break;
4732 case TRANSPORT_PROCESS_WRITE:
4733 transport_generic_process_write(cmd);
4734 break;
4735 case TRANSPORT_PROCESS_TMR:
4736 transport_generic_do_tmr(cmd);
4737 break;
4738 case TRANSPORT_COMPLETE_QF_WP:
4739 transport_write_pending_qf(cmd);
4740 break;
4741 case TRANSPORT_COMPLETE_QF_OK:
4742 transport_complete_qf(cmd);
4743 break;
4744 default:
4745 pr_err("Unknown t_state: %d for ITT: 0x%08x "
4746 "i_state: %d on SE LUN: %u\n",
4747 cmd->t_state,
4748 cmd->se_tfo->get_task_tag(cmd),
4749 cmd->se_tfo->get_cmd_state(cmd),
4750 cmd->se_lun->unpacked_lun);
4751 BUG();
4752 }
4753
4754 goto get_cmd;
4755 }
4756
4757 out:
4758 WARN_ON(!list_empty(&dev->state_task_list));
4759 WARN_ON(!list_empty(&dev->dev_queue_obj.qobj_list));
4760 dev->process_thread = NULL;
4761 return 0;
4762 }
4763
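/*
 * Illustrative sketch only (not part of this file): one way a fabric module
 * might pair the session command helpers exported above when tearing down a
 * session. The example_* name is hypothetical; the two calls and their
 * signatures are the ones defined earlier in this file.
 */
#if 0
static void example_fabric_drain_session(struct se_session *se_sess)
{
	/*
	 * Move all active descriptors from sess_cmd_list onto sess_wait_list
	 * and mark the session as tearing down.
	 */
	target_splice_sess_cmd_list(se_sess);
	/*
	 * Block until every spliced se_cmd has completed and been handed
	 * back through se_tfo->release_cmd(); pass 1 instead of 0 to also
	 * run transport_wait_for_tasks() on each descriptor first.
	 */
	target_wait_for_sess_cmds(se_sess, 0);
}
#endif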