1 /******************************************************************************* 2 * This file contains main functions related to the iSCSI Target Core Driver. 3 * 4 * (c) Copyright 2007-2013 Datera, Inc. 5 * 6 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> 7 * 8 * This program is free software; you can redistribute it and/or modify 9 * it under the terms of the GNU General Public License as published by 10 * the Free Software Foundation; either version 2 of the License, or 11 * (at your option) any later version. 12 * 13 * This program is distributed in the hope that it will be useful, 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 * GNU General Public License for more details. 17 ******************************************************************************/ 18 19 #include <linux/string.h> 20 #include <linux/kthread.h> 21 #include <linux/crypto.h> 22 #include <linux/completion.h> 23 #include <linux/module.h> 24 #include <linux/vmalloc.h> 25 #include <linux/idr.h> 26 #include <asm/unaligned.h> 27 #include <scsi/scsi_proto.h> 28 #include <scsi/iscsi_proto.h> 29 #include <scsi/scsi_tcq.h> 30 #include <target/target_core_base.h> 31 #include <target/target_core_fabric.h> 32 33 #include <target/iscsi/iscsi_target_core.h> 34 #include "iscsi_target_parameters.h" 35 #include "iscsi_target_seq_pdu_list.h" 36 #include "iscsi_target_datain_values.h" 37 #include "iscsi_target_erl0.h" 38 #include "iscsi_target_erl1.h" 39 #include "iscsi_target_erl2.h" 40 #include "iscsi_target_login.h" 41 #include "iscsi_target_tmr.h" 42 #include "iscsi_target_tpg.h" 43 #include "iscsi_target_util.h" 44 #include "iscsi_target.h" 45 #include "iscsi_target_device.h" 46 #include <target/iscsi/iscsi_target_stat.h> 47 48 #include <target/iscsi/iscsi_transport.h> 49 50 static LIST_HEAD(g_tiqn_list); 51 static LIST_HEAD(g_np_list); 52 static DEFINE_SPINLOCK(tiqn_lock); 53 static DEFINE_MUTEX(np_lock); 54 55 static struct idr tiqn_idr; 56 struct idr sess_idr; 57 struct mutex auth_id_lock; 58 spinlock_t sess_idr_lock; 59 60 struct iscsit_global *iscsit_global; 61 62 struct kmem_cache *lio_qr_cache; 63 struct kmem_cache *lio_dr_cache; 64 struct kmem_cache *lio_ooo_cache; 65 struct kmem_cache *lio_r2t_cache; 66 67 static int iscsit_handle_immediate_data(struct iscsi_cmd *, 68 struct iscsi_scsi_req *, u32); 69 70 struct iscsi_tiqn *iscsit_get_tiqn_for_login(unsigned char *buf) 71 { 72 struct iscsi_tiqn *tiqn = NULL; 73 74 spin_lock(&tiqn_lock); 75 list_for_each_entry(tiqn, &g_tiqn_list, tiqn_list) { 76 if (!strcmp(tiqn->tiqn, buf)) { 77 78 spin_lock(&tiqn->tiqn_state_lock); 79 if (tiqn->tiqn_state == TIQN_STATE_ACTIVE) { 80 tiqn->tiqn_access_count++; 81 spin_unlock(&tiqn->tiqn_state_lock); 82 spin_unlock(&tiqn_lock); 83 return tiqn; 84 } 85 spin_unlock(&tiqn->tiqn_state_lock); 86 } 87 } 88 spin_unlock(&tiqn_lock); 89 90 return NULL; 91 } 92 93 static int iscsit_set_tiqn_shutdown(struct iscsi_tiqn *tiqn) 94 { 95 spin_lock(&tiqn->tiqn_state_lock); 96 if (tiqn->tiqn_state == TIQN_STATE_ACTIVE) { 97 tiqn->tiqn_state = TIQN_STATE_SHUTDOWN; 98 spin_unlock(&tiqn->tiqn_state_lock); 99 return 0; 100 } 101 spin_unlock(&tiqn->tiqn_state_lock); 102 103 return -1; 104 } 105 106 void iscsit_put_tiqn_for_login(struct iscsi_tiqn *tiqn) 107 { 108 spin_lock(&tiqn->tiqn_state_lock); 109 tiqn->tiqn_access_count--; 110 spin_unlock(&tiqn->tiqn_state_lock); 111 } 112 113 /* 114 * Note that IQN formatting is expected to be done in userspace, and 115 * no 
explict IQN format checks are done here. 116 */ 117 struct iscsi_tiqn *iscsit_add_tiqn(unsigned char *buf) 118 { 119 struct iscsi_tiqn *tiqn = NULL; 120 int ret; 121 122 if (strlen(buf) >= ISCSI_IQN_LEN) { 123 pr_err("Target IQN exceeds %d bytes\n", 124 ISCSI_IQN_LEN); 125 return ERR_PTR(-EINVAL); 126 } 127 128 tiqn = kzalloc(sizeof(struct iscsi_tiqn), GFP_KERNEL); 129 if (!tiqn) { 130 pr_err("Unable to allocate struct iscsi_tiqn\n"); 131 return ERR_PTR(-ENOMEM); 132 } 133 134 sprintf(tiqn->tiqn, "%s", buf); 135 INIT_LIST_HEAD(&tiqn->tiqn_list); 136 INIT_LIST_HEAD(&tiqn->tiqn_tpg_list); 137 spin_lock_init(&tiqn->tiqn_state_lock); 138 spin_lock_init(&tiqn->tiqn_tpg_lock); 139 spin_lock_init(&tiqn->sess_err_stats.lock); 140 spin_lock_init(&tiqn->login_stats.lock); 141 spin_lock_init(&tiqn->logout_stats.lock); 142 143 tiqn->tiqn_state = TIQN_STATE_ACTIVE; 144 145 idr_preload(GFP_KERNEL); 146 spin_lock(&tiqn_lock); 147 148 ret = idr_alloc(&tiqn_idr, NULL, 0, 0, GFP_NOWAIT); 149 if (ret < 0) { 150 pr_err("idr_alloc() failed for tiqn->tiqn_index\n"); 151 spin_unlock(&tiqn_lock); 152 idr_preload_end(); 153 kfree(tiqn); 154 return ERR_PTR(ret); 155 } 156 tiqn->tiqn_index = ret; 157 list_add_tail(&tiqn->tiqn_list, &g_tiqn_list); 158 159 spin_unlock(&tiqn_lock); 160 idr_preload_end(); 161 162 pr_debug("CORE[0] - Added iSCSI Target IQN: %s\n", tiqn->tiqn); 163 164 return tiqn; 165 166 } 167 168 static void iscsit_wait_for_tiqn(struct iscsi_tiqn *tiqn) 169 { 170 /* 171 * Wait for accesses to said struct iscsi_tiqn to end. 172 */ 173 spin_lock(&tiqn->tiqn_state_lock); 174 while (tiqn->tiqn_access_count != 0) { 175 spin_unlock(&tiqn->tiqn_state_lock); 176 msleep(10); 177 spin_lock(&tiqn->tiqn_state_lock); 178 } 179 spin_unlock(&tiqn->tiqn_state_lock); 180 } 181 182 void iscsit_del_tiqn(struct iscsi_tiqn *tiqn) 183 { 184 /* 185 * iscsit_set_tiqn_shutdown sets tiqn->tiqn_state = TIQN_STATE_SHUTDOWN 186 * while holding tiqn->tiqn_state_lock. This means that all subsequent 187 * attempts to access this struct iscsi_tiqn will fail from both transport 188 * fabric and control code paths. 189 */ 190 if (iscsit_set_tiqn_shutdown(tiqn) < 0) { 191 pr_err("iscsit_set_tiqn_shutdown() failed\n"); 192 return; 193 } 194 195 iscsit_wait_for_tiqn(tiqn); 196 197 spin_lock(&tiqn_lock); 198 list_del(&tiqn->tiqn_list); 199 idr_remove(&tiqn_idr, tiqn->tiqn_index); 200 spin_unlock(&tiqn_lock); 201 202 pr_debug("CORE[0] - Deleted iSCSI Target IQN: %s\n", 203 tiqn->tiqn); 204 kfree(tiqn); 205 } 206 207 int iscsit_access_np(struct iscsi_np *np, struct iscsi_portal_group *tpg) 208 { 209 int ret; 210 /* 211 * Determine if the network portal is accepting storage traffic. 212 */ 213 spin_lock_bh(&np->np_thread_lock); 214 if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) { 215 spin_unlock_bh(&np->np_thread_lock); 216 return -1; 217 } 218 spin_unlock_bh(&np->np_thread_lock); 219 /* 220 * Determine if the portal group is accepting storage traffic. 221 */ 222 spin_lock_bh(&tpg->tpg_state_lock); 223 if (tpg->tpg_state != TPG_STATE_ACTIVE) { 224 spin_unlock_bh(&tpg->tpg_state_lock); 225 return -1; 226 } 227 spin_unlock_bh(&tpg->tpg_state_lock); 228 229 /* 230 * Here we serialize access across the TIQN+TPG Tuple. 
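	 * Only one login attempt is processed per portal group at a time:
	 * np_login_sem is taken here and released in iscsit_deaccess_np()
	 * (or on the error path below) once the login attempt has finished.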
231 */ 232 ret = down_interruptible(&tpg->np_login_sem); 233 if (ret != 0) 234 return -1; 235 236 spin_lock_bh(&tpg->tpg_state_lock); 237 if (tpg->tpg_state != TPG_STATE_ACTIVE) { 238 spin_unlock_bh(&tpg->tpg_state_lock); 239 up(&tpg->np_login_sem); 240 return -1; 241 } 242 spin_unlock_bh(&tpg->tpg_state_lock); 243 244 return 0; 245 } 246 247 void iscsit_login_kref_put(struct kref *kref) 248 { 249 struct iscsi_tpg_np *tpg_np = container_of(kref, 250 struct iscsi_tpg_np, tpg_np_kref); 251 252 complete(&tpg_np->tpg_np_comp); 253 } 254 255 int iscsit_deaccess_np(struct iscsi_np *np, struct iscsi_portal_group *tpg, 256 struct iscsi_tpg_np *tpg_np) 257 { 258 struct iscsi_tiqn *tiqn = tpg->tpg_tiqn; 259 260 up(&tpg->np_login_sem); 261 262 if (tpg_np) 263 kref_put(&tpg_np->tpg_np_kref, iscsit_login_kref_put); 264 265 if (tiqn) 266 iscsit_put_tiqn_for_login(tiqn); 267 268 return 0; 269 } 270 271 bool iscsit_check_np_match( 272 struct sockaddr_storage *sockaddr, 273 struct iscsi_np *np, 274 int network_transport) 275 { 276 struct sockaddr_in *sock_in, *sock_in_e; 277 struct sockaddr_in6 *sock_in6, *sock_in6_e; 278 bool ip_match = false; 279 u16 port, port_e; 280 281 if (sockaddr->ss_family == AF_INET6) { 282 sock_in6 = (struct sockaddr_in6 *)sockaddr; 283 sock_in6_e = (struct sockaddr_in6 *)&np->np_sockaddr; 284 285 if (!memcmp(&sock_in6->sin6_addr.in6_u, 286 &sock_in6_e->sin6_addr.in6_u, 287 sizeof(struct in6_addr))) 288 ip_match = true; 289 290 port = ntohs(sock_in6->sin6_port); 291 port_e = ntohs(sock_in6_e->sin6_port); 292 } else { 293 sock_in = (struct sockaddr_in *)sockaddr; 294 sock_in_e = (struct sockaddr_in *)&np->np_sockaddr; 295 296 if (sock_in->sin_addr.s_addr == sock_in_e->sin_addr.s_addr) 297 ip_match = true; 298 299 port = ntohs(sock_in->sin_port); 300 port_e = ntohs(sock_in_e->sin_port); 301 } 302 303 if (ip_match && (port_e == port) && 304 (np->np_network_transport == network_transport)) 305 return true; 306 307 return false; 308 } 309 310 /* 311 * Called with mutex np_lock held 312 */ 313 static struct iscsi_np *iscsit_get_np( 314 struct sockaddr_storage *sockaddr, 315 int network_transport) 316 { 317 struct iscsi_np *np; 318 bool match; 319 320 list_for_each_entry(np, &g_np_list, np_list) { 321 spin_lock_bh(&np->np_thread_lock); 322 if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) { 323 spin_unlock_bh(&np->np_thread_lock); 324 continue; 325 } 326 327 match = iscsit_check_np_match(sockaddr, np, network_transport); 328 if (match) { 329 /* 330 * Increment the np_exports reference count now to 331 * prevent iscsit_del_np() below from being called 332 * while iscsi_tpg_add_network_portal() is called. 333 */ 334 np->np_exports++; 335 spin_unlock_bh(&np->np_thread_lock); 336 return np; 337 } 338 spin_unlock_bh(&np->np_thread_lock); 339 } 340 341 return NULL; 342 } 343 344 struct iscsi_np *iscsit_add_np( 345 struct sockaddr_storage *sockaddr, 346 int network_transport) 347 { 348 struct iscsi_np *np; 349 int ret; 350 351 mutex_lock(&np_lock); 352 353 /* 354 * Locate the existing struct iscsi_np if already active.. 
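	 * iscsit_get_np() bumps np->np_exports on a match, so an existing
	 * portal (same address, port and network transport) is reused rather
	 * than creating a second listening socket and np thread.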
355 */ 356 np = iscsit_get_np(sockaddr, network_transport); 357 if (np) { 358 mutex_unlock(&np_lock); 359 return np; 360 } 361 362 np = kzalloc(sizeof(struct iscsi_np), GFP_KERNEL); 363 if (!np) { 364 pr_err("Unable to allocate memory for struct iscsi_np\n"); 365 mutex_unlock(&np_lock); 366 return ERR_PTR(-ENOMEM); 367 } 368 369 np->np_flags |= NPF_IP_NETWORK; 370 np->np_network_transport = network_transport; 371 spin_lock_init(&np->np_thread_lock); 372 init_completion(&np->np_restart_comp); 373 INIT_LIST_HEAD(&np->np_list); 374 375 ret = iscsi_target_setup_login_socket(np, sockaddr); 376 if (ret != 0) { 377 kfree(np); 378 mutex_unlock(&np_lock); 379 return ERR_PTR(ret); 380 } 381 382 np->np_thread = kthread_run(iscsi_target_login_thread, np, "iscsi_np"); 383 if (IS_ERR(np->np_thread)) { 384 pr_err("Unable to create kthread: iscsi_np\n"); 385 ret = PTR_ERR(np->np_thread); 386 kfree(np); 387 mutex_unlock(&np_lock); 388 return ERR_PTR(ret); 389 } 390 /* 391 * Increment the np_exports reference count now to prevent 392 * iscsit_del_np() below from being run while a new call to 393 * iscsi_tpg_add_network_portal() for a matching iscsi_np is 394 * active. We don't need to hold np->np_thread_lock at this 395 * point because iscsi_np has not been added to g_np_list yet. 396 */ 397 np->np_exports = 1; 398 np->np_thread_state = ISCSI_NP_THREAD_ACTIVE; 399 400 list_add_tail(&np->np_list, &g_np_list); 401 mutex_unlock(&np_lock); 402 403 pr_debug("CORE[0] - Added Network Portal: %pISpc on %s\n", 404 &np->np_sockaddr, np->np_transport->name); 405 406 return np; 407 } 408 409 int iscsit_reset_np_thread( 410 struct iscsi_np *np, 411 struct iscsi_tpg_np *tpg_np, 412 struct iscsi_portal_group *tpg, 413 bool shutdown) 414 { 415 spin_lock_bh(&np->np_thread_lock); 416 if (np->np_thread_state == ISCSI_NP_THREAD_INACTIVE) { 417 spin_unlock_bh(&np->np_thread_lock); 418 return 0; 419 } 420 np->np_thread_state = ISCSI_NP_THREAD_RESET; 421 422 if (np->np_thread) { 423 spin_unlock_bh(&np->np_thread_lock); 424 send_sig(SIGINT, np->np_thread, 1); 425 wait_for_completion(&np->np_restart_comp); 426 spin_lock_bh(&np->np_thread_lock); 427 } 428 spin_unlock_bh(&np->np_thread_lock); 429 430 if (tpg_np && shutdown) { 431 kref_put(&tpg_np->tpg_np_kref, iscsit_login_kref_put); 432 433 wait_for_completion(&tpg_np->tpg_np_comp); 434 } 435 436 return 0; 437 } 438 439 static void iscsit_free_np(struct iscsi_np *np) 440 { 441 if (np->np_socket) 442 sock_release(np->np_socket); 443 } 444 445 int iscsit_del_np(struct iscsi_np *np) 446 { 447 spin_lock_bh(&np->np_thread_lock); 448 np->np_exports--; 449 if (np->np_exports) { 450 np->enabled = true; 451 spin_unlock_bh(&np->np_thread_lock); 452 return 0; 453 } 454 np->np_thread_state = ISCSI_NP_THREAD_SHUTDOWN; 455 spin_unlock_bh(&np->np_thread_lock); 456 457 if (np->np_thread) { 458 /* 459 * We need to send the signal to wakeup Linux/Net 460 * which may be sleeping in sock_accept().. 
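		 * np_thread_state has already been set to ISCSI_NP_THREAD_SHUTDOWN
		 * above, so once woken the thread can exit its accept loop before
		 * kthread_stop() below reaps it.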
		 */
		send_sig(SIGINT, np->np_thread, 1);
		kthread_stop(np->np_thread);
		np->np_thread = NULL;
	}

	np->np_transport->iscsit_free_np(np);

	mutex_lock(&np_lock);
	list_del(&np->np_list);
	mutex_unlock(&np_lock);

	pr_debug("CORE[0] - Removed Network Portal: %pISpc on %s\n",
		&np->np_sockaddr, np->np_transport->name);

	iscsit_put_transport(np->np_transport);
	kfree(np);
	return 0;
}

static int iscsit_immediate_queue(struct iscsi_conn *, struct iscsi_cmd *, int);
static int iscsit_response_queue(struct iscsi_conn *, struct iscsi_cmd *, int);

static int iscsit_queue_rsp(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
	return 0;
}

static void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	bool scsi_cmd = (cmd->iscsi_opcode == ISCSI_OP_SCSI_CMD);

	spin_lock_bh(&conn->cmd_lock);
	if (!list_empty(&cmd->i_conn_node))
		list_del_init(&cmd->i_conn_node);
	spin_unlock_bh(&conn->cmd_lock);

	__iscsit_free_cmd(cmd, scsi_cmd, true);
}

static enum target_prot_op iscsit_get_sup_prot_ops(struct iscsi_conn *conn)
{
	return TARGET_PROT_NORMAL;
}

static struct iscsit_transport iscsi_target_transport = {
	.name			= "iSCSI/TCP",
	.transport_type		= ISCSI_TCP,
	.owner			= NULL,
	.iscsit_setup_np	= iscsit_setup_np,
	.iscsit_accept_np	= iscsit_accept_np,
	.iscsit_free_np		= iscsit_free_np,
	.iscsit_get_login_rx	= iscsit_get_login_rx,
	.iscsit_put_login_tx	= iscsit_put_login_tx,
	.iscsit_get_dataout	= iscsit_build_r2ts_for_cmd,
	.iscsit_immediate_queue	= iscsit_immediate_queue,
	.iscsit_response_queue	= iscsit_response_queue,
	.iscsit_queue_data_in	= iscsit_queue_rsp,
	.iscsit_queue_status	= iscsit_queue_rsp,
	.iscsit_aborted_task	= iscsit_aborted_task,
	.iscsit_get_sup_prot_ops = iscsit_get_sup_prot_ops,
};

static int __init iscsi_target_init_module(void)
{
	int ret = 0, size;

	pr_debug("iSCSI-Target "ISCSIT_VERSION"\n");

	iscsit_global = kzalloc(sizeof(struct iscsit_global), GFP_KERNEL);
	if (!iscsit_global) {
		pr_err("Unable to allocate memory for iscsit_global\n");
		return -1;
	}
	spin_lock_init(&iscsit_global->ts_bitmap_lock);
	mutex_init(&auth_id_lock);
	spin_lock_init(&sess_idr_lock);
	idr_init(&tiqn_idr);
	idr_init(&sess_idr);

	ret = target_register_template(&iscsi_ops);
	if (ret)
		goto out;

	size = BITS_TO_LONGS(ISCSIT_BITMAP_BITS) * sizeof(long);
	iscsit_global->ts_bitmap = vzalloc(size);
	if (!iscsit_global->ts_bitmap) {
		pr_err("Unable to allocate iscsit_global->ts_bitmap\n");
		goto configfs_out;
	}

	lio_qr_cache = kmem_cache_create("lio_qr_cache",
			sizeof(struct iscsi_queue_req),
			__alignof__(struct iscsi_queue_req), 0, NULL);
	if (!lio_qr_cache) {
		pr_err("Unable to kmem_cache_create() for"
				" lio_qr_cache\n");
		goto bitmap_out;
	}

	lio_dr_cache = kmem_cache_create("lio_dr_cache",
			sizeof(struct iscsi_datain_req),
			__alignof__(struct iscsi_datain_req), 0, NULL);
	if (!lio_dr_cache) {
		pr_err("Unable to kmem_cache_create() for"
				" lio_dr_cache\n");
		goto qr_out;
	}

	lio_ooo_cache = kmem_cache_create("lio_ooo_cache",
			sizeof(struct iscsi_ooo_cmdsn),
			__alignof__(struct iscsi_ooo_cmdsn), 0, NULL);
	if (!lio_ooo_cache) {
		pr_err("Unable to kmem_cache_create() for"
				" lio_ooo_cache\n");
		goto dr_out;
	}

	lio_r2t_cache = kmem_cache_create("lio_r2t_cache",
			sizeof(struct iscsi_r2t), __alignof__(struct iscsi_r2t),
			0, NULL);
	if (!lio_r2t_cache) {
		pr_err("Unable to kmem_cache_create() for"
			" lio_r2t_cache\n");
		goto ooo_out;
	}

	iscsit_register_transport(&iscsi_target_transport);

	if (iscsit_load_discovery_tpg() < 0)
		goto r2t_out;

	return ret;
r2t_out:
	iscsit_unregister_transport(&iscsi_target_transport);
	kmem_cache_destroy(lio_r2t_cache);
ooo_out:
	kmem_cache_destroy(lio_ooo_cache);
dr_out:
	kmem_cache_destroy(lio_dr_cache);
qr_out:
	kmem_cache_destroy(lio_qr_cache);
bitmap_out:
	vfree(iscsit_global->ts_bitmap);
configfs_out:
	/* XXX: this probably wants to be its own unwind step.. */
	if (iscsit_global->discovery_tpg)
		iscsit_tpg_disable_portal_group(iscsit_global->discovery_tpg, 1);
	target_unregister_template(&iscsi_ops);
out:
	kfree(iscsit_global);
	return -ENOMEM;
}

static void __exit iscsi_target_cleanup_module(void)
{
	iscsit_release_discovery_tpg();
	iscsit_unregister_transport(&iscsi_target_transport);
	kmem_cache_destroy(lio_qr_cache);
	kmem_cache_destroy(lio_dr_cache);
	kmem_cache_destroy(lio_ooo_cache);
	kmem_cache_destroy(lio_r2t_cache);

	/*
	 * Shutdown discovery sessions and disable discovery TPG
	 */
	if (iscsit_global->discovery_tpg)
		iscsit_tpg_disable_portal_group(iscsit_global->discovery_tpg, 1);

	target_unregister_template(&iscsi_ops);

	vfree(iscsit_global->ts_bitmap);
	kfree(iscsit_global);
}

static int iscsit_add_reject(
	struct iscsi_conn *conn,
	u8 reason,
	unsigned char *buf)
{
	struct iscsi_cmd *cmd;

	cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
	if (!cmd)
		return -1;

	cmd->iscsi_opcode = ISCSI_OP_REJECT;
	cmd->reject_reason = reason;

	cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
	if (!cmd->buf_ptr) {
		pr_err("Unable to allocate memory for cmd->buf_ptr\n");
		iscsit_free_cmd(cmd, false);
		return -1;
	}

	spin_lock_bh(&conn->cmd_lock);
	list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
	spin_unlock_bh(&conn->cmd_lock);

	cmd->i_state = ISTATE_SEND_REJECT;
	iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);

	return -1;
}

static int iscsit_add_reject_from_cmd(
	struct iscsi_cmd *cmd,
	u8 reason,
	bool add_to_conn,
	unsigned char *buf)
{
	struct iscsi_conn *conn;

	if (!cmd->conn) {
		pr_err("cmd->conn is NULL for ITT: 0x%08x\n",
				cmd->init_task_tag);
		return -1;
	}
	conn = cmd->conn;

	cmd->iscsi_opcode = ISCSI_OP_REJECT;
	cmd->reject_reason = reason;

	cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
	if (!cmd->buf_ptr) {
		pr_err("Unable to allocate memory for cmd->buf_ptr\n");
		iscsit_free_cmd(cmd, false);
		return -1;
	}

	if (add_to_conn) {
		spin_lock_bh(&conn->cmd_lock);
		list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
		spin_unlock_bh(&conn->cmd_lock);
	}

	cmd->i_state = ISTATE_SEND_REJECT;
	iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
	/*
	 * Perform the kref_put now if se_cmd has already been setup by
	 * iscsit_setup_scsi_cmd()
	 */
	if (cmd->se_cmd.se_tfo != NULL) {
		pr_debug("iscsi reject: calling target_put_sess_cmd
>>>>>>\n"); 707 target_put_sess_cmd(&cmd->se_cmd); 708 } 709 return -1; 710 } 711 712 static int iscsit_add_reject_cmd(struct iscsi_cmd *cmd, u8 reason, 713 unsigned char *buf) 714 { 715 return iscsit_add_reject_from_cmd(cmd, reason, true, buf); 716 } 717 718 int iscsit_reject_cmd(struct iscsi_cmd *cmd, u8 reason, unsigned char *buf) 719 { 720 return iscsit_add_reject_from_cmd(cmd, reason, false, buf); 721 } 722 723 /* 724 * Map some portion of the allocated scatterlist to an iovec, suitable for 725 * kernel sockets to copy data in/out. 726 */ 727 static int iscsit_map_iovec( 728 struct iscsi_cmd *cmd, 729 struct kvec *iov, 730 u32 data_offset, 731 u32 data_length) 732 { 733 u32 i = 0; 734 struct scatterlist *sg; 735 unsigned int page_off; 736 737 /* 738 * We know each entry in t_data_sg contains a page. 739 */ 740 sg = &cmd->se_cmd.t_data_sg[data_offset / PAGE_SIZE]; 741 page_off = (data_offset % PAGE_SIZE); 742 743 cmd->first_data_sg = sg; 744 cmd->first_data_sg_off = page_off; 745 746 while (data_length) { 747 u32 cur_len = min_t(u32, data_length, sg->length - page_off); 748 749 iov[i].iov_base = kmap(sg_page(sg)) + sg->offset + page_off; 750 iov[i].iov_len = cur_len; 751 752 data_length -= cur_len; 753 page_off = 0; 754 sg = sg_next(sg); 755 i++; 756 } 757 758 cmd->kmapped_nents = i; 759 760 return i; 761 } 762 763 static void iscsit_unmap_iovec(struct iscsi_cmd *cmd) 764 { 765 u32 i; 766 struct scatterlist *sg; 767 768 sg = cmd->first_data_sg; 769 770 for (i = 0; i < cmd->kmapped_nents; i++) 771 kunmap(sg_page(&sg[i])); 772 } 773 774 static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn) 775 { 776 LIST_HEAD(ack_list); 777 struct iscsi_cmd *cmd, *cmd_p; 778 779 conn->exp_statsn = exp_statsn; 780 781 if (conn->sess->sess_ops->RDMAExtensions) 782 return; 783 784 spin_lock_bh(&conn->cmd_lock); 785 list_for_each_entry_safe(cmd, cmd_p, &conn->conn_cmd_list, i_conn_node) { 786 spin_lock(&cmd->istate_lock); 787 if ((cmd->i_state == ISTATE_SENT_STATUS) && 788 iscsi_sna_lt(cmd->stat_sn, exp_statsn)) { 789 cmd->i_state = ISTATE_REMOVE; 790 spin_unlock(&cmd->istate_lock); 791 list_move_tail(&cmd->i_conn_node, &ack_list); 792 continue; 793 } 794 spin_unlock(&cmd->istate_lock); 795 } 796 spin_unlock_bh(&conn->cmd_lock); 797 798 list_for_each_entry_safe(cmd, cmd_p, &ack_list, i_conn_node) { 799 list_del_init(&cmd->i_conn_node); 800 iscsit_free_cmd(cmd, false); 801 } 802 } 803 804 static int iscsit_allocate_iovecs(struct iscsi_cmd *cmd) 805 { 806 u32 iov_count = max(1UL, DIV_ROUND_UP(cmd->se_cmd.data_length, PAGE_SIZE)); 807 808 iov_count += ISCSI_IOV_DATA_BUFFER; 809 810 cmd->iov_data = kzalloc(iov_count * sizeof(struct kvec), GFP_KERNEL); 811 if (!cmd->iov_data) { 812 pr_err("Unable to allocate cmd->iov_data\n"); 813 return -ENOMEM; 814 } 815 816 cmd->orig_iov_data_count = iov_count; 817 return 0; 818 } 819 820 int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, 821 unsigned char *buf) 822 { 823 int data_direction, payload_length; 824 struct iscsi_scsi_req *hdr; 825 int iscsi_task_attr; 826 int sam_task_attr; 827 828 atomic_long_inc(&conn->sess->cmd_pdus); 829 830 hdr = (struct iscsi_scsi_req *) buf; 831 payload_length = ntoh24(hdr->dlength); 832 833 /* FIXME; Add checks for AdditionalHeaderSegment */ 834 835 if (!(hdr->flags & ISCSI_FLAG_CMD_WRITE) && 836 !(hdr->flags & ISCSI_FLAG_CMD_FINAL)) { 837 pr_err("ISCSI_FLAG_CMD_WRITE & ISCSI_FLAG_CMD_FINAL" 838 " not set. 
Bad iSCSI Initiator.\n");
		return iscsit_add_reject_cmd(cmd,
				ISCSI_REASON_BOOKMARK_INVALID, buf);
	}

	if (((hdr->flags & ISCSI_FLAG_CMD_READ) ||
	     (hdr->flags & ISCSI_FLAG_CMD_WRITE)) && !hdr->data_length) {
		/*
		 * From RFC-3720 Section 10.3.1:
		 *
		 * "Either or both of R and W MAY be 1 when either the
		 *  Expected Data Transfer Length and/or Bidirectional Read
		 *  Expected Data Transfer Length are 0"
		 *
		 * For this case, go ahead and clear the unnecessary bits
		 * to avoid any confusion with ->data_direction.
		 */
		hdr->flags &= ~ISCSI_FLAG_CMD_READ;
		hdr->flags &= ~ISCSI_FLAG_CMD_WRITE;

		pr_warn("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE"
			" set when Expected Data Transfer Length is 0 for"
			" CDB: 0x%02x, Fixing up flags\n", hdr->cdb[0]);
	}

	if (!(hdr->flags & ISCSI_FLAG_CMD_READ) &&
	    !(hdr->flags & ISCSI_FLAG_CMD_WRITE) && (hdr->data_length != 0)) {
		pr_err("ISCSI_FLAG_CMD_READ and/or ISCSI_FLAG_CMD_WRITE"
			" MUST be set if Expected Data Transfer Length is not 0."
			" Bad iSCSI Initiator\n");
		return iscsit_add_reject_cmd(cmd,
				ISCSI_REASON_BOOKMARK_INVALID, buf);
	}

	if ((hdr->flags & ISCSI_FLAG_CMD_READ) &&
	    (hdr->flags & ISCSI_FLAG_CMD_WRITE)) {
		pr_err("Bidirectional operations not supported!\n");
		return iscsit_add_reject_cmd(cmd,
				ISCSI_REASON_BOOKMARK_INVALID, buf);
	}

	if (hdr->opcode & ISCSI_OP_IMMEDIATE) {
		pr_err("Illegally set Immediate Bit in iSCSI Initiator"
				" Scsi Command PDU.\n");
		return iscsit_add_reject_cmd(cmd,
				ISCSI_REASON_BOOKMARK_INVALID, buf);
	}

	if (payload_length && !conn->sess->sess_ops->ImmediateData) {
		pr_err("ImmediateData=No but DataSegmentLength=%u,"
			" protocol error.\n", payload_length);
		return iscsit_add_reject_cmd(cmd,
				ISCSI_REASON_PROTOCOL_ERROR, buf);
	}

	if ((be32_to_cpu(hdr->data_length) == payload_length) &&
	    (!(hdr->flags & ISCSI_FLAG_CMD_FINAL))) {
		pr_err("Expected Data Transfer Length and Length of"
			" Immediate Data are the same, but ISCSI_FLAG_CMD_FINAL"
			" bit is not set, protocol error\n");
		return iscsit_add_reject_cmd(cmd,
				ISCSI_REASON_PROTOCOL_ERROR, buf);
	}

	if (payload_length > be32_to_cpu(hdr->data_length)) {
		pr_err("DataSegmentLength: %u is greater than"
			" EDTL: %u, protocol error.\n", payload_length,
				hdr->data_length);
		return iscsit_add_reject_cmd(cmd,
				ISCSI_REASON_PROTOCOL_ERROR, buf);
	}

	if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
		pr_err("DataSegmentLength: %u is greater than"
			" MaxXmitDataSegmentLength: %u, protocol error.\n",
			payload_length, conn->conn_ops->MaxXmitDataSegmentLength);
		return iscsit_add_reject_cmd(cmd,
				ISCSI_REASON_PROTOCOL_ERROR, buf);
	}

	if (payload_length > conn->sess->sess_ops->FirstBurstLength) {
		pr_err("DataSegmentLength: %u is greater than"
			" FirstBurstLength: %u, protocol error.\n",
			payload_length, conn->sess->sess_ops->FirstBurstLength);
		return iscsit_add_reject_cmd(cmd,
				ISCSI_REASON_BOOKMARK_INVALID, buf);
	}

	data_direction = (hdr->flags & ISCSI_FLAG_CMD_WRITE) ? DMA_TO_DEVICE :
			 (hdr->flags & ISCSI_FLAG_CMD_READ) ?
DMA_FROM_DEVICE : 928 DMA_NONE; 929 930 cmd->data_direction = data_direction; 931 iscsi_task_attr = hdr->flags & ISCSI_FLAG_CMD_ATTR_MASK; 932 /* 933 * Figure out the SAM Task Attribute for the incoming SCSI CDB 934 */ 935 if ((iscsi_task_attr == ISCSI_ATTR_UNTAGGED) || 936 (iscsi_task_attr == ISCSI_ATTR_SIMPLE)) 937 sam_task_attr = TCM_SIMPLE_TAG; 938 else if (iscsi_task_attr == ISCSI_ATTR_ORDERED) 939 sam_task_attr = TCM_ORDERED_TAG; 940 else if (iscsi_task_attr == ISCSI_ATTR_HEAD_OF_QUEUE) 941 sam_task_attr = TCM_HEAD_TAG; 942 else if (iscsi_task_attr == ISCSI_ATTR_ACA) 943 sam_task_attr = TCM_ACA_TAG; 944 else { 945 pr_debug("Unknown iSCSI Task Attribute: 0x%02x, using" 946 " TCM_SIMPLE_TAG\n", iscsi_task_attr); 947 sam_task_attr = TCM_SIMPLE_TAG; 948 } 949 950 cmd->iscsi_opcode = ISCSI_OP_SCSI_CMD; 951 cmd->i_state = ISTATE_NEW_CMD; 952 cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0); 953 cmd->immediate_data = (payload_length) ? 1 : 0; 954 cmd->unsolicited_data = ((!(hdr->flags & ISCSI_FLAG_CMD_FINAL) && 955 (hdr->flags & ISCSI_FLAG_CMD_WRITE)) ? 1 : 0); 956 if (cmd->unsolicited_data) 957 cmd->cmd_flags |= ICF_NON_IMMEDIATE_UNSOLICITED_DATA; 958 959 conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt; 960 if (hdr->flags & ISCSI_FLAG_CMD_READ) 961 cmd->targ_xfer_tag = session_get_next_ttt(conn->sess); 962 else 963 cmd->targ_xfer_tag = 0xFFFFFFFF; 964 cmd->cmd_sn = be32_to_cpu(hdr->cmdsn); 965 cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn); 966 cmd->first_burst_len = payload_length; 967 968 if (!conn->sess->sess_ops->RDMAExtensions && 969 cmd->data_direction == DMA_FROM_DEVICE) { 970 struct iscsi_datain_req *dr; 971 972 dr = iscsit_allocate_datain_req(); 973 if (!dr) 974 return iscsit_add_reject_cmd(cmd, 975 ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf); 976 977 iscsit_attach_datain_req(cmd, dr); 978 } 979 980 /* 981 * Initialize struct se_cmd descriptor from target_core_mod infrastructure 982 */ 983 transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops, 984 conn->sess->se_sess, be32_to_cpu(hdr->data_length), 985 cmd->data_direction, sam_task_attr, 986 cmd->sense_buffer + 2); 987 988 pr_debug("Got SCSI Command, ITT: 0x%08x, CmdSN: 0x%08x," 989 " ExpXferLen: %u, Length: %u, CID: %hu\n", hdr->itt, 990 hdr->cmdsn, be32_to_cpu(hdr->data_length), payload_length, 991 conn->cid); 992 993 target_get_sess_cmd(&cmd->se_cmd, true); 994 995 cmd->sense_reason = transport_lookup_cmd_lun(&cmd->se_cmd, 996 scsilun_to_int(&hdr->lun)); 997 if (cmd->sense_reason) 998 goto attach_cmd; 999 1000 /* only used for printks or comparing with ->ref_task_tag */ 1001 cmd->se_cmd.tag = (__force u32)cmd->init_task_tag; 1002 cmd->sense_reason = target_setup_cmd_from_cdb(&cmd->se_cmd, hdr->cdb); 1003 if (cmd->sense_reason) { 1004 if (cmd->sense_reason == TCM_OUT_OF_RESOURCES) { 1005 return iscsit_add_reject_cmd(cmd, 1006 ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf); 1007 } 1008 1009 goto attach_cmd; 1010 } 1011 1012 if (iscsit_build_pdu_and_seq_lists(cmd, payload_length) < 0) { 1013 return iscsit_add_reject_cmd(cmd, 1014 ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf); 1015 } 1016 1017 attach_cmd: 1018 spin_lock_bh(&conn->cmd_lock); 1019 list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list); 1020 spin_unlock_bh(&conn->cmd_lock); 1021 /* 1022 * Check if we need to delay processing because of ALUA 1023 * Active/NonOptimized primary access state.. 
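	 * core_alua_check_nonop_delay() applies any configured non-optimized
	 * delay before the command continues down the submission path.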
1024 */ 1025 core_alua_check_nonop_delay(&cmd->se_cmd); 1026 1027 return 0; 1028 } 1029 EXPORT_SYMBOL(iscsit_setup_scsi_cmd); 1030 1031 void iscsit_set_unsoliticed_dataout(struct iscsi_cmd *cmd) 1032 { 1033 iscsit_set_dataout_sequence_values(cmd); 1034 1035 spin_lock_bh(&cmd->dataout_timeout_lock); 1036 iscsit_start_dataout_timer(cmd, cmd->conn); 1037 spin_unlock_bh(&cmd->dataout_timeout_lock); 1038 } 1039 EXPORT_SYMBOL(iscsit_set_unsoliticed_dataout); 1040 1041 int iscsit_process_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, 1042 struct iscsi_scsi_req *hdr) 1043 { 1044 int cmdsn_ret = 0; 1045 /* 1046 * Check the CmdSN against ExpCmdSN/MaxCmdSN here if 1047 * the Immediate Bit is not set, and no Immediate 1048 * Data is attached. 1049 * 1050 * A PDU/CmdSN carrying Immediate Data can only 1051 * be processed after the DataCRC has passed. 1052 * If the DataCRC fails, the CmdSN MUST NOT 1053 * be acknowledged. (See below) 1054 */ 1055 if (!cmd->immediate_data) { 1056 cmdsn_ret = iscsit_sequence_cmd(conn, cmd, 1057 (unsigned char *)hdr, hdr->cmdsn); 1058 if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) 1059 return -1; 1060 else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) { 1061 target_put_sess_cmd(&cmd->se_cmd); 1062 return 0; 1063 } 1064 } 1065 1066 iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn)); 1067 1068 /* 1069 * If no Immediate Data is attached, it's OK to return now. 1070 */ 1071 if (!cmd->immediate_data) { 1072 if (!cmd->sense_reason && cmd->unsolicited_data) 1073 iscsit_set_unsoliticed_dataout(cmd); 1074 if (!cmd->sense_reason) 1075 return 0; 1076 1077 target_put_sess_cmd(&cmd->se_cmd); 1078 return 0; 1079 } 1080 1081 /* 1082 * Early CHECK_CONDITIONs with ImmediateData never make it to command 1083 * execution. These exceptions are processed in CmdSN order using 1084 * iscsit_check_received_cmdsn() in iscsit_get_immediate_data() below. 1085 */ 1086 if (cmd->sense_reason) { 1087 if (cmd->reject_reason) 1088 return 0; 1089 1090 return 1; 1091 } 1092 /* 1093 * Call directly into transport_generic_new_cmd() to perform 1094 * the backend memory allocation. 1095 */ 1096 cmd->sense_reason = transport_generic_new_cmd(&cmd->se_cmd); 1097 if (cmd->sense_reason) 1098 return 1; 1099 1100 return 0; 1101 } 1102 EXPORT_SYMBOL(iscsit_process_scsi_cmd); 1103 1104 static int 1105 iscsit_get_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr, 1106 bool dump_payload) 1107 { 1108 int cmdsn_ret = 0, immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION; 1109 /* 1110 * Special case for Unsupported SAM WRITE Opcodes and ImmediateData=Yes. 1111 */ 1112 if (dump_payload) 1113 goto after_immediate_data; 1114 1115 immed_ret = iscsit_handle_immediate_data(cmd, hdr, 1116 cmd->first_burst_len); 1117 after_immediate_data: 1118 if (immed_ret == IMMEDIATE_DATA_NORMAL_OPERATION) { 1119 /* 1120 * A PDU/CmdSN carrying Immediate Data passed 1121 * DataCRC, check against ExpCmdSN/MaxCmdSN if 1122 * Immediate Bit is not set. 
1123 */ 1124 cmdsn_ret = iscsit_sequence_cmd(cmd->conn, cmd, 1125 (unsigned char *)hdr, hdr->cmdsn); 1126 if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) 1127 return -1; 1128 1129 if (cmd->sense_reason || cmdsn_ret == CMDSN_LOWER_THAN_EXP) { 1130 int rc; 1131 1132 rc = iscsit_dump_data_payload(cmd->conn, 1133 cmd->first_burst_len, 1); 1134 target_put_sess_cmd(&cmd->se_cmd); 1135 return rc; 1136 } else if (cmd->unsolicited_data) 1137 iscsit_set_unsoliticed_dataout(cmd); 1138 1139 } else if (immed_ret == IMMEDIATE_DATA_ERL1_CRC_FAILURE) { 1140 /* 1141 * Immediate Data failed DataCRC and ERL>=1, 1142 * silently drop this PDU and let the initiator 1143 * plug the CmdSN gap. 1144 * 1145 * FIXME: Send Unsolicited NOPIN with reserved 1146 * TTT here to help the initiator figure out 1147 * the missing CmdSN, although they should be 1148 * intelligent enough to determine the missing 1149 * CmdSN and issue a retry to plug the sequence. 1150 */ 1151 cmd->i_state = ISTATE_REMOVE; 1152 iscsit_add_cmd_to_immediate_queue(cmd, cmd->conn, cmd->i_state); 1153 } else /* immed_ret == IMMEDIATE_DATA_CANNOT_RECOVER */ 1154 return -1; 1155 1156 return 0; 1157 } 1158 1159 static int 1160 iscsit_handle_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, 1161 unsigned char *buf) 1162 { 1163 struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf; 1164 int rc, immed_data; 1165 bool dump_payload = false; 1166 1167 rc = iscsit_setup_scsi_cmd(conn, cmd, buf); 1168 if (rc < 0) 1169 return 0; 1170 /* 1171 * Allocation iovecs needed for struct socket operations for 1172 * traditional iSCSI block I/O. 1173 */ 1174 if (iscsit_allocate_iovecs(cmd) < 0) { 1175 return iscsit_reject_cmd(cmd, 1176 ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf); 1177 } 1178 immed_data = cmd->immediate_data; 1179 1180 rc = iscsit_process_scsi_cmd(conn, cmd, hdr); 1181 if (rc < 0) 1182 return rc; 1183 else if (rc > 0) 1184 dump_payload = true; 1185 1186 if (!immed_data) 1187 return 0; 1188 1189 return iscsit_get_immediate_data(cmd, hdr, dump_payload); 1190 } 1191 1192 static u32 iscsit_do_crypto_hash_sg( 1193 struct hash_desc *hash, 1194 struct iscsi_cmd *cmd, 1195 u32 data_offset, 1196 u32 data_length, 1197 u32 padding, 1198 u8 *pad_bytes) 1199 { 1200 u32 data_crc; 1201 struct scatterlist *sg; 1202 unsigned int page_off; 1203 1204 crypto_hash_init(hash); 1205 1206 sg = cmd->first_data_sg; 1207 page_off = cmd->first_data_sg_off; 1208 1209 while (data_length) { 1210 u32 cur_len = min_t(u32, data_length, (sg->length - page_off)); 1211 1212 crypto_hash_update(hash, sg, cur_len); 1213 1214 data_length -= cur_len; 1215 page_off = 0; 1216 /* iscsit_map_iovec has already checked for invalid sg pointers */ 1217 sg = sg_next(sg); 1218 } 1219 1220 if (padding) { 1221 struct scatterlist pad_sg; 1222 1223 sg_init_one(&pad_sg, pad_bytes, padding); 1224 crypto_hash_update(hash, &pad_sg, padding); 1225 } 1226 crypto_hash_final(hash, (u8 *) &data_crc); 1227 1228 return data_crc; 1229 } 1230 1231 static void iscsit_do_crypto_hash_buf( 1232 struct hash_desc *hash, 1233 const void *buf, 1234 u32 payload_length, 1235 u32 padding, 1236 u8 *pad_bytes, 1237 u8 *data_crc) 1238 { 1239 struct scatterlist sg; 1240 1241 crypto_hash_init(hash); 1242 1243 sg_init_one(&sg, buf, payload_length); 1244 crypto_hash_update(hash, &sg, payload_length); 1245 1246 if (padding) { 1247 sg_init_one(&sg, pad_bytes, padding); 1248 crypto_hash_update(hash, &sg, padding); 1249 } 1250 crypto_hash_final(hash, data_crc); 1251 } 1252 1253 int 1254 iscsit_check_dataout_hdr(struct iscsi_conn *conn, 
unsigned char *buf, 1255 struct iscsi_cmd **out_cmd) 1256 { 1257 struct iscsi_data *hdr = (struct iscsi_data *)buf; 1258 struct iscsi_cmd *cmd = NULL; 1259 struct se_cmd *se_cmd; 1260 u32 payload_length = ntoh24(hdr->dlength); 1261 int rc; 1262 1263 if (!payload_length) { 1264 pr_warn("DataOUT payload is ZERO, ignoring.\n"); 1265 return 0; 1266 } 1267 1268 /* iSCSI write */ 1269 atomic_long_add(payload_length, &conn->sess->rx_data_octets); 1270 1271 if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) { 1272 pr_err("DataSegmentLength: %u is greater than" 1273 " MaxXmitDataSegmentLength: %u\n", payload_length, 1274 conn->conn_ops->MaxXmitDataSegmentLength); 1275 return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, 1276 buf); 1277 } 1278 1279 cmd = iscsit_find_cmd_from_itt_or_dump(conn, hdr->itt, 1280 payload_length); 1281 if (!cmd) 1282 return 0; 1283 1284 pr_debug("Got DataOut ITT: 0x%08x, TTT: 0x%08x," 1285 " DataSN: 0x%08x, Offset: %u, Length: %u, CID: %hu\n", 1286 hdr->itt, hdr->ttt, hdr->datasn, ntohl(hdr->offset), 1287 payload_length, conn->cid); 1288 1289 if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT) { 1290 pr_err("Command ITT: 0x%08x received DataOUT after" 1291 " last DataOUT received, dumping payload\n", 1292 cmd->init_task_tag); 1293 return iscsit_dump_data_payload(conn, payload_length, 1); 1294 } 1295 1296 if (cmd->data_direction != DMA_TO_DEVICE) { 1297 pr_err("Command ITT: 0x%08x received DataOUT for a" 1298 " NON-WRITE command.\n", cmd->init_task_tag); 1299 return iscsit_dump_data_payload(conn, payload_length, 1); 1300 } 1301 se_cmd = &cmd->se_cmd; 1302 iscsit_mod_dataout_timer(cmd); 1303 1304 if ((be32_to_cpu(hdr->offset) + payload_length) > cmd->se_cmd.data_length) { 1305 pr_err("DataOut Offset: %u, Length %u greater than" 1306 " iSCSI Command EDTL %u, protocol error.\n", 1307 hdr->offset, payload_length, cmd->se_cmd.data_length); 1308 return iscsit_reject_cmd(cmd, ISCSI_REASON_BOOKMARK_INVALID, buf); 1309 } 1310 1311 if (cmd->unsolicited_data) { 1312 int dump_unsolicited_data = 0; 1313 1314 if (conn->sess->sess_ops->InitialR2T) { 1315 pr_err("Received unexpected unsolicited data" 1316 " while InitialR2T=Yes, protocol error.\n"); 1317 transport_send_check_condition_and_sense(&cmd->se_cmd, 1318 TCM_UNEXPECTED_UNSOLICITED_DATA, 0); 1319 return -1; 1320 } 1321 /* 1322 * Special case for dealing with Unsolicited DataOUT 1323 * and Unsupported SAM WRITE Opcodes and SE resource allocation 1324 * failures; 1325 */ 1326 1327 /* Something's amiss if we're not in WRITE_PENDING state... */ 1328 WARN_ON(se_cmd->t_state != TRANSPORT_WRITE_PENDING); 1329 if (!(se_cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE)) 1330 dump_unsolicited_data = 1; 1331 1332 if (dump_unsolicited_data) { 1333 /* 1334 * Check if a delayed TASK_ABORTED status needs to 1335 * be sent now if the ISCSI_FLAG_CMD_FINAL has been 1336 * received with the unsolicitied data out. 1337 */ 1338 if (hdr->flags & ISCSI_FLAG_CMD_FINAL) 1339 iscsit_stop_dataout_timer(cmd); 1340 1341 transport_check_aborted_status(se_cmd, 1342 (hdr->flags & ISCSI_FLAG_CMD_FINAL)); 1343 return iscsit_dump_data_payload(conn, payload_length, 1); 1344 } 1345 } else { 1346 /* 1347 * For the normal solicited data path: 1348 * 1349 * Check for a delayed TASK_ABORTED status and dump any 1350 * incoming data out payload if one exists. Also, when the 1351 * ISCSI_FLAG_CMD_FINAL is set to denote the end of the current 1352 * data out sequence, we decrement outstanding_r2ts. 
Once 1353 * outstanding_r2ts reaches zero, go ahead and send the delayed 1354 * TASK_ABORTED status. 1355 */ 1356 if (se_cmd->transport_state & CMD_T_ABORTED) { 1357 if (hdr->flags & ISCSI_FLAG_CMD_FINAL) 1358 if (--cmd->outstanding_r2ts < 1) { 1359 iscsit_stop_dataout_timer(cmd); 1360 transport_check_aborted_status( 1361 se_cmd, 1); 1362 } 1363 1364 return iscsit_dump_data_payload(conn, payload_length, 1); 1365 } 1366 } 1367 /* 1368 * Preform DataSN, DataSequenceInOrder, DataPDUInOrder, and 1369 * within-command recovery checks before receiving the payload. 1370 */ 1371 rc = iscsit_check_pre_dataout(cmd, buf); 1372 if (rc == DATAOUT_WITHIN_COMMAND_RECOVERY) 1373 return 0; 1374 else if (rc == DATAOUT_CANNOT_RECOVER) 1375 return -1; 1376 1377 *out_cmd = cmd; 1378 return 0; 1379 } 1380 EXPORT_SYMBOL(iscsit_check_dataout_hdr); 1381 1382 static int 1383 iscsit_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, 1384 struct iscsi_data *hdr) 1385 { 1386 struct kvec *iov; 1387 u32 checksum, iov_count = 0, padding = 0, rx_got = 0, rx_size = 0; 1388 u32 payload_length = ntoh24(hdr->dlength); 1389 int iov_ret, data_crc_failed = 0; 1390 1391 rx_size += payload_length; 1392 iov = &cmd->iov_data[0]; 1393 1394 iov_ret = iscsit_map_iovec(cmd, iov, be32_to_cpu(hdr->offset), 1395 payload_length); 1396 if (iov_ret < 0) 1397 return -1; 1398 1399 iov_count += iov_ret; 1400 1401 padding = ((-payload_length) & 3); 1402 if (padding != 0) { 1403 iov[iov_count].iov_base = cmd->pad_bytes; 1404 iov[iov_count++].iov_len = padding; 1405 rx_size += padding; 1406 pr_debug("Receiving %u padding bytes.\n", padding); 1407 } 1408 1409 if (conn->conn_ops->DataDigest) { 1410 iov[iov_count].iov_base = &checksum; 1411 iov[iov_count++].iov_len = ISCSI_CRC_LEN; 1412 rx_size += ISCSI_CRC_LEN; 1413 } 1414 1415 rx_got = rx_data(conn, &cmd->iov_data[0], iov_count, rx_size); 1416 1417 iscsit_unmap_iovec(cmd); 1418 1419 if (rx_got != rx_size) 1420 return -1; 1421 1422 if (conn->conn_ops->DataDigest) { 1423 u32 data_crc; 1424 1425 data_crc = iscsit_do_crypto_hash_sg(&conn->conn_rx_hash, cmd, 1426 be32_to_cpu(hdr->offset), 1427 payload_length, padding, 1428 cmd->pad_bytes); 1429 1430 if (checksum != data_crc) { 1431 pr_err("ITT: 0x%08x, Offset: %u, Length: %u," 1432 " DataSN: 0x%08x, CRC32C DataDigest 0x%08x" 1433 " does not match computed 0x%08x\n", 1434 hdr->itt, hdr->offset, payload_length, 1435 hdr->datasn, checksum, data_crc); 1436 data_crc_failed = 1; 1437 } else { 1438 pr_debug("Got CRC32C DataDigest 0x%08x for" 1439 " %u bytes of Data Out\n", checksum, 1440 payload_length); 1441 } 1442 } 1443 1444 return data_crc_failed; 1445 } 1446 1447 int 1448 iscsit_check_dataout_payload(struct iscsi_cmd *cmd, struct iscsi_data *hdr, 1449 bool data_crc_failed) 1450 { 1451 struct iscsi_conn *conn = cmd->conn; 1452 int rc, ooo_cmdsn; 1453 /* 1454 * Increment post receive data and CRC values or perform 1455 * within-command recovery. 1456 */ 1457 rc = iscsit_check_post_dataout(cmd, (unsigned char *)hdr, data_crc_failed); 1458 if ((rc == DATAOUT_NORMAL) || (rc == DATAOUT_WITHIN_COMMAND_RECOVERY)) 1459 return 0; 1460 else if (rc == DATAOUT_SEND_R2T) { 1461 iscsit_set_dataout_sequence_values(cmd); 1462 conn->conn_transport->iscsit_get_dataout(conn, cmd, false); 1463 } else if (rc == DATAOUT_SEND_TO_TRANSPORT) { 1464 /* 1465 * Handle extra special case for out of order 1466 * Unsolicited Data Out. 
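	 * If the command arrived with an out of order CmdSN (ICF_OOO_CMDSN),
	 * execution is left to the delayed CmdSN handling rather than calling
	 * target_execute_cmd() directly below.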
1467 */ 1468 spin_lock_bh(&cmd->istate_lock); 1469 ooo_cmdsn = (cmd->cmd_flags & ICF_OOO_CMDSN); 1470 cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT; 1471 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT; 1472 spin_unlock_bh(&cmd->istate_lock); 1473 1474 iscsit_stop_dataout_timer(cmd); 1475 if (ooo_cmdsn) 1476 return 0; 1477 target_execute_cmd(&cmd->se_cmd); 1478 return 0; 1479 } else /* DATAOUT_CANNOT_RECOVER */ 1480 return -1; 1481 1482 return 0; 1483 } 1484 EXPORT_SYMBOL(iscsit_check_dataout_payload); 1485 1486 static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf) 1487 { 1488 struct iscsi_cmd *cmd = NULL; 1489 struct iscsi_data *hdr = (struct iscsi_data *)buf; 1490 int rc; 1491 bool data_crc_failed = false; 1492 1493 rc = iscsit_check_dataout_hdr(conn, buf, &cmd); 1494 if (rc < 0) 1495 return 0; 1496 else if (!cmd) 1497 return 0; 1498 1499 rc = iscsit_get_dataout(conn, cmd, hdr); 1500 if (rc < 0) 1501 return rc; 1502 else if (rc > 0) 1503 data_crc_failed = true; 1504 1505 return iscsit_check_dataout_payload(cmd, hdr, data_crc_failed); 1506 } 1507 1508 int iscsit_setup_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd, 1509 struct iscsi_nopout *hdr) 1510 { 1511 u32 payload_length = ntoh24(hdr->dlength); 1512 1513 if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL)) { 1514 pr_err("NopOUT Flag's, Left Most Bit not set, protocol error.\n"); 1515 if (!cmd) 1516 return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, 1517 (unsigned char *)hdr); 1518 1519 return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, 1520 (unsigned char *)hdr); 1521 } 1522 1523 if (hdr->itt == RESERVED_ITT && !(hdr->opcode & ISCSI_OP_IMMEDIATE)) { 1524 pr_err("NOPOUT ITT is reserved, but Immediate Bit is" 1525 " not set, protocol error.\n"); 1526 if (!cmd) 1527 return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, 1528 (unsigned char *)hdr); 1529 1530 return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, 1531 (unsigned char *)hdr); 1532 } 1533 1534 if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) { 1535 pr_err("NOPOUT Ping Data DataSegmentLength: %u is" 1536 " greater than MaxXmitDataSegmentLength: %u, protocol" 1537 " error.\n", payload_length, 1538 conn->conn_ops->MaxXmitDataSegmentLength); 1539 if (!cmd) 1540 return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, 1541 (unsigned char *)hdr); 1542 1543 return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, 1544 (unsigned char *)hdr); 1545 } 1546 1547 pr_debug("Got NOPOUT Ping %s ITT: 0x%08x, TTT: 0x%08x," 1548 " CmdSN: 0x%08x, ExpStatSN: 0x%08x, Length: %u\n", 1549 hdr->itt == RESERVED_ITT ? "Response" : "Request", 1550 hdr->itt, hdr->ttt, hdr->cmdsn, hdr->exp_statsn, 1551 payload_length); 1552 /* 1553 * This is not a response to a Unsolicited NopIN, which means 1554 * it can either be a NOPOUT ping request (with a valid ITT), 1555 * or a NOPOUT not requesting a NOPIN (with a reserved ITT). 1556 * Either way, make sure we allocate an struct iscsi_cmd, as both 1557 * can contain ping data. 1558 */ 1559 if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) { 1560 cmd->iscsi_opcode = ISCSI_OP_NOOP_OUT; 1561 cmd->i_state = ISTATE_SEND_NOPIN; 1562 cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 
1 : 0);
		conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
		cmd->targ_xfer_tag = 0xFFFFFFFF;
		cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
		cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn);
		cmd->data_direction = DMA_NONE;
	}

	return 0;
}
EXPORT_SYMBOL(iscsit_setup_nop_out);

int iscsit_process_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
			   struct iscsi_nopout *hdr)
{
	struct iscsi_cmd *cmd_p = NULL;
	int cmdsn_ret = 0;
	/*
	 * Initiator is expecting a NopIN ping reply..
	 */
	if (hdr->itt != RESERVED_ITT) {
		if (!cmd)
			return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
					(unsigned char *)hdr);

		spin_lock_bh(&conn->cmd_lock);
		list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
		spin_unlock_bh(&conn->cmd_lock);

		iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));

		if (hdr->opcode & ISCSI_OP_IMMEDIATE) {
			iscsit_add_cmd_to_response_queue(cmd, conn,
					cmd->i_state);
			return 0;
		}

		cmdsn_ret = iscsit_sequence_cmd(conn, cmd,
				(unsigned char *)hdr, hdr->cmdsn);
		if (cmdsn_ret == CMDSN_LOWER_THAN_EXP)
			return 0;
		if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
			return -1;

		return 0;
	}
	/*
	 * This was a response to an unsolicited NOPIN ping.
	 */
	if (hdr->ttt != cpu_to_be32(0xFFFFFFFF)) {
		cmd_p = iscsit_find_cmd_from_ttt(conn, be32_to_cpu(hdr->ttt));
		if (!cmd_p)
			return -EINVAL;

		iscsit_stop_nopin_response_timer(conn);

		cmd_p->i_state = ISTATE_REMOVE;
		iscsit_add_cmd_to_immediate_queue(cmd_p, conn, cmd_p->i_state);

		iscsit_start_nopin_timer(conn);
		return 0;
	}
	/*
	 * Otherwise, the initiator is not expecting a NOPIN response.
	 * Just ignore for now.
1628 */ 1629 return 0; 1630 } 1631 EXPORT_SYMBOL(iscsit_process_nop_out); 1632 1633 static int iscsit_handle_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd, 1634 unsigned char *buf) 1635 { 1636 unsigned char *ping_data = NULL; 1637 struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf; 1638 struct kvec *iov = NULL; 1639 u32 payload_length = ntoh24(hdr->dlength); 1640 int ret; 1641 1642 ret = iscsit_setup_nop_out(conn, cmd, hdr); 1643 if (ret < 0) 1644 return 0; 1645 /* 1646 * Handle NOP-OUT payload for traditional iSCSI sockets 1647 */ 1648 if (payload_length && hdr->ttt == cpu_to_be32(0xFFFFFFFF)) { 1649 u32 checksum, data_crc, padding = 0; 1650 int niov = 0, rx_got, rx_size = payload_length; 1651 1652 ping_data = kzalloc(payload_length + 1, GFP_KERNEL); 1653 if (!ping_data) { 1654 pr_err("Unable to allocate memory for" 1655 " NOPOUT ping data.\n"); 1656 ret = -1; 1657 goto out; 1658 } 1659 1660 iov = &cmd->iov_misc[0]; 1661 iov[niov].iov_base = ping_data; 1662 iov[niov++].iov_len = payload_length; 1663 1664 padding = ((-payload_length) & 3); 1665 if (padding != 0) { 1666 pr_debug("Receiving %u additional bytes" 1667 " for padding.\n", padding); 1668 iov[niov].iov_base = &cmd->pad_bytes; 1669 iov[niov++].iov_len = padding; 1670 rx_size += padding; 1671 } 1672 if (conn->conn_ops->DataDigest) { 1673 iov[niov].iov_base = &checksum; 1674 iov[niov++].iov_len = ISCSI_CRC_LEN; 1675 rx_size += ISCSI_CRC_LEN; 1676 } 1677 1678 rx_got = rx_data(conn, &cmd->iov_misc[0], niov, rx_size); 1679 if (rx_got != rx_size) { 1680 ret = -1; 1681 goto out; 1682 } 1683 1684 if (conn->conn_ops->DataDigest) { 1685 iscsit_do_crypto_hash_buf(&conn->conn_rx_hash, 1686 ping_data, payload_length, 1687 padding, cmd->pad_bytes, 1688 (u8 *)&data_crc); 1689 1690 if (checksum != data_crc) { 1691 pr_err("Ping data CRC32C DataDigest" 1692 " 0x%08x does not match computed 0x%08x\n", 1693 checksum, data_crc); 1694 if (!conn->sess->sess_ops->ErrorRecoveryLevel) { 1695 pr_err("Unable to recover from" 1696 " NOPOUT Ping DataCRC failure while in" 1697 " ERL=0.\n"); 1698 ret = -1; 1699 goto out; 1700 } else { 1701 /* 1702 * Silently drop this PDU and let the 1703 * initiator plug the CmdSN gap. 1704 */ 1705 pr_debug("Dropping NOPOUT" 1706 " Command CmdSN: 0x%08x due to" 1707 " DataCRC error.\n", hdr->cmdsn); 1708 ret = 0; 1709 goto out; 1710 } 1711 } else { 1712 pr_debug("Got CRC32C DataDigest" 1713 " 0x%08x for %u bytes of ping data.\n", 1714 checksum, payload_length); 1715 } 1716 } 1717 1718 ping_data[payload_length] = '\0'; 1719 /* 1720 * Attach ping data to struct iscsi_cmd->buf_ptr. 
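		 * The same payload is echoed back to the initiator in the NopIN
		 * response and is freed together with the command.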
1721 */ 1722 cmd->buf_ptr = ping_data; 1723 cmd->buf_ptr_size = payload_length; 1724 1725 pr_debug("Got %u bytes of NOPOUT ping" 1726 " data.\n", payload_length); 1727 pr_debug("Ping Data: \"%s\"\n", ping_data); 1728 } 1729 1730 return iscsit_process_nop_out(conn, cmd, hdr); 1731 out: 1732 if (cmd) 1733 iscsit_free_cmd(cmd, false); 1734 1735 kfree(ping_data); 1736 return ret; 1737 } 1738 1739 int 1740 iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, 1741 unsigned char *buf) 1742 { 1743 struct se_tmr_req *se_tmr; 1744 struct iscsi_tmr_req *tmr_req; 1745 struct iscsi_tm *hdr; 1746 int out_of_order_cmdsn = 0, ret; 1747 bool sess_ref = false; 1748 u8 function; 1749 1750 hdr = (struct iscsi_tm *) buf; 1751 hdr->flags &= ~ISCSI_FLAG_CMD_FINAL; 1752 function = hdr->flags; 1753 1754 pr_debug("Got Task Management Request ITT: 0x%08x, CmdSN:" 1755 " 0x%08x, Function: 0x%02x, RefTaskTag: 0x%08x, RefCmdSN:" 1756 " 0x%08x, CID: %hu\n", hdr->itt, hdr->cmdsn, function, 1757 hdr->rtt, hdr->refcmdsn, conn->cid); 1758 1759 if ((function != ISCSI_TM_FUNC_ABORT_TASK) && 1760 ((function != ISCSI_TM_FUNC_TASK_REASSIGN) && 1761 hdr->rtt != RESERVED_ITT)) { 1762 pr_err("RefTaskTag should be set to 0xFFFFFFFF.\n"); 1763 hdr->rtt = RESERVED_ITT; 1764 } 1765 1766 if ((function == ISCSI_TM_FUNC_TASK_REASSIGN) && 1767 !(hdr->opcode & ISCSI_OP_IMMEDIATE)) { 1768 pr_err("Task Management Request TASK_REASSIGN not" 1769 " issued as immediate command, bad iSCSI Initiator" 1770 "implementation\n"); 1771 return iscsit_add_reject_cmd(cmd, 1772 ISCSI_REASON_PROTOCOL_ERROR, buf); 1773 } 1774 if ((function != ISCSI_TM_FUNC_ABORT_TASK) && 1775 be32_to_cpu(hdr->refcmdsn) != ISCSI_RESERVED_TAG) 1776 hdr->refcmdsn = cpu_to_be32(ISCSI_RESERVED_TAG); 1777 1778 cmd->data_direction = DMA_NONE; 1779 1780 cmd->tmr_req = kzalloc(sizeof(struct iscsi_tmr_req), GFP_KERNEL); 1781 if (!cmd->tmr_req) { 1782 pr_err("Unable to allocate memory for" 1783 " Task Management command!\n"); 1784 return iscsit_add_reject_cmd(cmd, 1785 ISCSI_REASON_BOOKMARK_NO_RESOURCES, 1786 buf); 1787 } 1788 1789 /* 1790 * TASK_REASSIGN for ERL=2 / connection stays inside of 1791 * LIO-Target $FABRIC_MOD 1792 */ 1793 if (function != ISCSI_TM_FUNC_TASK_REASSIGN) { 1794 1795 u8 tcm_function; 1796 int ret; 1797 1798 transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops, 1799 conn->sess->se_sess, 0, DMA_NONE, 1800 TCM_SIMPLE_TAG, cmd->sense_buffer + 2); 1801 1802 target_get_sess_cmd(&cmd->se_cmd, true); 1803 sess_ref = true; 1804 1805 switch (function) { 1806 case ISCSI_TM_FUNC_ABORT_TASK: 1807 tcm_function = TMR_ABORT_TASK; 1808 break; 1809 case ISCSI_TM_FUNC_ABORT_TASK_SET: 1810 tcm_function = TMR_ABORT_TASK_SET; 1811 break; 1812 case ISCSI_TM_FUNC_CLEAR_ACA: 1813 tcm_function = TMR_CLEAR_ACA; 1814 break; 1815 case ISCSI_TM_FUNC_CLEAR_TASK_SET: 1816 tcm_function = TMR_CLEAR_TASK_SET; 1817 break; 1818 case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET: 1819 tcm_function = TMR_LUN_RESET; 1820 break; 1821 case ISCSI_TM_FUNC_TARGET_WARM_RESET: 1822 tcm_function = TMR_TARGET_WARM_RESET; 1823 break; 1824 case ISCSI_TM_FUNC_TARGET_COLD_RESET: 1825 tcm_function = TMR_TARGET_COLD_RESET; 1826 break; 1827 default: 1828 pr_err("Unknown iSCSI TMR Function:" 1829 " 0x%02x\n", function); 1830 return iscsit_add_reject_cmd(cmd, 1831 ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf); 1832 } 1833 1834 ret = core_tmr_alloc_req(&cmd->se_cmd, cmd->tmr_req, 1835 tcm_function, GFP_KERNEL); 1836 if (ret < 0) 1837 return iscsit_add_reject_cmd(cmd, 1838 ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf); 1839 
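		/* Link the iSCSI TMR descriptor to the se_tmr_req allocated above. */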
1840 cmd->tmr_req->se_tmr_req = cmd->se_cmd.se_tmr_req; 1841 } 1842 1843 cmd->iscsi_opcode = ISCSI_OP_SCSI_TMFUNC; 1844 cmd->i_state = ISTATE_SEND_TASKMGTRSP; 1845 cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0); 1846 cmd->init_task_tag = hdr->itt; 1847 cmd->targ_xfer_tag = 0xFFFFFFFF; 1848 cmd->cmd_sn = be32_to_cpu(hdr->cmdsn); 1849 cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn); 1850 se_tmr = cmd->se_cmd.se_tmr_req; 1851 tmr_req = cmd->tmr_req; 1852 /* 1853 * Locate the struct se_lun for all TMRs not related to ERL=2 TASK_REASSIGN 1854 */ 1855 if (function != ISCSI_TM_FUNC_TASK_REASSIGN) { 1856 ret = transport_lookup_tmr_lun(&cmd->se_cmd, 1857 scsilun_to_int(&hdr->lun)); 1858 if (ret < 0) { 1859 se_tmr->response = ISCSI_TMF_RSP_NO_LUN; 1860 goto attach; 1861 } 1862 } 1863 1864 switch (function) { 1865 case ISCSI_TM_FUNC_ABORT_TASK: 1866 se_tmr->response = iscsit_tmr_abort_task(cmd, buf); 1867 if (se_tmr->response) 1868 goto attach; 1869 break; 1870 case ISCSI_TM_FUNC_ABORT_TASK_SET: 1871 case ISCSI_TM_FUNC_CLEAR_ACA: 1872 case ISCSI_TM_FUNC_CLEAR_TASK_SET: 1873 case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET: 1874 break; 1875 case ISCSI_TM_FUNC_TARGET_WARM_RESET: 1876 if (iscsit_tmr_task_warm_reset(conn, tmr_req, buf) < 0) { 1877 se_tmr->response = ISCSI_TMF_RSP_AUTH_FAILED; 1878 goto attach; 1879 } 1880 break; 1881 case ISCSI_TM_FUNC_TARGET_COLD_RESET: 1882 if (iscsit_tmr_task_cold_reset(conn, tmr_req, buf) < 0) { 1883 se_tmr->response = ISCSI_TMF_RSP_AUTH_FAILED; 1884 goto attach; 1885 } 1886 break; 1887 case ISCSI_TM_FUNC_TASK_REASSIGN: 1888 se_tmr->response = iscsit_tmr_task_reassign(cmd, buf); 1889 /* 1890 * Perform sanity checks on the ExpDataSN only if the 1891 * TASK_REASSIGN was successful. 1892 */ 1893 if (se_tmr->response) 1894 break; 1895 1896 if (iscsit_check_task_reassign_expdatasn(tmr_req, conn) < 0) 1897 return iscsit_add_reject_cmd(cmd, 1898 ISCSI_REASON_BOOKMARK_INVALID, buf); 1899 break; 1900 default: 1901 pr_err("Unknown TMR function: 0x%02x, protocol" 1902 " error.\n", function); 1903 se_tmr->response = ISCSI_TMF_RSP_NOT_SUPPORTED; 1904 goto attach; 1905 } 1906 1907 if ((function != ISCSI_TM_FUNC_TASK_REASSIGN) && 1908 (se_tmr->response == ISCSI_TMF_RSP_COMPLETE)) 1909 se_tmr->call_transport = 1; 1910 attach: 1911 spin_lock_bh(&conn->cmd_lock); 1912 list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list); 1913 spin_unlock_bh(&conn->cmd_lock); 1914 1915 if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) { 1916 int cmdsn_ret = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn); 1917 if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP) 1918 out_of_order_cmdsn = 1; 1919 else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) 1920 return 0; 1921 else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) 1922 return -1; 1923 } 1924 iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn)); 1925 1926 if (out_of_order_cmdsn || !(hdr->opcode & ISCSI_OP_IMMEDIATE)) 1927 return 0; 1928 /* 1929 * Found the referenced task, send to transport for processing. 1930 */ 1931 if (se_tmr->call_transport) 1932 return transport_generic_handle_tmr(&cmd->se_cmd); 1933 1934 /* 1935 * Could not find the referenced LUN, task, or Task Management 1936 * command not authorized or supported. Change state and 1937 * let the tx_thread send the response. 1938 * 1939 * For connection recovery, this is also the default action for 1940 * TMR TASK_REASSIGN. 
1941 */ 1942 if (sess_ref) { 1943 pr_debug("Handle TMR, using sess_ref=true check\n"); 1944 target_put_sess_cmd(&cmd->se_cmd); 1945 } 1946 1947 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); 1948 return 0; 1949 } 1950 EXPORT_SYMBOL(iscsit_handle_task_mgt_cmd); 1951 1952 /* #warning FIXME: Support Text Command parameters besides SendTargets */ 1953 int 1954 iscsit_setup_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, 1955 struct iscsi_text *hdr) 1956 { 1957 u32 payload_length = ntoh24(hdr->dlength); 1958 1959 if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) { 1960 pr_err("Unable to accept text parameter length: %u" 1961 "greater than MaxXmitDataSegmentLength %u.\n", 1962 payload_length, conn->conn_ops->MaxXmitDataSegmentLength); 1963 return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, 1964 (unsigned char *)hdr); 1965 } 1966 1967 if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL) || 1968 (hdr->flags & ISCSI_FLAG_TEXT_CONTINUE)) { 1969 pr_err("Multi sequence text commands currently not supported\n"); 1970 return iscsit_reject_cmd(cmd, ISCSI_REASON_CMD_NOT_SUPPORTED, 1971 (unsigned char *)hdr); 1972 } 1973 1974 pr_debug("Got Text Request: ITT: 0x%08x, CmdSN: 0x%08x," 1975 " ExpStatSN: 0x%08x, Length: %u\n", hdr->itt, hdr->cmdsn, 1976 hdr->exp_statsn, payload_length); 1977 1978 cmd->iscsi_opcode = ISCSI_OP_TEXT; 1979 cmd->i_state = ISTATE_SEND_TEXTRSP; 1980 cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0); 1981 conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt; 1982 cmd->targ_xfer_tag = 0xFFFFFFFF; 1983 cmd->cmd_sn = be32_to_cpu(hdr->cmdsn); 1984 cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn); 1985 cmd->data_direction = DMA_NONE; 1986 cmd->text_in_ptr = NULL; 1987 1988 return 0; 1989 } 1990 EXPORT_SYMBOL(iscsit_setup_text_cmd); 1991 1992 int 1993 iscsit_process_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, 1994 struct iscsi_text *hdr) 1995 { 1996 unsigned char *text_in = cmd->text_in_ptr, *text_ptr; 1997 int cmdsn_ret; 1998 1999 if (!text_in) { 2000 cmd->targ_xfer_tag = be32_to_cpu(hdr->ttt); 2001 if (cmd->targ_xfer_tag == 0xFFFFFFFF) { 2002 pr_err("Unable to locate text_in buffer for sendtargets" 2003 " discovery\n"); 2004 goto reject; 2005 } 2006 goto empty_sendtargets; 2007 } 2008 if (strncmp("SendTargets", text_in, 11) != 0) { 2009 pr_err("Received Text Data that is not" 2010 " SendTargets, cannot continue.\n"); 2011 goto reject; 2012 } 2013 text_ptr = strchr(text_in, '='); 2014 if (!text_ptr) { 2015 pr_err("No \"=\" separator found in Text Data," 2016 " cannot continue.\n"); 2017 goto reject; 2018 } 2019 if (!strncmp("=All", text_ptr, 4)) { 2020 cmd->cmd_flags |= ICF_SENDTARGETS_ALL; 2021 } else if (!strncmp("=iqn.", text_ptr, 5) || 2022 !strncmp("=eui.", text_ptr, 5)) { 2023 cmd->cmd_flags |= ICF_SENDTARGETS_SINGLE; 2024 } else { 2025 pr_err("Unable to locate valid SendTargets=%s value\n", text_ptr); 2026 goto reject; 2027 } 2028 2029 spin_lock_bh(&conn->cmd_lock); 2030 list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list); 2031 spin_unlock_bh(&conn->cmd_lock); 2032 2033 empty_sendtargets: 2034 iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn)); 2035 2036 if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) { 2037 cmdsn_ret = iscsit_sequence_cmd(conn, cmd, 2038 (unsigned char *)hdr, hdr->cmdsn); 2039 if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) 2040 return -1; 2041 2042 return 0; 2043 } 2044 2045 return iscsit_execute_cmd(cmd, 0); 2046 2047 reject: 2048 return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, 2049 
(unsigned char *)hdr); 2050 } 2051 EXPORT_SYMBOL(iscsit_process_text_cmd); 2052 2053 static int 2054 iscsit_handle_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, 2055 unsigned char *buf) 2056 { 2057 struct iscsi_text *hdr = (struct iscsi_text *)buf; 2058 char *text_in = NULL; 2059 u32 payload_length = ntoh24(hdr->dlength); 2060 int rx_size, rc; 2061 2062 rc = iscsit_setup_text_cmd(conn, cmd, hdr); 2063 if (rc < 0) 2064 return 0; 2065 2066 rx_size = payload_length; 2067 if (payload_length) { 2068 u32 checksum = 0, data_crc = 0; 2069 u32 padding = 0, pad_bytes = 0; 2070 int niov = 0, rx_got; 2071 struct kvec iov[3]; 2072 2073 text_in = kzalloc(payload_length, GFP_KERNEL); 2074 if (!text_in) { 2075 pr_err("Unable to allocate memory for" 2076 " incoming text parameters\n"); 2077 goto reject; 2078 } 2079 cmd->text_in_ptr = text_in; 2080 2081 memset(iov, 0, 3 * sizeof(struct kvec)); 2082 iov[niov].iov_base = text_in; 2083 iov[niov++].iov_len = payload_length; 2084 2085 padding = ((-payload_length) & 3); 2086 if (padding != 0) { 2087 iov[niov].iov_base = &pad_bytes; 2088 iov[niov++].iov_len = padding; 2089 rx_size += padding; 2090 pr_debug("Receiving %u additional bytes" 2091 " for padding.\n", padding); 2092 } 2093 if (conn->conn_ops->DataDigest) { 2094 iov[niov].iov_base = &checksum; 2095 iov[niov++].iov_len = ISCSI_CRC_LEN; 2096 rx_size += ISCSI_CRC_LEN; 2097 } 2098 2099 rx_got = rx_data(conn, &iov[0], niov, rx_size); 2100 if (rx_got != rx_size) 2101 goto reject; 2102 2103 if (conn->conn_ops->DataDigest) { 2104 iscsit_do_crypto_hash_buf(&conn->conn_rx_hash, 2105 text_in, payload_length, 2106 padding, (u8 *)&pad_bytes, 2107 (u8 *)&data_crc); 2108 2109 if (checksum != data_crc) { 2110 pr_err("Text data CRC32C DataDigest" 2111 " 0x%08x does not match computed" 2112 " 0x%08x\n", checksum, data_crc); 2113 if (!conn->sess->sess_ops->ErrorRecoveryLevel) { 2114 pr_err("Unable to recover from" 2115 " Text Data digest failure while in" 2116 " ERL=0.\n"); 2117 goto reject; 2118 } else { 2119 /* 2120 * Silently drop this PDU and let the 2121 * initiator plug the CmdSN gap. 
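 * The PDU is dropped before being sequenced, so the CmdSN gap stays
 * visible to the initiator for recovery at ERL >= 1.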
2122 */ 2123 pr_debug("Dropping Text" 2124 " Command CmdSN: 0x%08x due to" 2125 " DataCRC error.\n", hdr->cmdsn); 2126 kfree(text_in); 2127 return 0; 2128 } 2129 } else { 2130 pr_debug("Got CRC32C DataDigest" 2131 " 0x%08x for %u bytes of text data.\n", 2132 checksum, payload_length); 2133 } 2134 } 2135 text_in[payload_length - 1] = '\0'; 2136 pr_debug("Successfully read %d bytes of text" 2137 " data.\n", payload_length); 2138 } 2139 2140 return iscsit_process_text_cmd(conn, cmd, hdr); 2141 2142 reject: 2143 kfree(cmd->text_in_ptr); 2144 cmd->text_in_ptr = NULL; 2145 return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, buf); 2146 } 2147 2148 int iscsit_logout_closesession(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 2149 { 2150 struct iscsi_conn *conn_p; 2151 struct iscsi_session *sess = conn->sess; 2152 2153 pr_debug("Received logout request CLOSESESSION on CID: %hu" 2154 " for SID: %u.\n", conn->cid, conn->sess->sid); 2155 2156 atomic_set(&sess->session_logout, 1); 2157 atomic_set(&conn->conn_logout_remove, 1); 2158 conn->conn_logout_reason = ISCSI_LOGOUT_REASON_CLOSE_SESSION; 2159 2160 iscsit_inc_conn_usage_count(conn); 2161 iscsit_inc_session_usage_count(sess); 2162 2163 spin_lock_bh(&sess->conn_lock); 2164 list_for_each_entry(conn_p, &sess->sess_conn_list, conn_list) { 2165 if (conn_p->conn_state != TARG_CONN_STATE_LOGGED_IN) 2166 continue; 2167 2168 pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n"); 2169 conn_p->conn_state = TARG_CONN_STATE_IN_LOGOUT; 2170 } 2171 spin_unlock_bh(&sess->conn_lock); 2172 2173 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); 2174 2175 return 0; 2176 } 2177 2178 int iscsit_logout_closeconnection(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 2179 { 2180 struct iscsi_conn *l_conn; 2181 struct iscsi_session *sess = conn->sess; 2182 2183 pr_debug("Received logout request CLOSECONNECTION for CID:" 2184 " %hu on CID: %hu.\n", cmd->logout_cid, conn->cid); 2185 2186 /* 2187 * A Logout Request with a CLOSECONNECTION reason code for a CID 2188 * can arrive on a connection with a differing CID. 2189 */ 2190 if (conn->cid == cmd->logout_cid) { 2191 spin_lock_bh(&conn->state_lock); 2192 pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n"); 2193 conn->conn_state = TARG_CONN_STATE_IN_LOGOUT; 2194 2195 atomic_set(&conn->conn_logout_remove, 1); 2196 conn->conn_logout_reason = ISCSI_LOGOUT_REASON_CLOSE_CONNECTION; 2197 iscsit_inc_conn_usage_count(conn); 2198 2199 spin_unlock_bh(&conn->state_lock); 2200 } else { 2201 /* 2202 * Handle all different cid CLOSECONNECTION requests in 2203 * iscsit_logout_post_handler_diffcid() as to give enough 2204 * time for any non immediate command's CmdSN to be 2205 * acknowledged on the connection in question. 2206 * 2207 * Here we simply make sure the CID is still around. 
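 * If the CID can no longer be located, the Logout Response carries
 * ISCSI_LOGOUT_CID_NOT_FOUND instead.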
2208 */ 2209 l_conn = iscsit_get_conn_from_cid(sess, 2210 cmd->logout_cid); 2211 if (!l_conn) { 2212 cmd->logout_response = ISCSI_LOGOUT_CID_NOT_FOUND; 2213 iscsit_add_cmd_to_response_queue(cmd, conn, 2214 cmd->i_state); 2215 return 0; 2216 } 2217 2218 iscsit_dec_conn_usage_count(l_conn); 2219 } 2220 2221 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); 2222 2223 return 0; 2224 } 2225 2226 int iscsit_logout_removeconnforrecovery(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 2227 { 2228 struct iscsi_session *sess = conn->sess; 2229 2230 pr_debug("Received explicit REMOVECONNFORRECOVERY logout for" 2231 " CID: %hu on CID: %hu.\n", cmd->logout_cid, conn->cid); 2232 2233 if (sess->sess_ops->ErrorRecoveryLevel != 2) { 2234 pr_err("Received Logout Request REMOVECONNFORRECOVERY" 2235 " while ERL!=2.\n"); 2236 cmd->logout_response = ISCSI_LOGOUT_RECOVERY_UNSUPPORTED; 2237 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); 2238 return 0; 2239 } 2240 2241 if (conn->cid == cmd->logout_cid) { 2242 pr_err("Received Logout Request REMOVECONNFORRECOVERY" 2243 " with CID: %hu on CID: %hu, implementation error.\n", 2244 cmd->logout_cid, conn->cid); 2245 cmd->logout_response = ISCSI_LOGOUT_CLEANUP_FAILED; 2246 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); 2247 return 0; 2248 } 2249 2250 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); 2251 2252 return 0; 2253 } 2254 2255 int 2256 iscsit_handle_logout_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, 2257 unsigned char *buf) 2258 { 2259 int cmdsn_ret, logout_remove = 0; 2260 u8 reason_code = 0; 2261 struct iscsi_logout *hdr; 2262 struct iscsi_tiqn *tiqn = iscsit_snmp_get_tiqn(conn); 2263 2264 hdr = (struct iscsi_logout *) buf; 2265 reason_code = (hdr->flags & 0x7f); 2266 2267 if (tiqn) { 2268 spin_lock(&tiqn->logout_stats.lock); 2269 if (reason_code == ISCSI_LOGOUT_REASON_CLOSE_SESSION) 2270 tiqn->logout_stats.normal_logouts++; 2271 else 2272 tiqn->logout_stats.abnormal_logouts++; 2273 spin_unlock(&tiqn->logout_stats.lock); 2274 } 2275 2276 pr_debug("Got Logout Request ITT: 0x%08x CmdSN: 0x%08x" 2277 " ExpStatSN: 0x%08x Reason: 0x%02x CID: %hu on CID: %hu\n", 2278 hdr->itt, hdr->cmdsn, hdr->exp_statsn, reason_code, 2279 hdr->cid, conn->cid); 2280 2281 if (conn->conn_state != TARG_CONN_STATE_LOGGED_IN) { 2282 pr_err("Received logout request on connection that" 2283 " is not in logged in state, ignoring request.\n"); 2284 iscsit_free_cmd(cmd, false); 2285 return 0; 2286 } 2287 2288 cmd->iscsi_opcode = ISCSI_OP_LOGOUT; 2289 cmd->i_state = ISTATE_SEND_LOGOUTRSP; 2290 cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0); 2291 conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt; 2292 cmd->targ_xfer_tag = 0xFFFFFFFF; 2293 cmd->cmd_sn = be32_to_cpu(hdr->cmdsn); 2294 cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn); 2295 cmd->logout_cid = be16_to_cpu(hdr->cid); 2296 cmd->logout_reason = reason_code; 2297 cmd->data_direction = DMA_NONE; 2298 2299 /* 2300 * We need to sleep in these cases (by returning 1) until the Logout 2301 * Response gets sent in the tx thread. 
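 * That is: a CLOSESESSION logout, or a CLOSECONNECTION logout whose CID
 * matches the connection the request arrived on.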
2302 */ 2303 if ((reason_code == ISCSI_LOGOUT_REASON_CLOSE_SESSION) || 2304 ((reason_code == ISCSI_LOGOUT_REASON_CLOSE_CONNECTION) && 2305 be16_to_cpu(hdr->cid) == conn->cid)) 2306 logout_remove = 1; 2307 2308 spin_lock_bh(&conn->cmd_lock); 2309 list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list); 2310 spin_unlock_bh(&conn->cmd_lock); 2311 2312 if (reason_code != ISCSI_LOGOUT_REASON_RECOVERY) 2313 iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn)); 2314 2315 /* 2316 * Immediate commands are executed, well, immediately. 2317 * Non-Immediate Logout Commands are executed in CmdSN order. 2318 */ 2319 if (cmd->immediate_cmd) { 2320 int ret = iscsit_execute_cmd(cmd, 0); 2321 2322 if (ret < 0) 2323 return ret; 2324 } else { 2325 cmdsn_ret = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn); 2326 if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) 2327 logout_remove = 0; 2328 else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) 2329 return -1; 2330 } 2331 2332 return logout_remove; 2333 } 2334 EXPORT_SYMBOL(iscsit_handle_logout_cmd); 2335 2336 static int iscsit_handle_snack( 2337 struct iscsi_conn *conn, 2338 unsigned char *buf) 2339 { 2340 struct iscsi_snack *hdr; 2341 2342 hdr = (struct iscsi_snack *) buf; 2343 hdr->flags &= ~ISCSI_FLAG_CMD_FINAL; 2344 2345 pr_debug("Got ISCSI_INIT_SNACK, ITT: 0x%08x, ExpStatSN:" 2346 " 0x%08x, Type: 0x%02x, BegRun: 0x%08x, RunLength: 0x%08x," 2347 " CID: %hu\n", hdr->itt, hdr->exp_statsn, hdr->flags, 2348 hdr->begrun, hdr->runlength, conn->cid); 2349 2350 if (!conn->sess->sess_ops->ErrorRecoveryLevel) { 2351 pr_err("Initiator sent SNACK request while in" 2352 " ErrorRecoveryLevel=0.\n"); 2353 return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, 2354 buf); 2355 } 2356 /* 2357 * SNACK_DATA and SNACK_R2T are both 0, so check which function to 2358 * call from inside iscsi_send_recovery_datain_or_r2t(). 
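 * The SNACK type itself is carried in the low bits of hdr->flags
 * (ISCSI_FLAG_SNACK_TYPE_MASK).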
2359 */ 2360 switch (hdr->flags & ISCSI_FLAG_SNACK_TYPE_MASK) { 2361 case 0: 2362 return iscsit_handle_recovery_datain_or_r2t(conn, buf, 2363 hdr->itt, 2364 be32_to_cpu(hdr->ttt), 2365 be32_to_cpu(hdr->begrun), 2366 be32_to_cpu(hdr->runlength)); 2367 case ISCSI_FLAG_SNACK_TYPE_STATUS: 2368 return iscsit_handle_status_snack(conn, hdr->itt, 2369 be32_to_cpu(hdr->ttt), 2370 be32_to_cpu(hdr->begrun), be32_to_cpu(hdr->runlength)); 2371 case ISCSI_FLAG_SNACK_TYPE_DATA_ACK: 2372 return iscsit_handle_data_ack(conn, be32_to_cpu(hdr->ttt), 2373 be32_to_cpu(hdr->begrun), 2374 be32_to_cpu(hdr->runlength)); 2375 case ISCSI_FLAG_SNACK_TYPE_RDATA: 2376 /* FIXME: Support R-Data SNACK */ 2377 pr_err("R-Data SNACK Not Supported.\n"); 2378 return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, 2379 buf); 2380 default: 2381 pr_err("Unknown SNACK type 0x%02x, protocol" 2382 " error.\n", hdr->flags & 0x0f); 2383 return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, 2384 buf); 2385 } 2386 2387 return 0; 2388 } 2389 2390 static void iscsit_rx_thread_wait_for_tcp(struct iscsi_conn *conn) 2391 { 2392 if ((conn->sock->sk->sk_shutdown & SEND_SHUTDOWN) || 2393 (conn->sock->sk->sk_shutdown & RCV_SHUTDOWN)) { 2394 wait_for_completion_interruptible_timeout( 2395 &conn->rx_half_close_comp, 2396 ISCSI_RX_THREAD_TCP_TIMEOUT * HZ); 2397 } 2398 } 2399 2400 static int iscsit_handle_immediate_data( 2401 struct iscsi_cmd *cmd, 2402 struct iscsi_scsi_req *hdr, 2403 u32 length) 2404 { 2405 int iov_ret, rx_got = 0, rx_size = 0; 2406 u32 checksum, iov_count = 0, padding = 0; 2407 struct iscsi_conn *conn = cmd->conn; 2408 struct kvec *iov; 2409 2410 iov_ret = iscsit_map_iovec(cmd, cmd->iov_data, cmd->write_data_done, length); 2411 if (iov_ret < 0) 2412 return IMMEDIATE_DATA_CANNOT_RECOVER; 2413 2414 rx_size = length; 2415 iov_count = iov_ret; 2416 iov = &cmd->iov_data[0]; 2417 2418 padding = ((-length) & 3); 2419 if (padding != 0) { 2420 iov[iov_count].iov_base = cmd->pad_bytes; 2421 iov[iov_count++].iov_len = padding; 2422 rx_size += padding; 2423 } 2424 2425 if (conn->conn_ops->DataDigest) { 2426 iov[iov_count].iov_base = &checksum; 2427 iov[iov_count++].iov_len = ISCSI_CRC_LEN; 2428 rx_size += ISCSI_CRC_LEN; 2429 } 2430 2431 rx_got = rx_data(conn, &cmd->iov_data[0], iov_count, rx_size); 2432 2433 iscsit_unmap_iovec(cmd); 2434 2435 if (rx_got != rx_size) { 2436 iscsit_rx_thread_wait_for_tcp(conn); 2437 return IMMEDIATE_DATA_CANNOT_RECOVER; 2438 } 2439 2440 if (conn->conn_ops->DataDigest) { 2441 u32 data_crc; 2442 2443 data_crc = iscsit_do_crypto_hash_sg(&conn->conn_rx_hash, cmd, 2444 cmd->write_data_done, length, padding, 2445 cmd->pad_bytes); 2446 2447 if (checksum != data_crc) { 2448 pr_err("ImmediateData CRC32C DataDigest 0x%08x" 2449 " does not match computed 0x%08x\n", checksum, 2450 data_crc); 2451 2452 if (!conn->sess->sess_ops->ErrorRecoveryLevel) { 2453 pr_err("Unable to recover from" 2454 " Immediate Data digest failure while" 2455 " in ERL=0.\n"); 2456 iscsit_reject_cmd(cmd, 2457 ISCSI_REASON_DATA_DIGEST_ERROR, 2458 (unsigned char *)hdr); 2459 return IMMEDIATE_DATA_CANNOT_RECOVER; 2460 } else { 2461 iscsit_reject_cmd(cmd, 2462 ISCSI_REASON_DATA_DIGEST_ERROR, 2463 (unsigned char *)hdr); 2464 return IMMEDIATE_DATA_ERL1_CRC_FAILURE; 2465 } 2466 } else { 2467 pr_debug("Got CRC32C DataDigest 0x%08x for" 2468 " %u bytes of Immediate Data\n", checksum, 2469 length); 2470 } 2471 } 2472 2473 cmd->write_data_done += length; 2474 2475 if (cmd->write_data_done == cmd->se_cmd.data_length) { 2476 
spin_lock_bh(&cmd->istate_lock); 2477 cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT; 2478 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT; 2479 spin_unlock_bh(&cmd->istate_lock); 2480 } 2481 2482 return IMMEDIATE_DATA_NORMAL_OPERATION; 2483 } 2484 2485 /* 2486 * Called with sess->conn_lock held. 2487 */ 2488 /* #warning iscsi_build_conn_drop_async_message() only sends out on connections 2489 with active network interface */ 2490 static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn) 2491 { 2492 struct iscsi_cmd *cmd; 2493 struct iscsi_conn *conn_p; 2494 bool found = false; 2495 2496 /* 2497 * Only send a Asynchronous Message on connections whos network 2498 * interface is still functional. 2499 */ 2500 list_for_each_entry(conn_p, &conn->sess->sess_conn_list, conn_list) { 2501 if (conn_p->conn_state == TARG_CONN_STATE_LOGGED_IN) { 2502 iscsit_inc_conn_usage_count(conn_p); 2503 found = true; 2504 break; 2505 } 2506 } 2507 2508 if (!found) 2509 return; 2510 2511 cmd = iscsit_allocate_cmd(conn_p, TASK_RUNNING); 2512 if (!cmd) { 2513 iscsit_dec_conn_usage_count(conn_p); 2514 return; 2515 } 2516 2517 cmd->logout_cid = conn->cid; 2518 cmd->iscsi_opcode = ISCSI_OP_ASYNC_EVENT; 2519 cmd->i_state = ISTATE_SEND_ASYNCMSG; 2520 2521 spin_lock_bh(&conn_p->cmd_lock); 2522 list_add_tail(&cmd->i_conn_node, &conn_p->conn_cmd_list); 2523 spin_unlock_bh(&conn_p->cmd_lock); 2524 2525 iscsit_add_cmd_to_response_queue(cmd, conn_p, cmd->i_state); 2526 iscsit_dec_conn_usage_count(conn_p); 2527 } 2528 2529 static int iscsit_send_conn_drop_async_message( 2530 struct iscsi_cmd *cmd, 2531 struct iscsi_conn *conn) 2532 { 2533 struct iscsi_async *hdr; 2534 2535 cmd->tx_size = ISCSI_HDR_LEN; 2536 cmd->iscsi_opcode = ISCSI_OP_ASYNC_EVENT; 2537 2538 hdr = (struct iscsi_async *) cmd->pdu; 2539 hdr->opcode = ISCSI_OP_ASYNC_EVENT; 2540 hdr->flags = ISCSI_FLAG_CMD_FINAL; 2541 cmd->init_task_tag = RESERVED_ITT; 2542 cmd->targ_xfer_tag = 0xFFFFFFFF; 2543 put_unaligned_be64(0xFFFFFFFFFFFFFFFFULL, &hdr->rsvd4[0]); 2544 cmd->stat_sn = conn->stat_sn++; 2545 hdr->statsn = cpu_to_be32(cmd->stat_sn); 2546 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 2547 hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn)); 2548 hdr->async_event = ISCSI_ASYNC_MSG_DROPPING_CONNECTION; 2549 hdr->param1 = cpu_to_be16(cmd->logout_cid); 2550 hdr->param2 = cpu_to_be16(conn->sess->sess_ops->DefaultTime2Wait); 2551 hdr->param3 = cpu_to_be16(conn->sess->sess_ops->DefaultTime2Retain); 2552 2553 if (conn->conn_ops->HeaderDigest) { 2554 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 2555 2556 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr, 2557 ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); 2558 2559 cmd->tx_size += ISCSI_CRC_LEN; 2560 pr_debug("Attaching CRC32C HeaderDigest to" 2561 " Async Message 0x%08x\n", *header_digest); 2562 } 2563 2564 cmd->iov_misc[0].iov_base = cmd->pdu; 2565 cmd->iov_misc[0].iov_len = cmd->tx_size; 2566 cmd->iov_misc_count = 1; 2567 2568 pr_debug("Sending Connection Dropped Async Message StatSN:" 2569 " 0x%08x, for CID: %hu on CID: %hu\n", cmd->stat_sn, 2570 cmd->logout_cid, conn->cid); 2571 return 0; 2572 } 2573 2574 static void iscsit_tx_thread_wait_for_tcp(struct iscsi_conn *conn) 2575 { 2576 if ((conn->sock->sk->sk_shutdown & SEND_SHUTDOWN) || 2577 (conn->sock->sk->sk_shutdown & RCV_SHUTDOWN)) { 2578 wait_for_completion_interruptible_timeout( 2579 &conn->tx_half_close_comp, 2580 ISCSI_TX_THREAD_TCP_TIMEOUT * HZ); 2581 } 2582 } 2583 2584 static void 2585 iscsit_build_datain_pdu(struct 
iscsi_cmd *cmd, struct iscsi_conn *conn, 2586 struct iscsi_datain *datain, struct iscsi_data_rsp *hdr, 2587 bool set_statsn) 2588 { 2589 hdr->opcode = ISCSI_OP_SCSI_DATA_IN; 2590 hdr->flags = datain->flags; 2591 if (hdr->flags & ISCSI_FLAG_DATA_STATUS) { 2592 if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) { 2593 hdr->flags |= ISCSI_FLAG_DATA_OVERFLOW; 2594 hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count); 2595 } else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) { 2596 hdr->flags |= ISCSI_FLAG_DATA_UNDERFLOW; 2597 hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count); 2598 } 2599 } 2600 hton24(hdr->dlength, datain->length); 2601 if (hdr->flags & ISCSI_FLAG_DATA_ACK) 2602 int_to_scsilun(cmd->se_cmd.orig_fe_lun, 2603 (struct scsi_lun *)&hdr->lun); 2604 else 2605 put_unaligned_le64(0xFFFFFFFFFFFFFFFFULL, &hdr->lun); 2606 2607 hdr->itt = cmd->init_task_tag; 2608 2609 if (hdr->flags & ISCSI_FLAG_DATA_ACK) 2610 hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag); 2611 else 2612 hdr->ttt = cpu_to_be32(0xFFFFFFFF); 2613 if (set_statsn) 2614 hdr->statsn = cpu_to_be32(cmd->stat_sn); 2615 else 2616 hdr->statsn = cpu_to_be32(0xFFFFFFFF); 2617 2618 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 2619 hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn)); 2620 hdr->datasn = cpu_to_be32(datain->data_sn); 2621 hdr->offset = cpu_to_be32(datain->offset); 2622 2623 pr_debug("Built DataIN ITT: 0x%08x, StatSN: 0x%08x," 2624 " DataSN: 0x%08x, Offset: %u, Length: %u, CID: %hu\n", 2625 cmd->init_task_tag, ntohl(hdr->statsn), ntohl(hdr->datasn), 2626 ntohl(hdr->offset), datain->length, conn->cid); 2627 } 2628 2629 static int iscsit_send_datain(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 2630 { 2631 struct iscsi_data_rsp *hdr = (struct iscsi_data_rsp *)&cmd->pdu[0]; 2632 struct iscsi_datain datain; 2633 struct iscsi_datain_req *dr; 2634 struct kvec *iov; 2635 u32 iov_count = 0, tx_size = 0; 2636 int eodr = 0, ret, iov_ret; 2637 bool set_statsn = false; 2638 2639 memset(&datain, 0, sizeof(struct iscsi_datain)); 2640 dr = iscsit_get_datain_values(cmd, &datain); 2641 if (!dr) { 2642 pr_err("iscsit_get_datain_values failed for ITT: 0x%08x\n", 2643 cmd->init_task_tag); 2644 return -1; 2645 } 2646 /* 2647 * Be paranoid and double check the logic for now. 2648 */ 2649 if ((datain.offset + datain.length) > cmd->se_cmd.data_length) { 2650 pr_err("Command ITT: 0x%08x, datain.offset: %u and" 2651 " datain.length: %u exceeds cmd->data_length: %u\n", 2652 cmd->init_task_tag, datain.offset, datain.length, 2653 cmd->se_cmd.data_length); 2654 return -1; 2655 } 2656 2657 atomic_long_add(datain.length, &conn->sess->tx_data_octets); 2658 /* 2659 * Special case for successfully execution w/ both DATAIN 2660 * and Sense Data. 
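 * When sense data will be returned, the status is carried by a separate
 * SCSI Response PDU, so ISCSI_FLAG_DATA_STATUS is cleared from this
 * final DataIN.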
2661 */ 2662 if ((datain.flags & ISCSI_FLAG_DATA_STATUS) && 2663 (cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)) 2664 datain.flags &= ~ISCSI_FLAG_DATA_STATUS; 2665 else { 2666 if ((dr->dr_complete == DATAIN_COMPLETE_NORMAL) || 2667 (dr->dr_complete == DATAIN_COMPLETE_CONNECTION_RECOVERY)) { 2668 iscsit_increment_maxcmdsn(cmd, conn->sess); 2669 cmd->stat_sn = conn->stat_sn++; 2670 set_statsn = true; 2671 } else if (dr->dr_complete == 2672 DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY) 2673 set_statsn = true; 2674 } 2675 2676 iscsit_build_datain_pdu(cmd, conn, &datain, hdr, set_statsn); 2677 2678 iov = &cmd->iov_data[0]; 2679 iov[iov_count].iov_base = cmd->pdu; 2680 iov[iov_count++].iov_len = ISCSI_HDR_LEN; 2681 tx_size += ISCSI_HDR_LEN; 2682 2683 if (conn->conn_ops->HeaderDigest) { 2684 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 2685 2686 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, cmd->pdu, 2687 ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); 2688 2689 iov[0].iov_len += ISCSI_CRC_LEN; 2690 tx_size += ISCSI_CRC_LEN; 2691 2692 pr_debug("Attaching CRC32 HeaderDigest" 2693 " for DataIN PDU 0x%08x\n", *header_digest); 2694 } 2695 2696 iov_ret = iscsit_map_iovec(cmd, &cmd->iov_data[1], 2697 datain.offset, datain.length); 2698 if (iov_ret < 0) 2699 return -1; 2700 2701 iov_count += iov_ret; 2702 tx_size += datain.length; 2703 2704 cmd->padding = ((-datain.length) & 3); 2705 if (cmd->padding) { 2706 iov[iov_count].iov_base = cmd->pad_bytes; 2707 iov[iov_count++].iov_len = cmd->padding; 2708 tx_size += cmd->padding; 2709 2710 pr_debug("Attaching %u padding bytes\n", 2711 cmd->padding); 2712 } 2713 if (conn->conn_ops->DataDigest) { 2714 cmd->data_crc = iscsit_do_crypto_hash_sg(&conn->conn_tx_hash, cmd, 2715 datain.offset, datain.length, cmd->padding, cmd->pad_bytes); 2716 2717 iov[iov_count].iov_base = &cmd->data_crc; 2718 iov[iov_count++].iov_len = ISCSI_CRC_LEN; 2719 tx_size += ISCSI_CRC_LEN; 2720 2721 pr_debug("Attached CRC32C DataDigest %d bytes, crc" 2722 " 0x%08x\n", datain.length+cmd->padding, cmd->data_crc); 2723 } 2724 2725 cmd->iov_data_count = iov_count; 2726 cmd->tx_size = tx_size; 2727 2728 ret = iscsit_fe_sendpage_sg(cmd, conn); 2729 2730 iscsit_unmap_iovec(cmd); 2731 2732 if (ret < 0) { 2733 iscsit_tx_thread_wait_for_tcp(conn); 2734 return ret; 2735 } 2736 2737 if (dr->dr_complete) { 2738 eodr = (cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ? 2739 2 : 1; 2740 iscsit_free_datain_req(cmd, dr); 2741 } 2742 2743 return eodr; 2744 } 2745 2746 int 2747 iscsit_build_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn, 2748 struct iscsi_logout_rsp *hdr) 2749 { 2750 struct iscsi_conn *logout_conn = NULL; 2751 struct iscsi_conn_recovery *cr = NULL; 2752 struct iscsi_session *sess = conn->sess; 2753 /* 2754 * The actual shutting down of Sessions and/or Connections 2755 * for CLOSESESSION and CLOSECONNECTION Logout Requests 2756 * is done in scsi_logout_post_handler(). 2757 */ 2758 switch (cmd->logout_reason) { 2759 case ISCSI_LOGOUT_REASON_CLOSE_SESSION: 2760 pr_debug("iSCSI session logout successful, setting" 2761 " logout response to ISCSI_LOGOUT_SUCCESS.\n"); 2762 cmd->logout_response = ISCSI_LOGOUT_SUCCESS; 2763 break; 2764 case ISCSI_LOGOUT_REASON_CLOSE_CONNECTION: 2765 if (cmd->logout_response == ISCSI_LOGOUT_CID_NOT_FOUND) 2766 break; 2767 /* 2768 * For CLOSECONNECTION logout requests carrying 2769 * a matching logout CID -> local CID, the reference 2770 * for the local CID will have been incremented in 2771 * iscsi_logout_closeconnection(). 
2772 * 2773 * For CLOSECONNECTION logout requests carrying 2774 * a different CID than the connection it arrived 2775 * on, the connection responding to cmd->logout_cid 2776 * is stopped in iscsit_logout_post_handler_diffcid(). 2777 */ 2778 2779 pr_debug("iSCSI CID: %hu logout on CID: %hu" 2780 " successful.\n", cmd->logout_cid, conn->cid); 2781 cmd->logout_response = ISCSI_LOGOUT_SUCCESS; 2782 break; 2783 case ISCSI_LOGOUT_REASON_RECOVERY: 2784 if ((cmd->logout_response == ISCSI_LOGOUT_RECOVERY_UNSUPPORTED) || 2785 (cmd->logout_response == ISCSI_LOGOUT_CLEANUP_FAILED)) 2786 break; 2787 /* 2788 * If the connection is still active from our point of view 2789 * force connection recovery to occur. 2790 */ 2791 logout_conn = iscsit_get_conn_from_cid_rcfr(sess, 2792 cmd->logout_cid); 2793 if (logout_conn) { 2794 iscsit_connection_reinstatement_rcfr(logout_conn); 2795 iscsit_dec_conn_usage_count(logout_conn); 2796 } 2797 2798 cr = iscsit_get_inactive_connection_recovery_entry( 2799 conn->sess, cmd->logout_cid); 2800 if (!cr) { 2801 pr_err("Unable to locate CID: %hu for" 2802 " REMOVECONNFORRECOVERY Logout Request.\n", 2803 cmd->logout_cid); 2804 cmd->logout_response = ISCSI_LOGOUT_CID_NOT_FOUND; 2805 break; 2806 } 2807 2808 iscsit_discard_cr_cmds_by_expstatsn(cr, cmd->exp_stat_sn); 2809 2810 pr_debug("iSCSI REMOVECONNFORRECOVERY logout" 2811 " for recovery for CID: %hu on CID: %hu successful.\n", 2812 cmd->logout_cid, conn->cid); 2813 cmd->logout_response = ISCSI_LOGOUT_SUCCESS; 2814 break; 2815 default: 2816 pr_err("Unknown cmd->logout_reason: 0x%02x\n", 2817 cmd->logout_reason); 2818 return -1; 2819 } 2820 2821 hdr->opcode = ISCSI_OP_LOGOUT_RSP; 2822 hdr->flags |= ISCSI_FLAG_CMD_FINAL; 2823 hdr->response = cmd->logout_response; 2824 hdr->itt = cmd->init_task_tag; 2825 cmd->stat_sn = conn->stat_sn++; 2826 hdr->statsn = cpu_to_be32(cmd->stat_sn); 2827 2828 iscsit_increment_maxcmdsn(cmd, conn->sess); 2829 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 2830 hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn)); 2831 2832 pr_debug("Built Logout Response ITT: 0x%08x StatSN:" 2833 " 0x%08x Response: 0x%02x CID: %hu on CID: %hu\n", 2834 cmd->init_task_tag, cmd->stat_sn, hdr->response, 2835 cmd->logout_cid, conn->cid); 2836 2837 return 0; 2838 } 2839 EXPORT_SYMBOL(iscsit_build_logout_rsp); 2840 2841 static int 2842 iscsit_send_logout(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 2843 { 2844 struct kvec *iov; 2845 int niov = 0, tx_size, rc; 2846 2847 rc = iscsit_build_logout_rsp(cmd, conn, 2848 (struct iscsi_logout_rsp *)&cmd->pdu[0]); 2849 if (rc < 0) 2850 return rc; 2851 2852 tx_size = ISCSI_HDR_LEN; 2853 iov = &cmd->iov_misc[0]; 2854 iov[niov].iov_base = cmd->pdu; 2855 iov[niov++].iov_len = ISCSI_HDR_LEN; 2856 2857 if (conn->conn_ops->HeaderDigest) { 2858 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 2859 2860 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, &cmd->pdu[0], 2861 ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); 2862 2863 iov[0].iov_len += ISCSI_CRC_LEN; 2864 tx_size += ISCSI_CRC_LEN; 2865 pr_debug("Attaching CRC32C HeaderDigest to" 2866 " Logout Response 0x%08x\n", *header_digest); 2867 } 2868 cmd->iov_misc_count = niov; 2869 cmd->tx_size = tx_size; 2870 2871 return 0; 2872 } 2873 2874 void 2875 iscsit_build_nopin_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn, 2876 struct iscsi_nopin *hdr, bool nopout_response) 2877 { 2878 hdr->opcode = ISCSI_OP_NOOP_IN; 2879 hdr->flags |= ISCSI_FLAG_CMD_FINAL; 2880 hton24(hdr->dlength, cmd->buf_ptr_size); 2881 if 
(nopout_response) 2882 put_unaligned_le64(0xFFFFFFFFFFFFFFFFULL, &hdr->lun); 2883 hdr->itt = cmd->init_task_tag; 2884 hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag); 2885 cmd->stat_sn = (nopout_response) ? conn->stat_sn++ : 2886 conn->stat_sn; 2887 hdr->statsn = cpu_to_be32(cmd->stat_sn); 2888 2889 if (nopout_response) 2890 iscsit_increment_maxcmdsn(cmd, conn->sess); 2891 2892 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 2893 hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn)); 2894 2895 pr_debug("Built NOPIN %s Response ITT: 0x%08x, TTT: 0x%08x," 2896 " StatSN: 0x%08x, Length %u\n", (nopout_response) ? 2897 "Solicitied" : "Unsolicitied", cmd->init_task_tag, 2898 cmd->targ_xfer_tag, cmd->stat_sn, cmd->buf_ptr_size); 2899 } 2900 EXPORT_SYMBOL(iscsit_build_nopin_rsp); 2901 2902 /* 2903 * Unsolicited NOPIN, either requesting a response or not. 2904 */ 2905 static int iscsit_send_unsolicited_nopin( 2906 struct iscsi_cmd *cmd, 2907 struct iscsi_conn *conn, 2908 int want_response) 2909 { 2910 struct iscsi_nopin *hdr = (struct iscsi_nopin *)&cmd->pdu[0]; 2911 int tx_size = ISCSI_HDR_LEN, ret; 2912 2913 iscsit_build_nopin_rsp(cmd, conn, hdr, false); 2914 2915 if (conn->conn_ops->HeaderDigest) { 2916 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 2917 2918 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr, 2919 ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); 2920 2921 tx_size += ISCSI_CRC_LEN; 2922 pr_debug("Attaching CRC32C HeaderDigest to" 2923 " NopIN 0x%08x\n", *header_digest); 2924 } 2925 2926 cmd->iov_misc[0].iov_base = cmd->pdu; 2927 cmd->iov_misc[0].iov_len = tx_size; 2928 cmd->iov_misc_count = 1; 2929 cmd->tx_size = tx_size; 2930 2931 pr_debug("Sending Unsolicited NOPIN TTT: 0x%08x StatSN:" 2932 " 0x%08x CID: %hu\n", hdr->ttt, cmd->stat_sn, conn->cid); 2933 2934 ret = iscsit_send_tx_data(cmd, conn, 1); 2935 if (ret < 0) { 2936 iscsit_tx_thread_wait_for_tcp(conn); 2937 return ret; 2938 } 2939 2940 spin_lock_bh(&cmd->istate_lock); 2941 cmd->i_state = want_response ? 2942 ISTATE_SENT_NOPIN_WANT_RESPONSE : ISTATE_SENT_STATUS; 2943 spin_unlock_bh(&cmd->istate_lock); 2944 2945 return 0; 2946 } 2947 2948 static int 2949 iscsit_send_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 2950 { 2951 struct iscsi_nopin *hdr = (struct iscsi_nopin *)&cmd->pdu[0]; 2952 struct kvec *iov; 2953 u32 padding = 0; 2954 int niov = 0, tx_size; 2955 2956 iscsit_build_nopin_rsp(cmd, conn, hdr, true); 2957 2958 tx_size = ISCSI_HDR_LEN; 2959 iov = &cmd->iov_misc[0]; 2960 iov[niov].iov_base = cmd->pdu; 2961 iov[niov++].iov_len = ISCSI_HDR_LEN; 2962 2963 if (conn->conn_ops->HeaderDigest) { 2964 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 2965 2966 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr, 2967 ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); 2968 2969 iov[0].iov_len += ISCSI_CRC_LEN; 2970 tx_size += ISCSI_CRC_LEN; 2971 pr_debug("Attaching CRC32C HeaderDigest" 2972 " to NopIn 0x%08x\n", *header_digest); 2973 } 2974 2975 /* 2976 * NOPOUT Ping Data is attached to struct iscsi_cmd->buf_ptr. 2977 * NOPOUT DataSegmentLength is at struct iscsi_cmd->buf_ptr_size. 
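 * A non-zero buf_ptr_size means the initiator's ping data is echoed back
 * here, padded to a 4-byte boundary, with an optional DataDigest attached.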
2978 */ 2979 if (cmd->buf_ptr_size) { 2980 iov[niov].iov_base = cmd->buf_ptr; 2981 iov[niov++].iov_len = cmd->buf_ptr_size; 2982 tx_size += cmd->buf_ptr_size; 2983 2984 pr_debug("Echoing back %u bytes of ping" 2985 " data.\n", cmd->buf_ptr_size); 2986 2987 padding = ((-cmd->buf_ptr_size) & 3); 2988 if (padding != 0) { 2989 iov[niov].iov_base = &cmd->pad_bytes; 2990 iov[niov++].iov_len = padding; 2991 tx_size += padding; 2992 pr_debug("Attaching %u additional" 2993 " padding bytes.\n", padding); 2994 } 2995 if (conn->conn_ops->DataDigest) { 2996 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 2997 cmd->buf_ptr, cmd->buf_ptr_size, 2998 padding, (u8 *)&cmd->pad_bytes, 2999 (u8 *)&cmd->data_crc); 3000 3001 iov[niov].iov_base = &cmd->data_crc; 3002 iov[niov++].iov_len = ISCSI_CRC_LEN; 3003 tx_size += ISCSI_CRC_LEN; 3004 pr_debug("Attached DataDigest for %u" 3005 " bytes of ping data, CRC 0x%08x\n", 3006 cmd->buf_ptr_size, cmd->data_crc); 3007 } 3008 } 3009 3010 cmd->iov_misc_count = niov; 3011 cmd->tx_size = tx_size; 3012 3013 return 0; 3014 } 3015 3016 static int iscsit_send_r2t( 3017 struct iscsi_cmd *cmd, 3018 struct iscsi_conn *conn) 3019 { 3020 int tx_size = 0; 3021 struct iscsi_r2t *r2t; 3022 struct iscsi_r2t_rsp *hdr; 3023 int ret; 3024 3025 r2t = iscsit_get_r2t_from_list(cmd); 3026 if (!r2t) 3027 return -1; 3028 3029 hdr = (struct iscsi_r2t_rsp *) cmd->pdu; 3030 memset(hdr, 0, ISCSI_HDR_LEN); 3031 hdr->opcode = ISCSI_OP_R2T; 3032 hdr->flags |= ISCSI_FLAG_CMD_FINAL; 3033 int_to_scsilun(cmd->se_cmd.orig_fe_lun, 3034 (struct scsi_lun *)&hdr->lun); 3035 hdr->itt = cmd->init_task_tag; 3036 r2t->targ_xfer_tag = session_get_next_ttt(conn->sess); 3037 hdr->ttt = cpu_to_be32(r2t->targ_xfer_tag); 3038 hdr->statsn = cpu_to_be32(conn->stat_sn); 3039 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 3040 hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn)); 3041 hdr->r2tsn = cpu_to_be32(r2t->r2t_sn); 3042 hdr->data_offset = cpu_to_be32(r2t->offset); 3043 hdr->data_length = cpu_to_be32(r2t->xfer_len); 3044 3045 cmd->iov_misc[0].iov_base = cmd->pdu; 3046 cmd->iov_misc[0].iov_len = ISCSI_HDR_LEN; 3047 tx_size += ISCSI_HDR_LEN; 3048 3049 if (conn->conn_ops->HeaderDigest) { 3050 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 3051 3052 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr, 3053 ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); 3054 3055 cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN; 3056 tx_size += ISCSI_CRC_LEN; 3057 pr_debug("Attaching CRC32 HeaderDigest for R2T" 3058 " PDU 0x%08x\n", *header_digest); 3059 } 3060 3061 pr_debug("Built %sR2T, ITT: 0x%08x, TTT: 0x%08x, StatSN:" 3062 " 0x%08x, R2TSN: 0x%08x, Offset: %u, DDTL: %u, CID: %hu\n", 3063 (!r2t->recovery_r2t) ? "" : "Recovery ", cmd->init_task_tag, 3064 r2t->targ_xfer_tag, ntohl(hdr->statsn), r2t->r2t_sn, 3065 r2t->offset, r2t->xfer_len, conn->cid); 3066 3067 cmd->iov_misc_count = 1; 3068 cmd->tx_size = tx_size; 3069 3070 spin_lock_bh(&cmd->r2t_lock); 3071 r2t->sent_r2t = 1; 3072 spin_unlock_bh(&cmd->r2t_lock); 3073 3074 ret = iscsit_send_tx_data(cmd, conn, 1); 3075 if (ret < 0) { 3076 iscsit_tx_thread_wait_for_tcp(conn); 3077 return ret; 3078 } 3079 3080 spin_lock_bh(&cmd->dataout_timeout_lock); 3081 iscsit_start_dataout_timer(cmd, conn); 3082 spin_unlock_bh(&cmd->dataout_timeout_lock); 3083 3084 return 0; 3085 } 3086 3087 /* 3088 * @recovery: If called from iscsi_task_reassign_complete_write() for 3089 * connection recovery. 
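 *
 * Builds R2Ts for the command and adds them to its R2T list, up to the
 * session's MaxOutstandingR2T, honoring DataSequenceInOrder and
 * MaxBurstLength.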
3090 */ 3091 int iscsit_build_r2ts_for_cmd( 3092 struct iscsi_conn *conn, 3093 struct iscsi_cmd *cmd, 3094 bool recovery) 3095 { 3096 int first_r2t = 1; 3097 u32 offset = 0, xfer_len = 0; 3098 3099 spin_lock_bh(&cmd->r2t_lock); 3100 if (cmd->cmd_flags & ICF_SENT_LAST_R2T) { 3101 spin_unlock_bh(&cmd->r2t_lock); 3102 return 0; 3103 } 3104 3105 if (conn->sess->sess_ops->DataSequenceInOrder && 3106 !recovery) 3107 cmd->r2t_offset = max(cmd->r2t_offset, cmd->write_data_done); 3108 3109 while (cmd->outstanding_r2ts < conn->sess->sess_ops->MaxOutstandingR2T) { 3110 if (conn->sess->sess_ops->DataSequenceInOrder) { 3111 offset = cmd->r2t_offset; 3112 3113 if (first_r2t && recovery) { 3114 int new_data_end = offset + 3115 conn->sess->sess_ops->MaxBurstLength - 3116 cmd->next_burst_len; 3117 3118 if (new_data_end > cmd->se_cmd.data_length) 3119 xfer_len = cmd->se_cmd.data_length - offset; 3120 else 3121 xfer_len = 3122 conn->sess->sess_ops->MaxBurstLength - 3123 cmd->next_burst_len; 3124 } else { 3125 int new_data_end = offset + 3126 conn->sess->sess_ops->MaxBurstLength; 3127 3128 if (new_data_end > cmd->se_cmd.data_length) 3129 xfer_len = cmd->se_cmd.data_length - offset; 3130 else 3131 xfer_len = conn->sess->sess_ops->MaxBurstLength; 3132 } 3133 cmd->r2t_offset += xfer_len; 3134 3135 if (cmd->r2t_offset == cmd->se_cmd.data_length) 3136 cmd->cmd_flags |= ICF_SENT_LAST_R2T; 3137 } else { 3138 struct iscsi_seq *seq; 3139 3140 seq = iscsit_get_seq_holder_for_r2t(cmd); 3141 if (!seq) { 3142 spin_unlock_bh(&cmd->r2t_lock); 3143 return -1; 3144 } 3145 3146 offset = seq->offset; 3147 xfer_len = seq->xfer_len; 3148 3149 if (cmd->seq_send_order == cmd->seq_count) 3150 cmd->cmd_flags |= ICF_SENT_LAST_R2T; 3151 } 3152 cmd->outstanding_r2ts++; 3153 first_r2t = 0; 3154 3155 if (iscsit_add_r2t_to_list(cmd, offset, xfer_len, 0, 0) < 0) { 3156 spin_unlock_bh(&cmd->r2t_lock); 3157 return -1; 3158 } 3159 3160 if (cmd->cmd_flags & ICF_SENT_LAST_R2T) 3161 break; 3162 } 3163 spin_unlock_bh(&cmd->r2t_lock); 3164 3165 return 0; 3166 } 3167 3168 void iscsit_build_rsp_pdu(struct iscsi_cmd *cmd, struct iscsi_conn *conn, 3169 bool inc_stat_sn, struct iscsi_scsi_rsp *hdr) 3170 { 3171 if (inc_stat_sn) 3172 cmd->stat_sn = conn->stat_sn++; 3173 3174 atomic_long_inc(&conn->sess->rsp_pdus); 3175 3176 memset(hdr, 0, ISCSI_HDR_LEN); 3177 hdr->opcode = ISCSI_OP_SCSI_CMD_RSP; 3178 hdr->flags |= ISCSI_FLAG_CMD_FINAL; 3179 if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) { 3180 hdr->flags |= ISCSI_FLAG_CMD_OVERFLOW; 3181 hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count); 3182 } else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) { 3183 hdr->flags |= ISCSI_FLAG_CMD_UNDERFLOW; 3184 hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count); 3185 } 3186 hdr->response = cmd->iscsi_response; 3187 hdr->cmd_status = cmd->se_cmd.scsi_status; 3188 hdr->itt = cmd->init_task_tag; 3189 hdr->statsn = cpu_to_be32(cmd->stat_sn); 3190 3191 iscsit_increment_maxcmdsn(cmd, conn->sess); 3192 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 3193 hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn)); 3194 3195 pr_debug("Built SCSI Response, ITT: 0x%08x, StatSN: 0x%08x," 3196 " Response: 0x%02x, SAM Status: 0x%02x, CID: %hu\n", 3197 cmd->init_task_tag, cmd->stat_sn, cmd->se_cmd.scsi_status, 3198 cmd->se_cmd.scsi_status, conn->cid); 3199 } 3200 EXPORT_SYMBOL(iscsit_build_rsp_pdu); 3201 3202 static int iscsit_send_response(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 3203 { 3204 struct iscsi_scsi_rsp *hdr = (struct 
iscsi_scsi_rsp *)&cmd->pdu[0]; 3205 struct kvec *iov; 3206 u32 padding = 0, tx_size = 0; 3207 int iov_count = 0; 3208 bool inc_stat_sn = (cmd->i_state == ISTATE_SEND_STATUS); 3209 3210 iscsit_build_rsp_pdu(cmd, conn, inc_stat_sn, hdr); 3211 3212 iov = &cmd->iov_misc[0]; 3213 iov[iov_count].iov_base = cmd->pdu; 3214 iov[iov_count++].iov_len = ISCSI_HDR_LEN; 3215 tx_size += ISCSI_HDR_LEN; 3216 3217 /* 3218 * Attach SENSE DATA payload to iSCSI Response PDU 3219 */ 3220 if (cmd->se_cmd.sense_buffer && 3221 ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) || 3222 (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) { 3223 put_unaligned_be16(cmd->se_cmd.scsi_sense_length, cmd->sense_buffer); 3224 cmd->se_cmd.scsi_sense_length += sizeof (__be16); 3225 3226 padding = -(cmd->se_cmd.scsi_sense_length) & 3; 3227 hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length); 3228 iov[iov_count].iov_base = cmd->sense_buffer; 3229 iov[iov_count++].iov_len = 3230 (cmd->se_cmd.scsi_sense_length + padding); 3231 tx_size += cmd->se_cmd.scsi_sense_length; 3232 3233 if (padding) { 3234 memset(cmd->sense_buffer + 3235 cmd->se_cmd.scsi_sense_length, 0, padding); 3236 tx_size += padding; 3237 pr_debug("Adding %u bytes of padding to" 3238 " SENSE.\n", padding); 3239 } 3240 3241 if (conn->conn_ops->DataDigest) { 3242 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 3243 cmd->sense_buffer, 3244 (cmd->se_cmd.scsi_sense_length + padding), 3245 0, NULL, (u8 *)&cmd->data_crc); 3246 3247 iov[iov_count].iov_base = &cmd->data_crc; 3248 iov[iov_count++].iov_len = ISCSI_CRC_LEN; 3249 tx_size += ISCSI_CRC_LEN; 3250 3251 pr_debug("Attaching CRC32 DataDigest for" 3252 " SENSE, %u bytes CRC 0x%08x\n", 3253 (cmd->se_cmd.scsi_sense_length + padding), 3254 cmd->data_crc); 3255 } 3256 3257 pr_debug("Attaching SENSE DATA: %u bytes to iSCSI" 3258 " Response PDU\n", 3259 cmd->se_cmd.scsi_sense_length); 3260 } 3261 3262 if (conn->conn_ops->HeaderDigest) { 3263 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 3264 3265 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, cmd->pdu, 3266 ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); 3267 3268 iov[0].iov_len += ISCSI_CRC_LEN; 3269 tx_size += ISCSI_CRC_LEN; 3270 pr_debug("Attaching CRC32 HeaderDigest for Response" 3271 " PDU 0x%08x\n", *header_digest); 3272 } 3273 3274 cmd->iov_misc_count = iov_count; 3275 cmd->tx_size = tx_size; 3276 3277 return 0; 3278 } 3279 3280 static u8 iscsit_convert_tcm_tmr_rsp(struct se_tmr_req *se_tmr) 3281 { 3282 switch (se_tmr->response) { 3283 case TMR_FUNCTION_COMPLETE: 3284 return ISCSI_TMF_RSP_COMPLETE; 3285 case TMR_TASK_DOES_NOT_EXIST: 3286 return ISCSI_TMF_RSP_NO_TASK; 3287 case TMR_LUN_DOES_NOT_EXIST: 3288 return ISCSI_TMF_RSP_NO_LUN; 3289 case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED: 3290 return ISCSI_TMF_RSP_NOT_SUPPORTED; 3291 case TMR_FUNCTION_REJECTED: 3292 default: 3293 return ISCSI_TMF_RSP_REJECTED; 3294 } 3295 } 3296 3297 void 3298 iscsit_build_task_mgt_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn, 3299 struct iscsi_tm_rsp *hdr) 3300 { 3301 struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req; 3302 3303 hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP; 3304 hdr->flags = ISCSI_FLAG_CMD_FINAL; 3305 hdr->response = iscsit_convert_tcm_tmr_rsp(se_tmr); 3306 hdr->itt = cmd->init_task_tag; 3307 cmd->stat_sn = conn->stat_sn++; 3308 hdr->statsn = cpu_to_be32(cmd->stat_sn); 3309 3310 iscsit_increment_maxcmdsn(cmd, conn->sess); 3311 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 3312 hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn)); 
3313 3314 pr_debug("Built Task Management Response ITT: 0x%08x," 3315 " StatSN: 0x%08x, Response: 0x%02x, CID: %hu\n", 3316 cmd->init_task_tag, cmd->stat_sn, hdr->response, conn->cid); 3317 } 3318 EXPORT_SYMBOL(iscsit_build_task_mgt_rsp); 3319 3320 static int 3321 iscsit_send_task_mgt_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 3322 { 3323 struct iscsi_tm_rsp *hdr = (struct iscsi_tm_rsp *)&cmd->pdu[0]; 3324 u32 tx_size = 0; 3325 3326 iscsit_build_task_mgt_rsp(cmd, conn, hdr); 3327 3328 cmd->iov_misc[0].iov_base = cmd->pdu; 3329 cmd->iov_misc[0].iov_len = ISCSI_HDR_LEN; 3330 tx_size += ISCSI_HDR_LEN; 3331 3332 if (conn->conn_ops->HeaderDigest) { 3333 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 3334 3335 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr, 3336 ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); 3337 3338 cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN; 3339 tx_size += ISCSI_CRC_LEN; 3340 pr_debug("Attaching CRC32 HeaderDigest for Task" 3341 " Mgmt Response PDU 0x%08x\n", *header_digest); 3342 } 3343 3344 cmd->iov_misc_count = 1; 3345 cmd->tx_size = tx_size; 3346 3347 return 0; 3348 } 3349 3350 static bool iscsit_check_inaddr_any(struct iscsi_np *np) 3351 { 3352 bool ret = false; 3353 3354 if (np->np_sockaddr.ss_family == AF_INET6) { 3355 const struct sockaddr_in6 sin6 = { 3356 .sin6_addr = IN6ADDR_ANY_INIT }; 3357 struct sockaddr_in6 *sock_in6 = 3358 (struct sockaddr_in6 *)&np->np_sockaddr; 3359 3360 if (!memcmp(sock_in6->sin6_addr.s6_addr, 3361 sin6.sin6_addr.s6_addr, 16)) 3362 ret = true; 3363 } else { 3364 struct sockaddr_in * sock_in = 3365 (struct sockaddr_in *)&np->np_sockaddr; 3366 3367 if (sock_in->sin_addr.s_addr == htonl(INADDR_ANY)) 3368 ret = true; 3369 } 3370 3371 return ret; 3372 } 3373 3374 #define SENDTARGETS_BUF_LIMIT 32768U 3375 3376 static int 3377 iscsit_build_sendtargets_response(struct iscsi_cmd *cmd, 3378 enum iscsit_transport_type network_transport, 3379 int skip_bytes, bool *completed) 3380 { 3381 char *payload = NULL; 3382 struct iscsi_conn *conn = cmd->conn; 3383 struct iscsi_portal_group *tpg; 3384 struct iscsi_tiqn *tiqn; 3385 struct iscsi_tpg_np *tpg_np; 3386 int buffer_len, end_of_buf = 0, len = 0, payload_len = 0; 3387 int target_name_printed; 3388 unsigned char buf[ISCSI_IQN_LEN+12]; /* iqn + "TargetName=" + \0 */ 3389 unsigned char *text_in = cmd->text_in_ptr, *text_ptr = NULL; 3390 bool active; 3391 3392 buffer_len = min(conn->conn_ops->MaxRecvDataSegmentLength, 3393 SENDTARGETS_BUF_LIMIT); 3394 3395 payload = kzalloc(buffer_len, GFP_KERNEL); 3396 if (!payload) { 3397 pr_err("Unable to allocate memory for sendtargets" 3398 " response.\n"); 3399 return -ENOMEM; 3400 } 3401 /* 3402 * Locate pointer to iqn./eui. string for ICF_SENDTARGETS_SINGLE 3403 * explicit case.. 3404 */ 3405 if (cmd->cmd_flags & ICF_SENDTARGETS_SINGLE) { 3406 text_ptr = strchr(text_in, '='); 3407 if (!text_ptr) { 3408 pr_err("Unable to locate '=' string in text_in:" 3409 " %s\n", text_in); 3410 kfree(payload); 3411 return -EINVAL; 3412 } 3413 /* 3414 * Skip over '=' character.. 
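 * so that text_ptr points at the iqn./eui. target name matched against
 * tiqn->tiqn below.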
3415 */ 3416 text_ptr += 1; 3417 } 3418 3419 spin_lock(&tiqn_lock); 3420 list_for_each_entry(tiqn, &g_tiqn_list, tiqn_list) { 3421 if ((cmd->cmd_flags & ICF_SENDTARGETS_SINGLE) && 3422 strcmp(tiqn->tiqn, text_ptr)) { 3423 continue; 3424 } 3425 3426 target_name_printed = 0; 3427 3428 spin_lock(&tiqn->tiqn_tpg_lock); 3429 list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) { 3430 3431 /* If demo_mode_discovery=0 and generate_node_acls=0 3432 * (demo mode dislabed) do not return 3433 * TargetName+TargetAddress unless a NodeACL exists. 3434 */ 3435 3436 if ((tpg->tpg_attrib.generate_node_acls == 0) && 3437 (tpg->tpg_attrib.demo_mode_discovery == 0) && 3438 (!core_tpg_get_initiator_node_acl(&tpg->tpg_se_tpg, 3439 cmd->conn->sess->sess_ops->InitiatorName))) { 3440 continue; 3441 } 3442 3443 spin_lock(&tpg->tpg_state_lock); 3444 active = (tpg->tpg_state == TPG_STATE_ACTIVE); 3445 spin_unlock(&tpg->tpg_state_lock); 3446 3447 if (!active && tpg->tpg_attrib.tpg_enabled_sendtargets) 3448 continue; 3449 3450 spin_lock(&tpg->tpg_np_lock); 3451 list_for_each_entry(tpg_np, &tpg->tpg_gnp_list, 3452 tpg_np_list) { 3453 struct iscsi_np *np = tpg_np->tpg_np; 3454 bool inaddr_any = iscsit_check_inaddr_any(np); 3455 struct sockaddr_storage *sockaddr; 3456 3457 if (np->np_network_transport != network_transport) 3458 continue; 3459 3460 if (!target_name_printed) { 3461 len = sprintf(buf, "TargetName=%s", 3462 tiqn->tiqn); 3463 len += 1; 3464 3465 if ((len + payload_len) > buffer_len) { 3466 spin_unlock(&tpg->tpg_np_lock); 3467 spin_unlock(&tiqn->tiqn_tpg_lock); 3468 end_of_buf = 1; 3469 goto eob; 3470 } 3471 3472 if (skip_bytes && len <= skip_bytes) { 3473 skip_bytes -= len; 3474 } else { 3475 memcpy(payload + payload_len, buf, len); 3476 payload_len += len; 3477 target_name_printed = 1; 3478 if (len > skip_bytes) 3479 skip_bytes = 0; 3480 } 3481 } 3482 3483 if (inaddr_any) 3484 sockaddr = &conn->local_sockaddr; 3485 else 3486 sockaddr = &np->np_sockaddr; 3487 3488 len = sprintf(buf, "TargetAddress=" 3489 "%pISpc,%hu", 3490 sockaddr, 3491 tpg->tpgt); 3492 len += 1; 3493 3494 if ((len + payload_len) > buffer_len) { 3495 spin_unlock(&tpg->tpg_np_lock); 3496 spin_unlock(&tiqn->tiqn_tpg_lock); 3497 end_of_buf = 1; 3498 goto eob; 3499 } 3500 3501 if (skip_bytes && len <= skip_bytes) { 3502 skip_bytes -= len; 3503 } else { 3504 memcpy(payload + payload_len, buf, len); 3505 payload_len += len; 3506 if (len > skip_bytes) 3507 skip_bytes = 0; 3508 } 3509 } 3510 spin_unlock(&tpg->tpg_np_lock); 3511 } 3512 spin_unlock(&tiqn->tiqn_tpg_lock); 3513 eob: 3514 if (end_of_buf) { 3515 *completed = false; 3516 break; 3517 } 3518 3519 if (cmd->cmd_flags & ICF_SENDTARGETS_SINGLE) 3520 break; 3521 } 3522 spin_unlock(&tiqn_lock); 3523 3524 cmd->buf_ptr = payload; 3525 3526 return payload_len; 3527 } 3528 3529 int 3530 iscsit_build_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn, 3531 struct iscsi_text_rsp *hdr, 3532 enum iscsit_transport_type network_transport) 3533 { 3534 int text_length, padding; 3535 bool completed = true; 3536 3537 text_length = iscsit_build_sendtargets_response(cmd, network_transport, 3538 cmd->read_data_done, 3539 &completed); 3540 if (text_length < 0) 3541 return text_length; 3542 3543 if (completed) { 3544 hdr->flags |= ISCSI_FLAG_CMD_FINAL; 3545 } else { 3546 hdr->flags |= ISCSI_FLAG_TEXT_CONTINUE; 3547 cmd->read_data_done += text_length; 3548 if (cmd->targ_xfer_tag == 0xFFFFFFFF) 3549 cmd->targ_xfer_tag = session_get_next_ttt(conn->sess); 3550 } 3551 hdr->opcode = ISCSI_OP_TEXT_RSP; 3552 padding 
= ((-text_length) & 3); 3553 hton24(hdr->dlength, text_length); 3554 hdr->itt = cmd->init_task_tag; 3555 hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag); 3556 cmd->stat_sn = conn->stat_sn++; 3557 hdr->statsn = cpu_to_be32(cmd->stat_sn); 3558 3559 iscsit_increment_maxcmdsn(cmd, conn->sess); 3560 /* 3561 * Reset maxcmdsn_inc in multi-part text payload exchanges to 3562 * correctly increment MaxCmdSN for each response answering a 3563 * non immediate text request with a valid CmdSN. 3564 */ 3565 cmd->maxcmdsn_inc = 0; 3566 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 3567 hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn)); 3568 3569 pr_debug("Built Text Response: ITT: 0x%08x, TTT: 0x%08x, StatSN: 0x%08x," 3570 " Length: %u, CID: %hu F: %d C: %d\n", cmd->init_task_tag, 3571 cmd->targ_xfer_tag, cmd->stat_sn, text_length, conn->cid, 3572 !!(hdr->flags & ISCSI_FLAG_CMD_FINAL), 3573 !!(hdr->flags & ISCSI_FLAG_TEXT_CONTINUE)); 3574 3575 return text_length + padding; 3576 } 3577 EXPORT_SYMBOL(iscsit_build_text_rsp); 3578 3579 static int iscsit_send_text_rsp( 3580 struct iscsi_cmd *cmd, 3581 struct iscsi_conn *conn) 3582 { 3583 struct iscsi_text_rsp *hdr = (struct iscsi_text_rsp *)cmd->pdu; 3584 struct kvec *iov; 3585 u32 tx_size = 0; 3586 int text_length, iov_count = 0, rc; 3587 3588 rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_TCP); 3589 if (rc < 0) 3590 return rc; 3591 3592 text_length = rc; 3593 iov = &cmd->iov_misc[0]; 3594 iov[iov_count].iov_base = cmd->pdu; 3595 iov[iov_count++].iov_len = ISCSI_HDR_LEN; 3596 iov[iov_count].iov_base = cmd->buf_ptr; 3597 iov[iov_count++].iov_len = text_length; 3598 3599 tx_size += (ISCSI_HDR_LEN + text_length); 3600 3601 if (conn->conn_ops->HeaderDigest) { 3602 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 3603 3604 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr, 3605 ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); 3606 3607 iov[0].iov_len += ISCSI_CRC_LEN; 3608 tx_size += ISCSI_CRC_LEN; 3609 pr_debug("Attaching CRC32 HeaderDigest for" 3610 " Text Response PDU 0x%08x\n", *header_digest); 3611 } 3612 3613 if (conn->conn_ops->DataDigest) { 3614 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 3615 cmd->buf_ptr, text_length, 3616 0, NULL, (u8 *)&cmd->data_crc); 3617 3618 iov[iov_count].iov_base = &cmd->data_crc; 3619 iov[iov_count++].iov_len = ISCSI_CRC_LEN; 3620 tx_size += ISCSI_CRC_LEN; 3621 3622 pr_debug("Attaching DataDigest for %u bytes of text" 3623 " data, CRC 0x%08x\n", text_length, 3624 cmd->data_crc); 3625 } 3626 3627 cmd->iov_misc_count = iov_count; 3628 cmd->tx_size = tx_size; 3629 3630 return 0; 3631 } 3632 3633 void 3634 iscsit_build_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn, 3635 struct iscsi_reject *hdr) 3636 { 3637 hdr->opcode = ISCSI_OP_REJECT; 3638 hdr->reason = cmd->reject_reason; 3639 hdr->flags |= ISCSI_FLAG_CMD_FINAL; 3640 hton24(hdr->dlength, ISCSI_HDR_LEN); 3641 hdr->ffffffff = cpu_to_be32(0xffffffff); 3642 cmd->stat_sn = conn->stat_sn++; 3643 hdr->statsn = cpu_to_be32(cmd->stat_sn); 3644 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 3645 hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn)); 3646 3647 } 3648 EXPORT_SYMBOL(iscsit_build_reject); 3649 3650 static int iscsit_send_reject( 3651 struct iscsi_cmd *cmd, 3652 struct iscsi_conn *conn) 3653 { 3654 struct iscsi_reject *hdr = (struct iscsi_reject *)&cmd->pdu[0]; 3655 struct kvec *iov; 3656 u32 iov_count = 0, tx_size; 3657 3658 iscsit_build_reject(cmd, conn, hdr); 3659 3660 iov = &cmd->iov_misc[0]; 3661 
iov[iov_count].iov_base = cmd->pdu; 3662 iov[iov_count++].iov_len = ISCSI_HDR_LEN; 3663 iov[iov_count].iov_base = cmd->buf_ptr; 3664 iov[iov_count++].iov_len = ISCSI_HDR_LEN; 3665 3666 tx_size = (ISCSI_HDR_LEN + ISCSI_HDR_LEN); 3667 3668 if (conn->conn_ops->HeaderDigest) { 3669 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 3670 3671 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr, 3672 ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); 3673 3674 iov[0].iov_len += ISCSI_CRC_LEN; 3675 tx_size += ISCSI_CRC_LEN; 3676 pr_debug("Attaching CRC32 HeaderDigest for" 3677 " REJECT PDU 0x%08x\n", *header_digest); 3678 } 3679 3680 if (conn->conn_ops->DataDigest) { 3681 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, cmd->buf_ptr, 3682 ISCSI_HDR_LEN, 0, NULL, (u8 *)&cmd->data_crc); 3683 3684 iov[iov_count].iov_base = &cmd->data_crc; 3685 iov[iov_count++].iov_len = ISCSI_CRC_LEN; 3686 tx_size += ISCSI_CRC_LEN; 3687 pr_debug("Attaching CRC32 DataDigest for REJECT" 3688 " PDU 0x%08x\n", cmd->data_crc); 3689 } 3690 3691 cmd->iov_misc_count = iov_count; 3692 cmd->tx_size = tx_size; 3693 3694 pr_debug("Built Reject PDU StatSN: 0x%08x, Reason: 0x%02x," 3695 " CID: %hu\n", ntohl(hdr->statsn), hdr->reason, conn->cid); 3696 3697 return 0; 3698 } 3699 3700 void iscsit_thread_get_cpumask(struct iscsi_conn *conn) 3701 { 3702 int ord, cpu; 3703 /* 3704 * bitmap_id is assigned from iscsit_global->ts_bitmap from 3705 * within iscsit_start_kthreads() 3706 * 3707 * Here we use bitmap_id to determine which CPU that this 3708 * iSCSI connection's RX/TX threads will be scheduled to 3709 * execute upon. 3710 */ 3711 ord = conn->bitmap_id % cpumask_weight(cpu_online_mask); 3712 for_each_online_cpu(cpu) { 3713 if (ord-- == 0) { 3714 cpumask_set_cpu(cpu, conn->conn_cpumask); 3715 return; 3716 } 3717 } 3718 /* 3719 * This should never be reached.. 3720 */ 3721 dump_stack(); 3722 cpumask_setall(conn->conn_cpumask); 3723 } 3724 3725 static inline void iscsit_thread_check_cpumask( 3726 struct iscsi_conn *conn, 3727 struct task_struct *p, 3728 int mode) 3729 { 3730 /* 3731 * mode == 1 signals iscsi_target_tx_thread() usage. 3732 * mode == 0 signals iscsi_target_rx_thread() usage. 3733 */ 3734 if (mode == 1) { 3735 if (!conn->conn_tx_reset_cpumask) 3736 return; 3737 conn->conn_tx_reset_cpumask = 0; 3738 } else { 3739 if (!conn->conn_rx_reset_cpumask) 3740 return; 3741 conn->conn_rx_reset_cpumask = 0; 3742 } 3743 /* 3744 * Update the CPU mask for this single kthread so that 3745 * both TX and RX kthreads are scheduled to run on the 3746 * same CPU. 
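 * conn->conn_cpumask itself was chosen in iscsit_thread_get_cpumask()
 * from conn->bitmap_id.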
3747 */ 3748 set_cpus_allowed_ptr(p, conn->conn_cpumask); 3749 } 3750 3751 static int 3752 iscsit_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state) 3753 { 3754 int ret; 3755 3756 switch (state) { 3757 case ISTATE_SEND_R2T: 3758 ret = iscsit_send_r2t(cmd, conn); 3759 if (ret < 0) 3760 goto err; 3761 break; 3762 case ISTATE_REMOVE: 3763 spin_lock_bh(&conn->cmd_lock); 3764 list_del_init(&cmd->i_conn_node); 3765 spin_unlock_bh(&conn->cmd_lock); 3766 3767 iscsit_free_cmd(cmd, false); 3768 break; 3769 case ISTATE_SEND_NOPIN_WANT_RESPONSE: 3770 iscsit_mod_nopin_response_timer(conn); 3771 ret = iscsit_send_unsolicited_nopin(cmd, conn, 1); 3772 if (ret < 0) 3773 goto err; 3774 break; 3775 case ISTATE_SEND_NOPIN_NO_RESPONSE: 3776 ret = iscsit_send_unsolicited_nopin(cmd, conn, 0); 3777 if (ret < 0) 3778 goto err; 3779 break; 3780 default: 3781 pr_err("Unknown Opcode: 0x%02x ITT:" 3782 " 0x%08x, i_state: %d on CID: %hu\n", 3783 cmd->iscsi_opcode, cmd->init_task_tag, state, 3784 conn->cid); 3785 goto err; 3786 } 3787 3788 return 0; 3789 3790 err: 3791 return -1; 3792 } 3793 3794 static int 3795 iscsit_handle_immediate_queue(struct iscsi_conn *conn) 3796 { 3797 struct iscsit_transport *t = conn->conn_transport; 3798 struct iscsi_queue_req *qr; 3799 struct iscsi_cmd *cmd; 3800 u8 state; 3801 int ret; 3802 3803 while ((qr = iscsit_get_cmd_from_immediate_queue(conn))) { 3804 atomic_set(&conn->check_immediate_queue, 0); 3805 cmd = qr->cmd; 3806 state = qr->state; 3807 kmem_cache_free(lio_qr_cache, qr); 3808 3809 ret = t->iscsit_immediate_queue(conn, cmd, state); 3810 if (ret < 0) 3811 return ret; 3812 } 3813 3814 return 0; 3815 } 3816 3817 static int 3818 iscsit_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state) 3819 { 3820 int ret; 3821 3822 check_rsp_state: 3823 switch (state) { 3824 case ISTATE_SEND_DATAIN: 3825 ret = iscsit_send_datain(cmd, conn); 3826 if (ret < 0) 3827 goto err; 3828 else if (!ret) 3829 /* more drs */ 3830 goto check_rsp_state; 3831 else if (ret == 1) { 3832 /* all done */ 3833 spin_lock_bh(&cmd->istate_lock); 3834 cmd->i_state = ISTATE_SENT_STATUS; 3835 spin_unlock_bh(&cmd->istate_lock); 3836 3837 if (atomic_read(&conn->check_immediate_queue)) 3838 return 1; 3839 3840 return 0; 3841 } else if (ret == 2) { 3842 /* Still must send status, 3843 SCF_TRANSPORT_TASK_SENSE was set */ 3844 spin_lock_bh(&cmd->istate_lock); 3845 cmd->i_state = ISTATE_SEND_STATUS; 3846 spin_unlock_bh(&cmd->istate_lock); 3847 state = ISTATE_SEND_STATUS; 3848 goto check_rsp_state; 3849 } 3850 3851 break; 3852 case ISTATE_SEND_STATUS: 3853 case ISTATE_SEND_STATUS_RECOVERY: 3854 ret = iscsit_send_response(cmd, conn); 3855 break; 3856 case ISTATE_SEND_LOGOUTRSP: 3857 ret = iscsit_send_logout(cmd, conn); 3858 break; 3859 case ISTATE_SEND_ASYNCMSG: 3860 ret = iscsit_send_conn_drop_async_message( 3861 cmd, conn); 3862 break; 3863 case ISTATE_SEND_NOPIN: 3864 ret = iscsit_send_nopin(cmd, conn); 3865 break; 3866 case ISTATE_SEND_REJECT: 3867 ret = iscsit_send_reject(cmd, conn); 3868 break; 3869 case ISTATE_SEND_TASKMGTRSP: 3870 ret = iscsit_send_task_mgt_rsp(cmd, conn); 3871 if (ret != 0) 3872 break; 3873 ret = iscsit_tmr_post_handler(cmd, conn); 3874 if (ret != 0) 3875 iscsit_fall_back_to_erl0(conn->sess); 3876 break; 3877 case ISTATE_SEND_TEXTRSP: 3878 ret = iscsit_send_text_rsp(cmd, conn); 3879 break; 3880 default: 3881 pr_err("Unknown Opcode: 0x%02x ITT:" 3882 " 0x%08x, i_state: %d on CID: %hu\n", 3883 cmd->iscsi_opcode, cmd->init_task_tag, 3884 state, conn->cid); 3885 
goto err; 3886 } 3887 if (ret < 0) 3888 goto err; 3889 3890 if (iscsit_send_tx_data(cmd, conn, 1) < 0) { 3891 iscsit_tx_thread_wait_for_tcp(conn); 3892 iscsit_unmap_iovec(cmd); 3893 goto err; 3894 } 3895 iscsit_unmap_iovec(cmd); 3896 3897 switch (state) { 3898 case ISTATE_SEND_LOGOUTRSP: 3899 if (!iscsit_logout_post_handler(cmd, conn)) 3900 return -ECONNRESET; 3901 /* fall through */ 3902 case ISTATE_SEND_STATUS: 3903 case ISTATE_SEND_ASYNCMSG: 3904 case ISTATE_SEND_NOPIN: 3905 case ISTATE_SEND_STATUS_RECOVERY: 3906 case ISTATE_SEND_TEXTRSP: 3907 case ISTATE_SEND_TASKMGTRSP: 3908 case ISTATE_SEND_REJECT: 3909 spin_lock_bh(&cmd->istate_lock); 3910 cmd->i_state = ISTATE_SENT_STATUS; 3911 spin_unlock_bh(&cmd->istate_lock); 3912 break; 3913 default: 3914 pr_err("Unknown Opcode: 0x%02x ITT:" 3915 " 0x%08x, i_state: %d on CID: %hu\n", 3916 cmd->iscsi_opcode, cmd->init_task_tag, 3917 cmd->i_state, conn->cid); 3918 goto err; 3919 } 3920 3921 if (atomic_read(&conn->check_immediate_queue)) 3922 return 1; 3923 3924 return 0; 3925 3926 err: 3927 return -1; 3928 } 3929 3930 static int iscsit_handle_response_queue(struct iscsi_conn *conn) 3931 { 3932 struct iscsit_transport *t = conn->conn_transport; 3933 struct iscsi_queue_req *qr; 3934 struct iscsi_cmd *cmd; 3935 u8 state; 3936 int ret; 3937 3938 while ((qr = iscsit_get_cmd_from_response_queue(conn))) { 3939 cmd = qr->cmd; 3940 state = qr->state; 3941 kmem_cache_free(lio_qr_cache, qr); 3942 3943 ret = t->iscsit_response_queue(conn, cmd, state); 3944 if (ret == 1 || ret < 0) 3945 return ret; 3946 } 3947 3948 return 0; 3949 } 3950 3951 int iscsi_target_tx_thread(void *arg) 3952 { 3953 int ret = 0; 3954 struct iscsi_conn *conn = arg; 3955 /* 3956 * Allow ourselves to be interrupted by SIGINT so that a 3957 * connection recovery / failure event can be triggered externally. 3958 */ 3959 allow_signal(SIGINT); 3960 3961 while (!kthread_should_stop()) { 3962 /* 3963 * Ensure that both TX and RX per connection kthreads 3964 * are scheduled to run on the same CPU. 3965 */ 3966 iscsit_thread_check_cpumask(conn, current, 1); 3967 3968 wait_event_interruptible(conn->queues_wq, 3969 !iscsit_conn_all_queues_empty(conn)); 3970 3971 if (signal_pending(current)) 3972 goto transport_err; 3973 3974 get_immediate: 3975 ret = iscsit_handle_immediate_queue(conn); 3976 if (ret < 0) 3977 goto transport_err; 3978 3979 ret = iscsit_handle_response_queue(conn); 3980 if (ret == 1) 3981 goto get_immediate; 3982 else if (ret == -ECONNRESET) 3983 goto out; 3984 else if (ret < 0) 3985 goto transport_err; 3986 } 3987 3988 transport_err: 3989 /* 3990 * Avoid the normal connection failure code-path if this connection 3991 * is still within LOGIN mode, and iscsi_np process context is 3992 * responsible for cleaning up the early connection failure. 
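 * In that case iscsit_take_action_for_connection_exit() is skipped below
 * and connection teardown is left to the login path.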
3993 */ 3994 if (conn->conn_state != TARG_CONN_STATE_IN_LOGIN) 3995 iscsit_take_action_for_connection_exit(conn); 3996 out: 3997 return 0; 3998 } 3999 4000 static int iscsi_target_rx_opcode(struct iscsi_conn *conn, unsigned char *buf) 4001 { 4002 struct iscsi_hdr *hdr = (struct iscsi_hdr *)buf; 4003 struct iscsi_cmd *cmd; 4004 int ret = 0; 4005 4006 switch (hdr->opcode & ISCSI_OPCODE_MASK) { 4007 case ISCSI_OP_SCSI_CMD: 4008 cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); 4009 if (!cmd) 4010 goto reject; 4011 4012 ret = iscsit_handle_scsi_cmd(conn, cmd, buf); 4013 break; 4014 case ISCSI_OP_SCSI_DATA_OUT: 4015 ret = iscsit_handle_data_out(conn, buf); 4016 break; 4017 case ISCSI_OP_NOOP_OUT: 4018 cmd = NULL; 4019 if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) { 4020 cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); 4021 if (!cmd) 4022 goto reject; 4023 } 4024 ret = iscsit_handle_nop_out(conn, cmd, buf); 4025 break; 4026 case ISCSI_OP_SCSI_TMFUNC: 4027 cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); 4028 if (!cmd) 4029 goto reject; 4030 4031 ret = iscsit_handle_task_mgt_cmd(conn, cmd, buf); 4032 break; 4033 case ISCSI_OP_TEXT: 4034 if (hdr->ttt != cpu_to_be32(0xFFFFFFFF)) { 4035 cmd = iscsit_find_cmd_from_itt(conn, hdr->itt); 4036 if (!cmd) 4037 goto reject; 4038 } else { 4039 cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); 4040 if (!cmd) 4041 goto reject; 4042 } 4043 4044 ret = iscsit_handle_text_cmd(conn, cmd, buf); 4045 break; 4046 case ISCSI_OP_LOGOUT: 4047 cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); 4048 if (!cmd) 4049 goto reject; 4050 4051 ret = iscsit_handle_logout_cmd(conn, cmd, buf); 4052 if (ret > 0) 4053 wait_for_completion_timeout(&conn->conn_logout_comp, 4054 SECONDS_FOR_LOGOUT_COMP * HZ); 4055 break; 4056 case ISCSI_OP_SNACK: 4057 ret = iscsit_handle_snack(conn, buf); 4058 break; 4059 default: 4060 pr_err("Got unknown iSCSI OpCode: 0x%02x\n", hdr->opcode); 4061 if (!conn->sess->sess_ops->ErrorRecoveryLevel) { 4062 pr_err("Cannot recover from unknown" 4063 " opcode while ERL=0, closing iSCSI connection.\n"); 4064 return -1; 4065 } 4066 pr_err("Unable to recover from unknown opcode while OFMarker=No," 4067 " closing iSCSI connection.\n"); 4068 ret = -1; 4069 break; 4070 } 4071 4072 return ret; 4073 reject: 4074 return iscsit_add_reject(conn, ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf); 4075 } 4076 4077 int iscsi_target_rx_thread(void *arg) 4078 { 4079 int ret, rc; 4080 u8 buffer[ISCSI_HDR_LEN], opcode; 4081 u32 checksum = 0, digest = 0; 4082 struct iscsi_conn *conn = arg; 4083 struct kvec iov; 4084 /* 4085 * Allow ourselves to be interrupted by SIGINT so that a 4086 * connection recovery / failure event can be triggered externally. 4087 */ 4088 allow_signal(SIGINT); 4089 /* 4090 * Wait for iscsi_post_login_handler() to complete before allowing 4091 * incoming iscsi/tcp socket I/O, and/or failing the connection. 4092 */ 4093 rc = wait_for_completion_interruptible(&conn->rx_login_comp); 4094 if (rc < 0) 4095 return 0; 4096 4097 if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) { 4098 struct completion comp; 4099 4100 init_completion(&comp); 4101 rc = wait_for_completion_interruptible(&comp); 4102 if (rc < 0) 4103 goto transport_err; 4104 4105 goto transport_err; 4106 } 4107 4108 while (!kthread_should_stop()) { 4109 /* 4110 * Ensure that both TX and RX per connection kthreads 4111 * are scheduled to run on the same CPU. 
4112 */ 4113 iscsit_thread_check_cpumask(conn, current, 0); 4114 4115 memset(buffer, 0, ISCSI_HDR_LEN); 4116 memset(&iov, 0, sizeof(struct kvec)); 4117 4118 iov.iov_base = buffer; 4119 iov.iov_len = ISCSI_HDR_LEN; 4120 4121 ret = rx_data(conn, &iov, 1, ISCSI_HDR_LEN); 4122 if (ret != ISCSI_HDR_LEN) { 4123 iscsit_rx_thread_wait_for_tcp(conn); 4124 goto transport_err; 4125 } 4126 4127 if (conn->conn_ops->HeaderDigest) { 4128 iov.iov_base = &digest; 4129 iov.iov_len = ISCSI_CRC_LEN; 4130 4131 ret = rx_data(conn, &iov, 1, ISCSI_CRC_LEN); 4132 if (ret != ISCSI_CRC_LEN) { 4133 iscsit_rx_thread_wait_for_tcp(conn); 4134 goto transport_err; 4135 } 4136 4137 iscsit_do_crypto_hash_buf(&conn->conn_rx_hash, 4138 buffer, ISCSI_HDR_LEN, 4139 0, NULL, (u8 *)&checksum); 4140 4141 if (digest != checksum) { 4142 pr_err("HeaderDigest CRC32C failed," 4143 " received 0x%08x, computed 0x%08x\n", 4144 digest, checksum); 4145 /* 4146 * Set the PDU to 0xff so it will intentionally 4147 * hit default in the switch below. 4148 */ 4149 memset(buffer, 0xff, ISCSI_HDR_LEN); 4150 atomic_long_inc(&conn->sess->conn_digest_errors); 4151 } else { 4152 pr_debug("Got HeaderDigest CRC32C" 4153 " 0x%08x\n", checksum); 4154 } 4155 } 4156 4157 if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) 4158 goto transport_err; 4159 4160 opcode = buffer[0] & ISCSI_OPCODE_MASK; 4161 4162 if (conn->sess->sess_ops->SessionType && 4163 ((!(opcode & ISCSI_OP_TEXT)) || 4164 (!(opcode & ISCSI_OP_LOGOUT)))) { 4165 pr_err("Received illegal iSCSI Opcode: 0x%02x" 4166 " while in Discovery Session, rejecting.\n", opcode); 4167 iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, 4168 buffer); 4169 goto transport_err; 4170 } 4171 4172 ret = iscsi_target_rx_opcode(conn, buffer); 4173 if (ret < 0) 4174 goto transport_err; 4175 } 4176 4177 transport_err: 4178 if (!signal_pending(current)) 4179 atomic_set(&conn->transport_failed, 1); 4180 iscsit_take_action_for_connection_exit(conn); 4181 return 0; 4182 } 4183 4184 static void iscsit_release_commands_from_conn(struct iscsi_conn *conn) 4185 { 4186 struct iscsi_cmd *cmd = NULL, *cmd_tmp = NULL; 4187 struct iscsi_session *sess = conn->sess; 4188 /* 4189 * We expect this function to only ever be called from either RX or TX 4190 * thread context via iscsit_close_connection() once the other context 4191 * has been reset -> returned sleeping pre-handler state. 
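 * conn->cmd_lock is dropped and re-acquired around each iscsit_free_cmd()
 * call below, since releasing a command may block.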
4192 */ 4193 spin_lock_bh(&conn->cmd_lock); 4194 list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_conn_node) { 4195 4196 list_del_init(&cmd->i_conn_node); 4197 spin_unlock_bh(&conn->cmd_lock); 4198 4199 iscsit_increment_maxcmdsn(cmd, sess); 4200 4201 iscsit_free_cmd(cmd, true); 4202 4203 spin_lock_bh(&conn->cmd_lock); 4204 } 4205 spin_unlock_bh(&conn->cmd_lock); 4206 } 4207 4208 static void iscsit_stop_timers_for_cmds( 4209 struct iscsi_conn *conn) 4210 { 4211 struct iscsi_cmd *cmd; 4212 4213 spin_lock_bh(&conn->cmd_lock); 4214 list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) { 4215 if (cmd->data_direction == DMA_TO_DEVICE) 4216 iscsit_stop_dataout_timer(cmd); 4217 } 4218 spin_unlock_bh(&conn->cmd_lock); 4219 } 4220 4221 int iscsit_close_connection( 4222 struct iscsi_conn *conn) 4223 { 4224 int conn_logout = (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT); 4225 struct iscsi_session *sess = conn->sess; 4226 4227 pr_debug("Closing iSCSI connection CID %hu on SID:" 4228 " %u\n", conn->cid, sess->sid); 4229 /* 4230 * Always up conn_logout_comp for the traditional TCP case just in case 4231 * the RX Thread in iscsi_target_rx_opcode() is sleeping and the logout 4232 * response never got sent because the connection failed. 4233 * 4234 * However for iser-target, isert_wait4logout() is using conn_logout_comp 4235 * to signal logout response TX interrupt completion. Go ahead and skip 4236 * this for iser since isert_rx_opcode() does not wait on logout failure, 4237 * and to avoid iscsi_conn pointer dereference in iser-target code. 4238 */ 4239 if (conn->conn_transport->transport_type == ISCSI_TCP) 4240 complete(&conn->conn_logout_comp); 4241 4242 if (!strcmp(current->comm, ISCSI_RX_THREAD_NAME)) { 4243 if (conn->tx_thread && 4244 cmpxchg(&conn->tx_thread_active, true, false)) { 4245 send_sig(SIGINT, conn->tx_thread, 1); 4246 kthread_stop(conn->tx_thread); 4247 } 4248 } else if (!strcmp(current->comm, ISCSI_TX_THREAD_NAME)) { 4249 if (conn->rx_thread && 4250 cmpxchg(&conn->rx_thread_active, true, false)) { 4251 send_sig(SIGINT, conn->rx_thread, 1); 4252 kthread_stop(conn->rx_thread); 4253 } 4254 } 4255 4256 spin_lock(&iscsit_global->ts_bitmap_lock); 4257 bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id, 4258 get_order(1)); 4259 spin_unlock(&iscsit_global->ts_bitmap_lock); 4260 4261 iscsit_stop_timers_for_cmds(conn); 4262 iscsit_stop_nopin_response_timer(conn); 4263 iscsit_stop_nopin_timer(conn); 4264 4265 if (conn->conn_transport->iscsit_wait_conn) 4266 conn->conn_transport->iscsit_wait_conn(conn); 4267 4268 /* 4269 * During Connection recovery drop unacknowledged out of order 4270 * commands for this connection, and prepare the other commands 4271 * for realligence. 4272 * 4273 * During normal operation clear the out of order commands (but 4274 * do not free the struct iscsi_ooo_cmdsn's) and release all 4275 * struct iscsi_cmds. 4276 */ 4277 if (atomic_read(&conn->connection_recovery)) { 4278 iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(conn); 4279 iscsit_prepare_cmds_for_realligance(conn); 4280 } else { 4281 iscsit_clear_ooo_cmdsns_for_conn(conn); 4282 iscsit_release_commands_from_conn(conn); 4283 } 4284 iscsit_free_queue_reqs_for_conn(conn); 4285 4286 /* 4287 * Handle decrementing session or connection usage count if 4288 * a logout response was not able to be sent because the 4289 * connection failed. Fall back to Session Recovery here. 
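 * Note that only the CLOSE_SESSION case below also drops a session usage
 * reference; the CLOSE_CONNECTION case drops just the connection count.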
 */
	if (atomic_read(&conn->conn_logout_remove)) {
		if (conn->conn_logout_reason == ISCSI_LOGOUT_REASON_CLOSE_SESSION) {
			iscsit_dec_conn_usage_count(conn);
			iscsit_dec_session_usage_count(sess);
		}
		if (conn->conn_logout_reason == ISCSI_LOGOUT_REASON_CLOSE_CONNECTION)
			iscsit_dec_conn_usage_count(conn);

		atomic_set(&conn->conn_logout_remove, 0);
		atomic_set(&sess->session_reinstatement, 0);
		atomic_set(&sess->session_fall_back_to_erl0, 1);
	}

	spin_lock_bh(&sess->conn_lock);
	list_del(&conn->conn_list);

	/*
	 * Attempt to let the Initiator know this connection failed by
	 * sending a Connection Dropped Async Message on another
	 * active connection.
	 */
	if (atomic_read(&conn->connection_recovery))
		iscsit_build_conn_drop_async_message(conn);

	spin_unlock_bh(&sess->conn_lock);

	/*
	 * If connection reinstatement is being performed on this connection,
	 * up the connection reinstatement semaphore that is being blocked on
	 * in iscsit_cause_connection_reinstatement().
	 */
	spin_lock_bh(&conn->state_lock);
	if (atomic_read(&conn->sleep_on_conn_wait_comp)) {
		spin_unlock_bh(&conn->state_lock);
		complete(&conn->conn_wait_comp);
		wait_for_completion(&conn->conn_post_wait_comp);
		spin_lock_bh(&conn->state_lock);
	}

	/*
	 * If connection reinstatement is being performed on this connection
	 * by receiving a REMOVECONNFORRECOVERY logout request, up the
	 * connection wait rcfr semaphore that is being blocked on
	 * in iscsit_connection_reinstatement_rcfr().
	 */
	if (atomic_read(&conn->connection_wait_rcfr)) {
		spin_unlock_bh(&conn->state_lock);
		complete(&conn->conn_wait_rcfr_comp);
		wait_for_completion(&conn->conn_post_wait_comp);
		spin_lock_bh(&conn->state_lock);
	}
	atomic_set(&conn->connection_reinstatement, 1);
	spin_unlock_bh(&conn->state_lock);

	/*
	 * If any other processes are accessing this connection pointer we
	 * must wait until they have completed.
	 */
	iscsit_check_conn_usage_count(conn);

	if (conn->conn_rx_hash.tfm)
		crypto_free_hash(conn->conn_rx_hash.tfm);
	if (conn->conn_tx_hash.tfm)
		crypto_free_hash(conn->conn_tx_hash.tfm);

	free_cpumask_var(conn->conn_cpumask);

	kfree(conn->conn_ops);
	conn->conn_ops = NULL;

	if (conn->sock)
		sock_release(conn->sock);

	if (conn->conn_transport->iscsit_free_conn)
		conn->conn_transport->iscsit_free_conn(conn);

	iscsit_put_transport(conn->conn_transport);

	pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
	conn->conn_state = TARG_CONN_STATE_FREE;
	kfree(conn);

	spin_lock_bh(&sess->conn_lock);
	atomic_dec(&sess->nconn);
	pr_debug("Decremented iSCSI connection count to %hu from node:"
		" %s\n", atomic_read(&sess->nconn),
		sess->sess_ops->InitiatorName);
	/*
	 * Make sure that if one connection fails in a non-ERL=2 iSCSI
	 * Session that they all fail.
	 */
	if ((sess->sess_ops->ErrorRecoveryLevel != 2) && !conn_logout &&
	    !atomic_read(&sess->session_logout))
		atomic_set(&sess->session_fall_back_to_erl0, 1);

	/*
	 * If this was not the last connection in the session, and we are
	 * performing session reinstatement or falling back to ERL=0, call
	 * iscsit_stop_session() without sleeping to shut down the other
	 * active connections.
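 * The session_stop_active flag tested below ensures that only the first
 * connection to reach this point calls iscsit_stop_session() for the
 * session.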
4391 */ 4392 if (atomic_read(&sess->nconn)) { 4393 if (!atomic_read(&sess->session_reinstatement) && 4394 !atomic_read(&sess->session_fall_back_to_erl0)) { 4395 spin_unlock_bh(&sess->conn_lock); 4396 return 0; 4397 } 4398 if (!atomic_read(&sess->session_stop_active)) { 4399 atomic_set(&sess->session_stop_active, 1); 4400 spin_unlock_bh(&sess->conn_lock); 4401 iscsit_stop_session(sess, 0, 0); 4402 return 0; 4403 } 4404 spin_unlock_bh(&sess->conn_lock); 4405 return 0; 4406 } 4407 4408 /* 4409 * If this was the last connection in the session and one of the 4410 * following is occurring: 4411 * 4412 * Session Reinstatement is not being performed, and are falling back 4413 * to ERL=0 call iscsit_close_session(). 4414 * 4415 * Session Logout was requested. iscsit_close_session() will be called 4416 * elsewhere. 4417 * 4418 * Session Continuation is not being performed, start the Time2Retain 4419 * handler and check if sleep_on_sess_wait_sem is active. 4420 */ 4421 if (!atomic_read(&sess->session_reinstatement) && 4422 atomic_read(&sess->session_fall_back_to_erl0)) { 4423 spin_unlock_bh(&sess->conn_lock); 4424 target_put_session(sess->se_sess); 4425 4426 return 0; 4427 } else if (atomic_read(&sess->session_logout)) { 4428 pr_debug("Moving to TARG_SESS_STATE_FREE.\n"); 4429 sess->session_state = TARG_SESS_STATE_FREE; 4430 spin_unlock_bh(&sess->conn_lock); 4431 4432 if (atomic_read(&sess->sleep_on_sess_wait_comp)) 4433 complete(&sess->session_wait_comp); 4434 4435 return 0; 4436 } else { 4437 pr_debug("Moving to TARG_SESS_STATE_FAILED.\n"); 4438 sess->session_state = TARG_SESS_STATE_FAILED; 4439 4440 if (!atomic_read(&sess->session_continuation)) { 4441 spin_unlock_bh(&sess->conn_lock); 4442 iscsit_start_time2retain_handler(sess); 4443 } else 4444 spin_unlock_bh(&sess->conn_lock); 4445 4446 if (atomic_read(&sess->sleep_on_sess_wait_comp)) 4447 complete(&sess->session_wait_comp); 4448 4449 return 0; 4450 } 4451 spin_unlock_bh(&sess->conn_lock); 4452 4453 return 0; 4454 } 4455 4456 int iscsit_close_session(struct iscsi_session *sess) 4457 { 4458 struct iscsi_portal_group *tpg = sess->tpg; 4459 struct se_portal_group *se_tpg = &tpg->tpg_se_tpg; 4460 4461 if (atomic_read(&sess->nconn)) { 4462 pr_err("%d connection(s) still exist for iSCSI session" 4463 " to %s\n", atomic_read(&sess->nconn), 4464 sess->sess_ops->InitiatorName); 4465 BUG(); 4466 } 4467 4468 spin_lock_bh(&se_tpg->session_lock); 4469 atomic_set(&sess->session_logout, 1); 4470 atomic_set(&sess->session_reinstatement, 1); 4471 iscsit_stop_time2retain_timer(sess); 4472 spin_unlock_bh(&se_tpg->session_lock); 4473 4474 /* 4475 * transport_deregister_session_configfs() will clear the 4476 * struct se_node_acl->nacl_sess pointer now as a iscsi_np process context 4477 * can be setting it again with __transport_register_session() in 4478 * iscsi_post_login_handler() again after the iscsit_stop_session() 4479 * completes in iscsi_np context. 4480 */ 4481 transport_deregister_session_configfs(sess->se_sess); 4482 4483 /* 4484 * If any other processes are accessing this session pointer we must 4485 * wait until they have completed. If we are in an interrupt (the 4486 * time2retain handler) and contain and active session usage count we 4487 * restart the timer and exit. 
 */
	if (!in_interrupt()) {
		if (iscsit_check_session_usage_count(sess) == 1)
			iscsit_stop_session(sess, 1, 1);
	} else {
		if (iscsit_check_session_usage_count(sess) == 2) {
			atomic_set(&sess->session_logout, 0);
			iscsit_start_time2retain_handler(sess);
			return 0;
		}
	}

	transport_deregister_session(sess->se_sess);

	if (sess->sess_ops->ErrorRecoveryLevel == 2)
		iscsit_free_connection_recovery_entires(sess);

	iscsit_free_all_ooo_cmdsns(sess);

	spin_lock_bh(&se_tpg->session_lock);
	pr_debug("Moving to TARG_SESS_STATE_FREE.\n");
	sess->session_state = TARG_SESS_STATE_FREE;
	pr_debug("Released iSCSI session from node: %s\n",
		sess->sess_ops->InitiatorName);
	tpg->nsessions--;
	if (tpg->tpg_tiqn)
		tpg->tpg_tiqn->tiqn_nsessions--;

	pr_debug("Decremented number of active iSCSI Sessions on"
		" iSCSI TPG: %hu to %u\n", tpg->tpgt, tpg->nsessions);

	spin_lock(&sess_idr_lock);
	idr_remove(&sess_idr, sess->session_index);
	spin_unlock(&sess_idr_lock);

	kfree(sess->sess_ops);
	sess->sess_ops = NULL;
	spin_unlock_bh(&se_tpg->session_lock);

	kfree(sess);
	return 0;
}

static void iscsit_logout_post_handler_closesession(
	struct iscsi_conn *conn)
{
	struct iscsi_session *sess = conn->sess;
	int sleep = 1;
	/*
	 * Traditional iscsi/tcp will invoke this logic from TX thread
	 * context during session logout, so clear tx_thread_active and
	 * sleep if iscsit_close_connection() has not already occurred.
	 *
	 * Since iser-target invokes this logic from its own workqueue,
	 * always sleep waiting for RX/TX thread shutdown to complete
	 * within iscsit_close_connection().
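 *
 * cmpxchg() below returns the previous value of tx_thread_active, so sleep
 * becomes 0 when the TX thread was already marked inactive.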
4544 */ 4545 if (conn->conn_transport->transport_type == ISCSI_TCP) 4546 sleep = cmpxchg(&conn->tx_thread_active, true, false); 4547 4548 atomic_set(&conn->conn_logout_remove, 0); 4549 complete(&conn->conn_logout_comp); 4550 4551 iscsit_dec_conn_usage_count(conn); 4552 iscsit_stop_session(sess, sleep, sleep); 4553 iscsit_dec_session_usage_count(sess); 4554 target_put_session(sess->se_sess); 4555 } 4556 4557 static void iscsit_logout_post_handler_samecid( 4558 struct iscsi_conn *conn) 4559 { 4560 int sleep = 1; 4561 4562 if (conn->conn_transport->transport_type == ISCSI_TCP) 4563 sleep = cmpxchg(&conn->tx_thread_active, true, false); 4564 4565 atomic_set(&conn->conn_logout_remove, 0); 4566 complete(&conn->conn_logout_comp); 4567 4568 iscsit_cause_connection_reinstatement(conn, sleep); 4569 iscsit_dec_conn_usage_count(conn); 4570 } 4571 4572 static void iscsit_logout_post_handler_diffcid( 4573 struct iscsi_conn *conn, 4574 u16 cid) 4575 { 4576 struct iscsi_conn *l_conn; 4577 struct iscsi_session *sess = conn->sess; 4578 bool conn_found = false; 4579 4580 if (!sess) 4581 return; 4582 4583 spin_lock_bh(&sess->conn_lock); 4584 list_for_each_entry(l_conn, &sess->sess_conn_list, conn_list) { 4585 if (l_conn->cid == cid) { 4586 iscsit_inc_conn_usage_count(l_conn); 4587 conn_found = true; 4588 break; 4589 } 4590 } 4591 spin_unlock_bh(&sess->conn_lock); 4592 4593 if (!conn_found) 4594 return; 4595 4596 if (l_conn->sock) 4597 l_conn->sock->ops->shutdown(l_conn->sock, RCV_SHUTDOWN); 4598 4599 spin_lock_bh(&l_conn->state_lock); 4600 pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n"); 4601 l_conn->conn_state = TARG_CONN_STATE_IN_LOGOUT; 4602 spin_unlock_bh(&l_conn->state_lock); 4603 4604 iscsit_cause_connection_reinstatement(l_conn, 1); 4605 iscsit_dec_conn_usage_count(l_conn); 4606 } 4607 4608 /* 4609 * Return of 0 causes the TX thread to restart. 
4610 */ 4611 int iscsit_logout_post_handler( 4612 struct iscsi_cmd *cmd, 4613 struct iscsi_conn *conn) 4614 { 4615 int ret = 0; 4616 4617 switch (cmd->logout_reason) { 4618 case ISCSI_LOGOUT_REASON_CLOSE_SESSION: 4619 switch (cmd->logout_response) { 4620 case ISCSI_LOGOUT_SUCCESS: 4621 case ISCSI_LOGOUT_CLEANUP_FAILED: 4622 default: 4623 iscsit_logout_post_handler_closesession(conn); 4624 break; 4625 } 4626 ret = 0; 4627 break; 4628 case ISCSI_LOGOUT_REASON_CLOSE_CONNECTION: 4629 if (conn->cid == cmd->logout_cid) { 4630 switch (cmd->logout_response) { 4631 case ISCSI_LOGOUT_SUCCESS: 4632 case ISCSI_LOGOUT_CLEANUP_FAILED: 4633 default: 4634 iscsit_logout_post_handler_samecid(conn); 4635 break; 4636 } 4637 ret = 0; 4638 } else { 4639 switch (cmd->logout_response) { 4640 case ISCSI_LOGOUT_SUCCESS: 4641 iscsit_logout_post_handler_diffcid(conn, 4642 cmd->logout_cid); 4643 break; 4644 case ISCSI_LOGOUT_CID_NOT_FOUND: 4645 case ISCSI_LOGOUT_CLEANUP_FAILED: 4646 default: 4647 break; 4648 } 4649 ret = 1; 4650 } 4651 break; 4652 case ISCSI_LOGOUT_REASON_RECOVERY: 4653 switch (cmd->logout_response) { 4654 case ISCSI_LOGOUT_SUCCESS: 4655 case ISCSI_LOGOUT_CID_NOT_FOUND: 4656 case ISCSI_LOGOUT_RECOVERY_UNSUPPORTED: 4657 case ISCSI_LOGOUT_CLEANUP_FAILED: 4658 default: 4659 break; 4660 } 4661 ret = 1; 4662 break; 4663 default: 4664 break; 4665 4666 } 4667 return ret; 4668 } 4669 EXPORT_SYMBOL(iscsit_logout_post_handler); 4670 4671 void iscsit_fail_session(struct iscsi_session *sess) 4672 { 4673 struct iscsi_conn *conn; 4674 4675 spin_lock_bh(&sess->conn_lock); 4676 list_for_each_entry(conn, &sess->sess_conn_list, conn_list) { 4677 pr_debug("Moving to TARG_CONN_STATE_CLEANUP_WAIT.\n"); 4678 conn->conn_state = TARG_CONN_STATE_CLEANUP_WAIT; 4679 } 4680 spin_unlock_bh(&sess->conn_lock); 4681 4682 pr_debug("Moving to TARG_SESS_STATE_FAILED.\n"); 4683 sess->session_state = TARG_SESS_STATE_FAILED; 4684 } 4685 4686 int iscsit_free_session(struct iscsi_session *sess) 4687 { 4688 u16 conn_count = atomic_read(&sess->nconn); 4689 struct iscsi_conn *conn, *conn_tmp = NULL; 4690 int is_last; 4691 4692 spin_lock_bh(&sess->conn_lock); 4693 atomic_set(&sess->sleep_on_sess_wait_comp, 1); 4694 4695 list_for_each_entry_safe(conn, conn_tmp, &sess->sess_conn_list, 4696 conn_list) { 4697 if (conn_count == 0) 4698 break; 4699 4700 if (list_is_last(&conn->conn_list, &sess->sess_conn_list)) { 4701 is_last = 1; 4702 } else { 4703 iscsit_inc_conn_usage_count(conn_tmp); 4704 is_last = 0; 4705 } 4706 iscsit_inc_conn_usage_count(conn); 4707 4708 spin_unlock_bh(&sess->conn_lock); 4709 iscsit_cause_connection_reinstatement(conn, 1); 4710 spin_lock_bh(&sess->conn_lock); 4711 4712 iscsit_dec_conn_usage_count(conn); 4713 if (is_last == 0) 4714 iscsit_dec_conn_usage_count(conn_tmp); 4715 4716 conn_count--; 4717 } 4718 4719 if (atomic_read(&sess->nconn)) { 4720 spin_unlock_bh(&sess->conn_lock); 4721 wait_for_completion(&sess->session_wait_comp); 4722 } else 4723 spin_unlock_bh(&sess->conn_lock); 4724 4725 target_put_session(sess->se_sess); 4726 return 0; 4727 } 4728 4729 void iscsit_stop_session( 4730 struct iscsi_session *sess, 4731 int session_sleep, 4732 int connection_sleep) 4733 { 4734 u16 conn_count = atomic_read(&sess->nconn); 4735 struct iscsi_conn *conn, *conn_tmp = NULL; 4736 int is_last; 4737 4738 spin_lock_bh(&sess->conn_lock); 4739 if (session_sleep) 4740 atomic_set(&sess->sleep_on_sess_wait_comp, 1); 4741 4742 if (connection_sleep) { 4743 list_for_each_entry_safe(conn, conn_tmp, &sess->sess_conn_list, 4744 conn_list) { 4745 if 
(conn_count == 0) 4746 break; 4747 4748 if (list_is_last(&conn->conn_list, &sess->sess_conn_list)) { 4749 is_last = 1; 4750 } else { 4751 iscsit_inc_conn_usage_count(conn_tmp); 4752 is_last = 0; 4753 } 4754 iscsit_inc_conn_usage_count(conn); 4755 4756 spin_unlock_bh(&sess->conn_lock); 4757 iscsit_cause_connection_reinstatement(conn, 1); 4758 spin_lock_bh(&sess->conn_lock); 4759 4760 iscsit_dec_conn_usage_count(conn); 4761 if (is_last == 0) 4762 iscsit_dec_conn_usage_count(conn_tmp); 4763 conn_count--; 4764 } 4765 } else { 4766 list_for_each_entry(conn, &sess->sess_conn_list, conn_list) 4767 iscsit_cause_connection_reinstatement(conn, 0); 4768 } 4769 4770 if (session_sleep && atomic_read(&sess->nconn)) { 4771 spin_unlock_bh(&sess->conn_lock); 4772 wait_for_completion(&sess->session_wait_comp); 4773 } else 4774 spin_unlock_bh(&sess->conn_lock); 4775 } 4776 4777 int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force) 4778 { 4779 struct iscsi_session *sess; 4780 struct se_portal_group *se_tpg = &tpg->tpg_se_tpg; 4781 struct se_session *se_sess, *se_sess_tmp; 4782 LIST_HEAD(free_list); 4783 int session_count = 0; 4784 4785 spin_lock_bh(&se_tpg->session_lock); 4786 if (tpg->nsessions && !force) { 4787 spin_unlock_bh(&se_tpg->session_lock); 4788 return -1; 4789 } 4790 4791 list_for_each_entry_safe(se_sess, se_sess_tmp, &se_tpg->tpg_sess_list, 4792 sess_list) { 4793 sess = (struct iscsi_session *)se_sess->fabric_sess_ptr; 4794 4795 spin_lock(&sess->conn_lock); 4796 if (atomic_read(&sess->session_fall_back_to_erl0) || 4797 atomic_read(&sess->session_logout) || 4798 (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) { 4799 spin_unlock(&sess->conn_lock); 4800 continue; 4801 } 4802 atomic_set(&sess->session_reinstatement, 1); 4803 spin_unlock(&sess->conn_lock); 4804 4805 list_move_tail(&se_sess->sess_list, &free_list); 4806 } 4807 spin_unlock_bh(&se_tpg->session_lock); 4808 4809 list_for_each_entry_safe(se_sess, se_sess_tmp, &free_list, sess_list) { 4810 sess = (struct iscsi_session *)se_sess->fabric_sess_ptr; 4811 4812 iscsit_free_session(sess); 4813 session_count++; 4814 } 4815 4816 pr_debug("Released %d iSCSI Session(s) from Target Portal" 4817 " Group: %hu\n", session_count, tpg->tpgt); 4818 return 0; 4819 } 4820 4821 MODULE_DESCRIPTION("iSCSI-Target Driver for mainline target infrastructure"); 4822 MODULE_VERSION("4.1.x"); 4823 MODULE_AUTHOR("nab@Linux-iSCSI.org"); 4824 MODULE_LICENSE("GPL"); 4825 4826 module_init(iscsi_target_init_module); 4827 module_exit(iscsi_target_cleanup_module); 4828