/*******************************************************************************
 * This file contains main functions related to the iSCSI Target Core Driver.
 *
 * © Copyright 2007-2011 RisingTide Systems LLC.
 *
 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
 *
 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 ******************************************************************************/

#include <linux/string.h>
#include <linux/kthread.h>
#include <linux/crypto.h>
#include <linux/completion.h>
#include <linux/module.h>
#include <linux/idr.h>
#include <asm/unaligned.h>
#include <scsi/scsi_device.h>
#include <scsi/iscsi_proto.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>

#include "iscsi_target_core.h"
#include "iscsi_target_parameters.h"
#include "iscsi_target_seq_pdu_list.h"
#include "iscsi_target_tq.h"
#include "iscsi_target_configfs.h"
#include "iscsi_target_datain_values.h"
#include "iscsi_target_erl0.h"
#include "iscsi_target_erl1.h"
#include "iscsi_target_erl2.h"
#include "iscsi_target_login.h"
#include "iscsi_target_tmr.h"
#include "iscsi_target_tpg.h"
#include "iscsi_target_util.h"
#include "iscsi_target.h"
#include "iscsi_target_device.h"
#include "iscsi_target_stat.h"

static LIST_HEAD(g_tiqn_list);
static LIST_HEAD(g_np_list);
static DEFINE_SPINLOCK(tiqn_lock);
static DEFINE_SPINLOCK(np_lock);

static struct idr tiqn_idr;
struct idr sess_idr;
struct mutex auth_id_lock;
spinlock_t sess_idr_lock;

struct iscsit_global *iscsit_global;

struct kmem_cache *lio_cmd_cache;
struct kmem_cache *lio_qr_cache;
struct kmem_cache *lio_dr_cache;
struct kmem_cache *lio_ooo_cache;
struct kmem_cache *lio_r2t_cache;

static int iscsit_handle_immediate_data(struct iscsi_cmd *,
			unsigned char *buf, u32);
static int iscsit_logout_post_handler(struct iscsi_cmd *, struct iscsi_conn *);

struct iscsi_tiqn *iscsit_get_tiqn_for_login(unsigned char *buf)
{
	struct iscsi_tiqn *tiqn = NULL;

	spin_lock(&tiqn_lock);
	list_for_each_entry(tiqn, &g_tiqn_list, tiqn_list) {
		if (!strcmp(tiqn->tiqn, buf)) {

			spin_lock(&tiqn->tiqn_state_lock);
			if (tiqn->tiqn_state == TIQN_STATE_ACTIVE) {
				tiqn->tiqn_access_count++;
				spin_unlock(&tiqn->tiqn_state_lock);
				spin_unlock(&tiqn_lock);
				return tiqn;
			}
			spin_unlock(&tiqn->tiqn_state_lock);
		}
	}
	spin_unlock(&tiqn_lock);

	return NULL;
}

static int iscsit_set_tiqn_shutdown(struct iscsi_tiqn *tiqn)
{
	spin_lock(&tiqn->tiqn_state_lock);
	if (tiqn->tiqn_state == TIQN_STATE_ACTIVE) {
		tiqn->tiqn_state = TIQN_STATE_SHUTDOWN;
		spin_unlock(&tiqn->tiqn_state_lock);
		return 0;
	}
	spin_unlock(&tiqn->tiqn_state_lock);

	return -1;
}

void
iscsit_put_tiqn_for_login(struct iscsi_tiqn *tiqn)
{
	spin_lock(&tiqn->tiqn_state_lock);
	tiqn->tiqn_access_count--;
	spin_unlock(&tiqn->tiqn_state_lock);
}

/*
 * Note that IQN formatting is expected to be done in userspace, and
 * no explicit IQN format checks are done here.
 */
struct iscsi_tiqn *iscsit_add_tiqn(unsigned char *buf)
{
	struct iscsi_tiqn *tiqn = NULL;
	int ret;

	if (strlen(buf) >= ISCSI_IQN_LEN) {
		pr_err("Target IQN exceeds %d bytes\n",
				ISCSI_IQN_LEN);
		return ERR_PTR(-EINVAL);
	}

	tiqn = kzalloc(sizeof(struct iscsi_tiqn), GFP_KERNEL);
	if (!tiqn) {
		pr_err("Unable to allocate struct iscsi_tiqn\n");
		return ERR_PTR(-ENOMEM);
	}

	sprintf(tiqn->tiqn, "%s", buf);
	INIT_LIST_HEAD(&tiqn->tiqn_list);
	INIT_LIST_HEAD(&tiqn->tiqn_tpg_list);
	spin_lock_init(&tiqn->tiqn_state_lock);
	spin_lock_init(&tiqn->tiqn_tpg_lock);
	spin_lock_init(&tiqn->sess_err_stats.lock);
	spin_lock_init(&tiqn->login_stats.lock);
	spin_lock_init(&tiqn->logout_stats.lock);

	if (!idr_pre_get(&tiqn_idr, GFP_KERNEL)) {
		pr_err("idr_pre_get() for tiqn_idr failed\n");
		kfree(tiqn);
		return ERR_PTR(-ENOMEM);
	}
	tiqn->tiqn_state = TIQN_STATE_ACTIVE;

	spin_lock(&tiqn_lock);
	ret = idr_get_new(&tiqn_idr, NULL, &tiqn->tiqn_index);
	if (ret < 0) {
		pr_err("idr_get_new() failed for tiqn->tiqn_index\n");
		spin_unlock(&tiqn_lock);
		kfree(tiqn);
		return ERR_PTR(ret);
	}
	list_add_tail(&tiqn->tiqn_list, &g_tiqn_list);
	spin_unlock(&tiqn_lock);

	pr_debug("CORE[0] - Added iSCSI Target IQN: %s\n", tiqn->tiqn);

	return tiqn;

}

static void iscsit_wait_for_tiqn(struct iscsi_tiqn *tiqn)
{
	/*
	 * Wait for accesses to said struct iscsi_tiqn to end.
	 */
	spin_lock(&tiqn->tiqn_state_lock);
	while (tiqn->tiqn_access_count != 0) {
		spin_unlock(&tiqn->tiqn_state_lock);
		msleep(10);
		spin_lock(&tiqn->tiqn_state_lock);
	}
	spin_unlock(&tiqn->tiqn_state_lock);
}

void iscsit_del_tiqn(struct iscsi_tiqn *tiqn)
{
	/*
	 * iscsit_set_tiqn_shutdown sets tiqn->tiqn_state = TIQN_STATE_SHUTDOWN
	 * while holding tiqn->tiqn_state_lock. This means that all subsequent
	 * attempts to access this struct iscsi_tiqn will fail from both transport
	 * fabric and control code paths.
	 */
	if (iscsit_set_tiqn_shutdown(tiqn) < 0) {
		pr_err("iscsit_set_tiqn_shutdown() failed\n");
		return;
	}

	iscsit_wait_for_tiqn(tiqn);

	spin_lock(&tiqn_lock);
	list_del(&tiqn->tiqn_list);
	idr_remove(&tiqn_idr, tiqn->tiqn_index);
	spin_unlock(&tiqn_lock);

	pr_debug("CORE[0] - Deleted iSCSI Target IQN: %s\n",
			tiqn->tiqn);
	kfree(tiqn);
}

int iscsit_access_np(struct iscsi_np *np, struct iscsi_portal_group *tpg)
{
	int ret;
	/*
	 * Determine if the network portal is accepting storage traffic.
	 */
	spin_lock_bh(&np->np_thread_lock);
	if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
		spin_unlock_bh(&np->np_thread_lock);
		return -1;
	}
	if (np->np_login_tpg) {
		pr_err("np->np_login_tpg() is not NULL!\n");
		spin_unlock_bh(&np->np_thread_lock);
		return -1;
	}
	spin_unlock_bh(&np->np_thread_lock);
	/*
	 * Determine if the portal group is accepting storage traffic.
229 */ 230 spin_lock_bh(&tpg->tpg_state_lock); 231 if (tpg->tpg_state != TPG_STATE_ACTIVE) { 232 spin_unlock_bh(&tpg->tpg_state_lock); 233 return -1; 234 } 235 spin_unlock_bh(&tpg->tpg_state_lock); 236 237 /* 238 * Here we serialize access across the TIQN+TPG Tuple. 239 */ 240 ret = mutex_lock_interruptible(&tpg->np_login_lock); 241 if ((ret != 0) || signal_pending(current)) 242 return -1; 243 244 spin_lock_bh(&np->np_thread_lock); 245 np->np_login_tpg = tpg; 246 spin_unlock_bh(&np->np_thread_lock); 247 248 return 0; 249 } 250 251 int iscsit_deaccess_np(struct iscsi_np *np, struct iscsi_portal_group *tpg) 252 { 253 struct iscsi_tiqn *tiqn = tpg->tpg_tiqn; 254 255 spin_lock_bh(&np->np_thread_lock); 256 np->np_login_tpg = NULL; 257 spin_unlock_bh(&np->np_thread_lock); 258 259 mutex_unlock(&tpg->np_login_lock); 260 261 if (tiqn) 262 iscsit_put_tiqn_for_login(tiqn); 263 264 return 0; 265 } 266 267 static struct iscsi_np *iscsit_get_np( 268 struct __kernel_sockaddr_storage *sockaddr, 269 int network_transport) 270 { 271 struct sockaddr_in *sock_in, *sock_in_e; 272 struct sockaddr_in6 *sock_in6, *sock_in6_e; 273 struct iscsi_np *np; 274 int ip_match = 0; 275 u16 port; 276 277 spin_lock_bh(&np_lock); 278 list_for_each_entry(np, &g_np_list, np_list) { 279 spin_lock(&np->np_thread_lock); 280 if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) { 281 spin_unlock(&np->np_thread_lock); 282 continue; 283 } 284 285 if (sockaddr->ss_family == AF_INET6) { 286 sock_in6 = (struct sockaddr_in6 *)sockaddr; 287 sock_in6_e = (struct sockaddr_in6 *)&np->np_sockaddr; 288 289 if (!memcmp(&sock_in6->sin6_addr.in6_u, 290 &sock_in6_e->sin6_addr.in6_u, 291 sizeof(struct in6_addr))) 292 ip_match = 1; 293 294 port = ntohs(sock_in6->sin6_port); 295 } else { 296 sock_in = (struct sockaddr_in *)sockaddr; 297 sock_in_e = (struct sockaddr_in *)&np->np_sockaddr; 298 299 if (sock_in->sin_addr.s_addr == 300 sock_in_e->sin_addr.s_addr) 301 ip_match = 1; 302 303 port = ntohs(sock_in->sin_port); 304 } 305 306 if ((ip_match == 1) && (np->np_port == port) && 307 (np->np_network_transport == network_transport)) { 308 /* 309 * Increment the np_exports reference count now to 310 * prevent iscsit_del_np() below from being called 311 * while iscsi_tpg_add_network_portal() is called. 312 */ 313 np->np_exports++; 314 spin_unlock(&np->np_thread_lock); 315 spin_unlock_bh(&np_lock); 316 return np; 317 } 318 spin_unlock(&np->np_thread_lock); 319 } 320 spin_unlock_bh(&np_lock); 321 322 return NULL; 323 } 324 325 struct iscsi_np *iscsit_add_np( 326 struct __kernel_sockaddr_storage *sockaddr, 327 char *ip_str, 328 int network_transport) 329 { 330 struct sockaddr_in *sock_in; 331 struct sockaddr_in6 *sock_in6; 332 struct iscsi_np *np; 333 int ret; 334 /* 335 * Locate the existing struct iscsi_np if already active.. 
336 */ 337 np = iscsit_get_np(sockaddr, network_transport); 338 if (np) 339 return np; 340 341 np = kzalloc(sizeof(struct iscsi_np), GFP_KERNEL); 342 if (!np) { 343 pr_err("Unable to allocate memory for struct iscsi_np\n"); 344 return ERR_PTR(-ENOMEM); 345 } 346 347 np->np_flags |= NPF_IP_NETWORK; 348 if (sockaddr->ss_family == AF_INET6) { 349 sock_in6 = (struct sockaddr_in6 *)sockaddr; 350 snprintf(np->np_ip, IPV6_ADDRESS_SPACE, "%s", ip_str); 351 np->np_port = ntohs(sock_in6->sin6_port); 352 } else { 353 sock_in = (struct sockaddr_in *)sockaddr; 354 sprintf(np->np_ip, "%s", ip_str); 355 np->np_port = ntohs(sock_in->sin_port); 356 } 357 358 np->np_network_transport = network_transport; 359 spin_lock_init(&np->np_thread_lock); 360 init_completion(&np->np_restart_comp); 361 INIT_LIST_HEAD(&np->np_list); 362 363 ret = iscsi_target_setup_login_socket(np, sockaddr); 364 if (ret != 0) { 365 kfree(np); 366 return ERR_PTR(ret); 367 } 368 369 np->np_thread = kthread_run(iscsi_target_login_thread, np, "iscsi_np"); 370 if (IS_ERR(np->np_thread)) { 371 pr_err("Unable to create kthread: iscsi_np\n"); 372 ret = PTR_ERR(np->np_thread); 373 kfree(np); 374 return ERR_PTR(ret); 375 } 376 /* 377 * Increment the np_exports reference count now to prevent 378 * iscsit_del_np() below from being run while a new call to 379 * iscsi_tpg_add_network_portal() for a matching iscsi_np is 380 * active. We don't need to hold np->np_thread_lock at this 381 * point because iscsi_np has not been added to g_np_list yet. 382 */ 383 np->np_exports = 1; 384 385 spin_lock_bh(&np_lock); 386 list_add_tail(&np->np_list, &g_np_list); 387 spin_unlock_bh(&np_lock); 388 389 pr_debug("CORE[0] - Added Network Portal: %s:%hu on %s\n", 390 np->np_ip, np->np_port, (np->np_network_transport == ISCSI_TCP) ? 391 "TCP" : "SCTP"); 392 393 return np; 394 } 395 396 int iscsit_reset_np_thread( 397 struct iscsi_np *np, 398 struct iscsi_tpg_np *tpg_np, 399 struct iscsi_portal_group *tpg) 400 { 401 spin_lock_bh(&np->np_thread_lock); 402 if (tpg && tpg_np) { 403 /* 404 * The reset operation need only be performed when the 405 * passed struct iscsi_portal_group has a login in progress 406 * to one of the network portals. 407 */ 408 if (tpg_np->tpg_np->np_login_tpg != tpg) { 409 spin_unlock_bh(&np->np_thread_lock); 410 return 0; 411 } 412 } 413 if (np->np_thread_state == ISCSI_NP_THREAD_INACTIVE) { 414 spin_unlock_bh(&np->np_thread_lock); 415 return 0; 416 } 417 np->np_thread_state = ISCSI_NP_THREAD_RESET; 418 419 if (np->np_thread) { 420 spin_unlock_bh(&np->np_thread_lock); 421 send_sig(SIGINT, np->np_thread, 1); 422 wait_for_completion(&np->np_restart_comp); 423 spin_lock_bh(&np->np_thread_lock); 424 } 425 spin_unlock_bh(&np->np_thread_lock); 426 427 return 0; 428 } 429 430 int iscsit_del_np_comm(struct iscsi_np *np) 431 { 432 if (np->np_socket) 433 sock_release(np->np_socket); 434 return 0; 435 } 436 437 int iscsit_del_np(struct iscsi_np *np) 438 { 439 spin_lock_bh(&np->np_thread_lock); 440 np->np_exports--; 441 if (np->np_exports) { 442 spin_unlock_bh(&np->np_thread_lock); 443 return 0; 444 } 445 np->np_thread_state = ISCSI_NP_THREAD_SHUTDOWN; 446 spin_unlock_bh(&np->np_thread_lock); 447 448 if (np->np_thread) { 449 /* 450 * We need to send the signal to wakeup Linux/Net 451 * which may be sleeping in sock_accept().. 
		 */
		send_sig(SIGINT, np->np_thread, 1);
		kthread_stop(np->np_thread);
	}
	iscsit_del_np_comm(np);

	spin_lock_bh(&np_lock);
	list_del(&np->np_list);
	spin_unlock_bh(&np_lock);

	pr_debug("CORE[0] - Removed Network Portal: %s:%hu on %s\n",
		np->np_ip, np->np_port, (np->np_network_transport == ISCSI_TCP) ?
		"TCP" : "SCTP");

	kfree(np);
	return 0;
}

static int __init iscsi_target_init_module(void)
{
	int ret = 0;

	pr_debug("iSCSI-Target "ISCSIT_VERSION"\n");

	iscsit_global = kzalloc(sizeof(struct iscsit_global), GFP_KERNEL);
	if (!iscsit_global) {
		pr_err("Unable to allocate memory for iscsit_global\n");
		return -1;
	}
	mutex_init(&auth_id_lock);
	spin_lock_init(&sess_idr_lock);
	idr_init(&tiqn_idr);
	idr_init(&sess_idr);

	ret = iscsi_target_register_configfs();
	if (ret < 0)
		goto out;

	ret = iscsi_thread_set_init();
	if (ret < 0)
		goto configfs_out;

	if (iscsi_allocate_thread_sets(TARGET_THREAD_SET_COUNT) !=
			TARGET_THREAD_SET_COUNT) {
		pr_err("iscsi_allocate_thread_sets() returned"
			" unexpected value!\n");
		goto ts_out1;
	}

	lio_cmd_cache = kmem_cache_create("lio_cmd_cache",
			sizeof(struct iscsi_cmd), __alignof__(struct iscsi_cmd),
			0, NULL);
	if (!lio_cmd_cache) {
		pr_err("Unable to kmem_cache_create() for"
			" lio_cmd_cache\n");
		goto ts_out2;
	}

	lio_qr_cache = kmem_cache_create("lio_qr_cache",
			sizeof(struct iscsi_queue_req),
			__alignof__(struct iscsi_queue_req), 0, NULL);
	if (!lio_qr_cache) {
		pr_err("Unable to kmem_cache_create() for"
			" lio_qr_cache\n");
		goto cmd_out;
	}

	lio_dr_cache = kmem_cache_create("lio_dr_cache",
			sizeof(struct iscsi_datain_req),
			__alignof__(struct iscsi_datain_req), 0, NULL);
	if (!lio_dr_cache) {
		pr_err("Unable to kmem_cache_create() for"
			" lio_dr_cache\n");
		goto qr_out;
	}

	lio_ooo_cache = kmem_cache_create("lio_ooo_cache",
			sizeof(struct iscsi_ooo_cmdsn),
			__alignof__(struct iscsi_ooo_cmdsn), 0, NULL);
	if (!lio_ooo_cache) {
		pr_err("Unable to kmem_cache_create() for"
			" lio_ooo_cache\n");
		goto dr_out;
	}

	lio_r2t_cache = kmem_cache_create("lio_r2t_cache",
			sizeof(struct iscsi_r2t), __alignof__(struct iscsi_r2t),
			0, NULL);
	if (!lio_r2t_cache) {
		pr_err("Unable to kmem_cache_create() for"
			" lio_r2t_cache\n");
		goto ooo_out;
	}

	if (iscsit_load_discovery_tpg() < 0)
		goto r2t_out;

	return ret;
r2t_out:
	kmem_cache_destroy(lio_r2t_cache);
ooo_out:
	kmem_cache_destroy(lio_ooo_cache);
dr_out:
	kmem_cache_destroy(lio_dr_cache);
qr_out:
	kmem_cache_destroy(lio_qr_cache);
cmd_out:
	kmem_cache_destroy(lio_cmd_cache);
ts_out2:
	iscsi_deallocate_thread_sets();
ts_out1:
	iscsi_thread_set_free();
configfs_out:
	iscsi_target_deregister_configfs();
out:
	kfree(iscsit_global);
	return -ENOMEM;
}

static void __exit iscsi_target_cleanup_module(void)
{
	iscsi_deallocate_thread_sets();
	iscsi_thread_set_free();
	iscsit_release_discovery_tpg();
	kmem_cache_destroy(lio_cmd_cache);
	kmem_cache_destroy(lio_qr_cache);
	kmem_cache_destroy(lio_dr_cache);
	kmem_cache_destroy(lio_ooo_cache);
	kmem_cache_destroy(lio_r2t_cache);

	iscsi_target_deregister_configfs();

	kfree(iscsit_global);
}

static int iscsit_add_reject(
	u8 reason,
	int
fail_conn, 590 unsigned char *buf, 591 struct iscsi_conn *conn) 592 { 593 struct iscsi_cmd *cmd; 594 struct iscsi_reject *hdr; 595 int ret; 596 597 cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); 598 if (!cmd) 599 return -1; 600 601 cmd->iscsi_opcode = ISCSI_OP_REJECT; 602 if (fail_conn) 603 cmd->cmd_flags |= ICF_REJECT_FAIL_CONN; 604 605 hdr = (struct iscsi_reject *) cmd->pdu; 606 hdr->reason = reason; 607 608 cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL); 609 if (!cmd->buf_ptr) { 610 pr_err("Unable to allocate memory for cmd->buf_ptr\n"); 611 iscsit_release_cmd(cmd); 612 return -1; 613 } 614 615 spin_lock_bh(&conn->cmd_lock); 616 list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list); 617 spin_unlock_bh(&conn->cmd_lock); 618 619 cmd->i_state = ISTATE_SEND_REJECT; 620 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); 621 622 ret = wait_for_completion_interruptible(&cmd->reject_comp); 623 if (ret != 0) 624 return -1; 625 626 return (!fail_conn) ? 0 : -1; 627 } 628 629 int iscsit_add_reject_from_cmd( 630 u8 reason, 631 int fail_conn, 632 int add_to_conn, 633 unsigned char *buf, 634 struct iscsi_cmd *cmd) 635 { 636 struct iscsi_conn *conn; 637 struct iscsi_reject *hdr; 638 int ret; 639 640 if (!cmd->conn) { 641 pr_err("cmd->conn is NULL for ITT: 0x%08x\n", 642 cmd->init_task_tag); 643 return -1; 644 } 645 conn = cmd->conn; 646 647 cmd->iscsi_opcode = ISCSI_OP_REJECT; 648 if (fail_conn) 649 cmd->cmd_flags |= ICF_REJECT_FAIL_CONN; 650 651 hdr = (struct iscsi_reject *) cmd->pdu; 652 hdr->reason = reason; 653 654 cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL); 655 if (!cmd->buf_ptr) { 656 pr_err("Unable to allocate memory for cmd->buf_ptr\n"); 657 iscsit_release_cmd(cmd); 658 return -1; 659 } 660 661 if (add_to_conn) { 662 spin_lock_bh(&conn->cmd_lock); 663 list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list); 664 spin_unlock_bh(&conn->cmd_lock); 665 } 666 667 cmd->i_state = ISTATE_SEND_REJECT; 668 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); 669 670 ret = wait_for_completion_interruptible(&cmd->reject_comp); 671 if (ret != 0) 672 return -1; 673 674 return (!fail_conn) ? 0 : -1; 675 } 676 677 /* 678 * Map some portion of the allocated scatterlist to an iovec, suitable for 679 * kernel sockets to copy data in/out. 680 */ 681 static int iscsit_map_iovec( 682 struct iscsi_cmd *cmd, 683 struct kvec *iov, 684 u32 data_offset, 685 u32 data_length) 686 { 687 u32 i = 0; 688 struct scatterlist *sg; 689 unsigned int page_off; 690 691 /* 692 * We know each entry in t_data_sg contains a page. 
693 */ 694 sg = &cmd->se_cmd.t_data_sg[data_offset / PAGE_SIZE]; 695 page_off = (data_offset % PAGE_SIZE); 696 697 cmd->first_data_sg = sg; 698 cmd->first_data_sg_off = page_off; 699 700 while (data_length) { 701 u32 cur_len = min_t(u32, data_length, sg->length - page_off); 702 703 iov[i].iov_base = kmap(sg_page(sg)) + sg->offset + page_off; 704 iov[i].iov_len = cur_len; 705 706 data_length -= cur_len; 707 page_off = 0; 708 sg = sg_next(sg); 709 i++; 710 } 711 712 cmd->kmapped_nents = i; 713 714 return i; 715 } 716 717 static void iscsit_unmap_iovec(struct iscsi_cmd *cmd) 718 { 719 u32 i; 720 struct scatterlist *sg; 721 722 sg = cmd->first_data_sg; 723 724 for (i = 0; i < cmd->kmapped_nents; i++) 725 kunmap(sg_page(&sg[i])); 726 } 727 728 static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn) 729 { 730 struct iscsi_cmd *cmd; 731 732 conn->exp_statsn = exp_statsn; 733 734 spin_lock_bh(&conn->cmd_lock); 735 list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) { 736 spin_lock(&cmd->istate_lock); 737 if ((cmd->i_state == ISTATE_SENT_STATUS) && 738 (cmd->stat_sn < exp_statsn)) { 739 cmd->i_state = ISTATE_REMOVE; 740 spin_unlock(&cmd->istate_lock); 741 iscsit_add_cmd_to_immediate_queue(cmd, conn, 742 cmd->i_state); 743 continue; 744 } 745 spin_unlock(&cmd->istate_lock); 746 } 747 spin_unlock_bh(&conn->cmd_lock); 748 } 749 750 static int iscsit_allocate_iovecs(struct iscsi_cmd *cmd) 751 { 752 u32 iov_count = max(1UL, DIV_ROUND_UP(cmd->se_cmd.data_length, PAGE_SIZE)); 753 754 iov_count += ISCSI_IOV_DATA_BUFFER; 755 756 cmd->iov_data = kzalloc(iov_count * sizeof(struct kvec), GFP_KERNEL); 757 if (!cmd->iov_data) { 758 pr_err("Unable to allocate cmd->iov_data\n"); 759 return -ENOMEM; 760 } 761 762 cmd->orig_iov_data_count = iov_count; 763 return 0; 764 } 765 766 static int iscsit_handle_scsi_cmd( 767 struct iscsi_conn *conn, 768 unsigned char *buf) 769 { 770 int data_direction, cmdsn_ret = 0, immed_ret, ret, transport_ret; 771 int dump_immediate_data = 0, send_check_condition = 0, payload_length; 772 struct iscsi_cmd *cmd = NULL; 773 struct iscsi_scsi_req *hdr; 774 int iscsi_task_attr; 775 int sam_task_attr; 776 777 spin_lock_bh(&conn->sess->session_stats_lock); 778 conn->sess->cmd_pdus++; 779 if (conn->sess->se_sess->se_node_acl) { 780 spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock); 781 conn->sess->se_sess->se_node_acl->num_cmds++; 782 spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock); 783 } 784 spin_unlock_bh(&conn->sess->session_stats_lock); 785 786 hdr = (struct iscsi_scsi_req *) buf; 787 payload_length = ntoh24(hdr->dlength); 788 hdr->itt = be32_to_cpu(hdr->itt); 789 hdr->data_length = be32_to_cpu(hdr->data_length); 790 hdr->cmdsn = be32_to_cpu(hdr->cmdsn); 791 hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn); 792 793 /* FIXME; Add checks for AdditionalHeaderSegment */ 794 795 if (!(hdr->flags & ISCSI_FLAG_CMD_WRITE) && 796 !(hdr->flags & ISCSI_FLAG_CMD_FINAL)) { 797 pr_err("ISCSI_FLAG_CMD_WRITE & ISCSI_FLAG_CMD_FINAL" 798 " not set. Bad iSCSI Initiator.\n"); 799 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1, 800 buf, conn); 801 } 802 803 if (((hdr->flags & ISCSI_FLAG_CMD_READ) || 804 (hdr->flags & ISCSI_FLAG_CMD_WRITE)) && !hdr->data_length) { 805 /* 806 * Vmware ESX v3.0 uses a modified Cisco Initiator (v3.4.2) 807 * that adds support for RESERVE/RELEASE. There is a bug 808 * add with this new functionality that sets R/W bits when 809 * neither CDB carries any READ or WRITE datapayloads. 
810 */ 811 if ((hdr->cdb[0] == 0x16) || (hdr->cdb[0] == 0x17)) { 812 hdr->flags &= ~ISCSI_FLAG_CMD_READ; 813 hdr->flags &= ~ISCSI_FLAG_CMD_WRITE; 814 goto done; 815 } 816 817 pr_err("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE" 818 " set when Expected Data Transfer Length is 0 for" 819 " CDB: 0x%02x. Bad iSCSI Initiator.\n", hdr->cdb[0]); 820 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1, 821 buf, conn); 822 } 823 done: 824 825 if (!(hdr->flags & ISCSI_FLAG_CMD_READ) && 826 !(hdr->flags & ISCSI_FLAG_CMD_WRITE) && (hdr->data_length != 0)) { 827 pr_err("ISCSI_FLAG_CMD_READ and/or ISCSI_FLAG_CMD_WRITE" 828 " MUST be set if Expected Data Transfer Length is not 0." 829 " Bad iSCSI Initiator\n"); 830 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1, 831 buf, conn); 832 } 833 834 if ((hdr->flags & ISCSI_FLAG_CMD_READ) && 835 (hdr->flags & ISCSI_FLAG_CMD_WRITE)) { 836 pr_err("Bidirectional operations not supported!\n"); 837 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1, 838 buf, conn); 839 } 840 841 if (hdr->opcode & ISCSI_OP_IMMEDIATE) { 842 pr_err("Illegally set Immediate Bit in iSCSI Initiator" 843 " Scsi Command PDU.\n"); 844 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1, 845 buf, conn); 846 } 847 848 if (payload_length && !conn->sess->sess_ops->ImmediateData) { 849 pr_err("ImmediateData=No but DataSegmentLength=%u," 850 " protocol error.\n", payload_length); 851 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1, 852 buf, conn); 853 } 854 855 if ((hdr->data_length == payload_length) && 856 (!(hdr->flags & ISCSI_FLAG_CMD_FINAL))) { 857 pr_err("Expected Data Transfer Length and Length of" 858 " Immediate Data are the same, but ISCSI_FLAG_CMD_FINAL" 859 " bit is not set protocol error\n"); 860 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1, 861 buf, conn); 862 } 863 864 if (payload_length > hdr->data_length) { 865 pr_err("DataSegmentLength: %u is greater than" 866 " EDTL: %u, protocol error.\n", payload_length, 867 hdr->data_length); 868 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1, 869 buf, conn); 870 } 871 872 if (payload_length > conn->conn_ops->MaxRecvDataSegmentLength) { 873 pr_err("DataSegmentLength: %u is greater than" 874 " MaxRecvDataSegmentLength: %u, protocol error.\n", 875 payload_length, conn->conn_ops->MaxRecvDataSegmentLength); 876 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1, 877 buf, conn); 878 } 879 880 if (payload_length > conn->sess->sess_ops->FirstBurstLength) { 881 pr_err("DataSegmentLength: %u is greater than" 882 " FirstBurstLength: %u, protocol error.\n", 883 payload_length, conn->sess->sess_ops->FirstBurstLength); 884 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1, 885 buf, conn); 886 } 887 888 data_direction = (hdr->flags & ISCSI_FLAG_CMD_WRITE) ? DMA_TO_DEVICE : 889 (hdr->flags & ISCSI_FLAG_CMD_READ) ? 
DMA_FROM_DEVICE : 890 DMA_NONE; 891 892 cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); 893 if (!cmd) 894 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES, 1, 895 buf, conn); 896 897 cmd->data_direction = data_direction; 898 iscsi_task_attr = hdr->flags & ISCSI_FLAG_CMD_ATTR_MASK; 899 /* 900 * Figure out the SAM Task Attribute for the incoming SCSI CDB 901 */ 902 if ((iscsi_task_attr == ISCSI_ATTR_UNTAGGED) || 903 (iscsi_task_attr == ISCSI_ATTR_SIMPLE)) 904 sam_task_attr = MSG_SIMPLE_TAG; 905 else if (iscsi_task_attr == ISCSI_ATTR_ORDERED) 906 sam_task_attr = MSG_ORDERED_TAG; 907 else if (iscsi_task_attr == ISCSI_ATTR_HEAD_OF_QUEUE) 908 sam_task_attr = MSG_HEAD_TAG; 909 else if (iscsi_task_attr == ISCSI_ATTR_ACA) 910 sam_task_attr = MSG_ACA_TAG; 911 else { 912 pr_debug("Unknown iSCSI Task Attribute: 0x%02x, using" 913 " MSG_SIMPLE_TAG\n", iscsi_task_attr); 914 sam_task_attr = MSG_SIMPLE_TAG; 915 } 916 917 cmd->iscsi_opcode = ISCSI_OP_SCSI_CMD; 918 cmd->i_state = ISTATE_NEW_CMD; 919 cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0); 920 cmd->immediate_data = (payload_length) ? 1 : 0; 921 cmd->unsolicited_data = ((!(hdr->flags & ISCSI_FLAG_CMD_FINAL) && 922 (hdr->flags & ISCSI_FLAG_CMD_WRITE)) ? 1 : 0); 923 if (cmd->unsolicited_data) 924 cmd->cmd_flags |= ICF_NON_IMMEDIATE_UNSOLICITED_DATA; 925 926 conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt; 927 if (hdr->flags & ISCSI_FLAG_CMD_READ) { 928 spin_lock_bh(&conn->sess->ttt_lock); 929 cmd->targ_xfer_tag = conn->sess->targ_xfer_tag++; 930 if (cmd->targ_xfer_tag == 0xFFFFFFFF) 931 cmd->targ_xfer_tag = conn->sess->targ_xfer_tag++; 932 spin_unlock_bh(&conn->sess->ttt_lock); 933 } else if (hdr->flags & ISCSI_FLAG_CMD_WRITE) 934 cmd->targ_xfer_tag = 0xFFFFFFFF; 935 cmd->cmd_sn = hdr->cmdsn; 936 cmd->exp_stat_sn = hdr->exp_statsn; 937 cmd->first_burst_len = payload_length; 938 939 if (cmd->data_direction == DMA_FROM_DEVICE) { 940 struct iscsi_datain_req *dr; 941 942 dr = iscsit_allocate_datain_req(); 943 if (!dr) 944 return iscsit_add_reject_from_cmd( 945 ISCSI_REASON_BOOKMARK_NO_RESOURCES, 946 1, 1, buf, cmd); 947 948 iscsit_attach_datain_req(cmd, dr); 949 } 950 951 /* 952 * Initialize struct se_cmd descriptor from target_core_mod infrastructure 953 */ 954 transport_init_se_cmd(&cmd->se_cmd, &lio_target_fabric_configfs->tf_ops, 955 conn->sess->se_sess, hdr->data_length, cmd->data_direction, 956 sam_task_attr, &cmd->sense_buffer[0]); 957 958 pr_debug("Got SCSI Command, ITT: 0x%08x, CmdSN: 0x%08x," 959 " ExpXferLen: %u, Length: %u, CID: %hu\n", hdr->itt, 960 hdr->cmdsn, hdr->data_length, payload_length, conn->cid); 961 962 /* 963 * The CDB is going to an se_device_t. 964 */ 965 ret = transport_lookup_cmd_lun(&cmd->se_cmd, 966 scsilun_to_int(&hdr->lun)); 967 if (ret < 0) { 968 if (cmd->se_cmd.scsi_sense_reason == TCM_NON_EXISTENT_LUN) { 969 pr_debug("Responding to non-acl'ed," 970 " non-existent or non-exported iSCSI LUN:" 971 " 0x%016Lx\n", get_unaligned_le64(&hdr->lun)); 972 } 973 send_check_condition = 1; 974 goto attach_cmd; 975 } 976 977 transport_ret = target_setup_cmd_from_cdb(&cmd->se_cmd, hdr->cdb); 978 if (transport_ret == -ENOMEM) { 979 return iscsit_add_reject_from_cmd( 980 ISCSI_REASON_BOOKMARK_NO_RESOURCES, 981 1, 1, buf, cmd); 982 } else if (transport_ret < 0) { 983 /* 984 * Unsupported SAM Opcode. CHECK_CONDITION will be sent 985 * in iscsit_execute_cmd() during the CmdSN OOO Execution 986 * Mechinism. 
987 */ 988 send_check_condition = 1; 989 } else { 990 if (iscsit_build_pdu_and_seq_lists(cmd, payload_length) < 0) 991 return iscsit_add_reject_from_cmd( 992 ISCSI_REASON_BOOKMARK_NO_RESOURCES, 993 1, 1, buf, cmd); 994 } 995 996 attach_cmd: 997 spin_lock_bh(&conn->cmd_lock); 998 list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list); 999 spin_unlock_bh(&conn->cmd_lock); 1000 /* 1001 * Check if we need to delay processing because of ALUA 1002 * Active/NonOptimized primary access state.. 1003 */ 1004 core_alua_check_nonop_delay(&cmd->se_cmd); 1005 1006 ret = iscsit_allocate_iovecs(cmd); 1007 if (ret < 0) 1008 return iscsit_add_reject_from_cmd( 1009 ISCSI_REASON_BOOKMARK_NO_RESOURCES, 1010 1, 0, buf, cmd); 1011 /* 1012 * Check the CmdSN against ExpCmdSN/MaxCmdSN here if 1013 * the Immediate Bit is not set, and no Immediate 1014 * Data is attached. 1015 * 1016 * A PDU/CmdSN carrying Immediate Data can only 1017 * be processed after the DataCRC has passed. 1018 * If the DataCRC fails, the CmdSN MUST NOT 1019 * be acknowledged. (See below) 1020 */ 1021 if (!cmd->immediate_data) { 1022 cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn); 1023 if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) 1024 return 0; 1025 else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) 1026 return iscsit_add_reject_from_cmd( 1027 ISCSI_REASON_PROTOCOL_ERROR, 1028 1, 0, buf, cmd); 1029 } 1030 1031 iscsit_ack_from_expstatsn(conn, hdr->exp_statsn); 1032 1033 /* 1034 * If no Immediate Data is attached, it's OK to return now. 1035 */ 1036 if (!cmd->immediate_data) { 1037 if (send_check_condition) 1038 return 0; 1039 1040 if (cmd->unsolicited_data) { 1041 iscsit_set_dataout_sequence_values(cmd); 1042 1043 spin_lock_bh(&cmd->dataout_timeout_lock); 1044 iscsit_start_dataout_timer(cmd, cmd->conn); 1045 spin_unlock_bh(&cmd->dataout_timeout_lock); 1046 } 1047 1048 return 0; 1049 } 1050 1051 /* 1052 * Early CHECK_CONDITIONs never make it to the transport processing 1053 * thread. They are processed in CmdSN order by 1054 * iscsit_check_received_cmdsn() below. 1055 */ 1056 if (send_check_condition) { 1057 immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION; 1058 dump_immediate_data = 1; 1059 goto after_immediate_data; 1060 } 1061 /* 1062 * Call directly into transport_generic_new_cmd() to perform 1063 * the backend memory allocation. 1064 */ 1065 ret = transport_generic_new_cmd(&cmd->se_cmd); 1066 if (ret < 0) { 1067 immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION; 1068 dump_immediate_data = 1; 1069 goto after_immediate_data; 1070 } 1071 1072 immed_ret = iscsit_handle_immediate_data(cmd, buf, payload_length); 1073 after_immediate_data: 1074 if (immed_ret == IMMEDIATE_DATA_NORMAL_OPERATION) { 1075 /* 1076 * A PDU/CmdSN carrying Immediate Data passed 1077 * DataCRC, check against ExpCmdSN/MaxCmdSN if 1078 * Immediate Bit is not set. 1079 */ 1080 cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn); 1081 /* 1082 * Special case for Unsupported SAM WRITE Opcodes 1083 * and ImmediateData=Yes. 
1084 */ 1085 if (dump_immediate_data) { 1086 if (iscsit_dump_data_payload(conn, payload_length, 1) < 0) 1087 return -1; 1088 } else if (cmd->unsolicited_data) { 1089 iscsit_set_dataout_sequence_values(cmd); 1090 1091 spin_lock_bh(&cmd->dataout_timeout_lock); 1092 iscsit_start_dataout_timer(cmd, cmd->conn); 1093 spin_unlock_bh(&cmd->dataout_timeout_lock); 1094 } 1095 1096 if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) 1097 return iscsit_add_reject_from_cmd( 1098 ISCSI_REASON_PROTOCOL_ERROR, 1099 1, 0, buf, cmd); 1100 1101 } else if (immed_ret == IMMEDIATE_DATA_ERL1_CRC_FAILURE) { 1102 /* 1103 * Immediate Data failed DataCRC and ERL>=1, 1104 * silently drop this PDU and let the initiator 1105 * plug the CmdSN gap. 1106 * 1107 * FIXME: Send Unsolicited NOPIN with reserved 1108 * TTT here to help the initiator figure out 1109 * the missing CmdSN, although they should be 1110 * intelligent enough to determine the missing 1111 * CmdSN and issue a retry to plug the sequence. 1112 */ 1113 cmd->i_state = ISTATE_REMOVE; 1114 iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state); 1115 } else /* immed_ret == IMMEDIATE_DATA_CANNOT_RECOVER */ 1116 return -1; 1117 1118 return 0; 1119 } 1120 1121 static u32 iscsit_do_crypto_hash_sg( 1122 struct hash_desc *hash, 1123 struct iscsi_cmd *cmd, 1124 u32 data_offset, 1125 u32 data_length, 1126 u32 padding, 1127 u8 *pad_bytes) 1128 { 1129 u32 data_crc; 1130 u32 i; 1131 struct scatterlist *sg; 1132 unsigned int page_off; 1133 1134 crypto_hash_init(hash); 1135 1136 sg = cmd->first_data_sg; 1137 page_off = cmd->first_data_sg_off; 1138 1139 i = 0; 1140 while (data_length) { 1141 u32 cur_len = min_t(u32, data_length, (sg[i].length - page_off)); 1142 1143 crypto_hash_update(hash, &sg[i], cur_len); 1144 1145 data_length -= cur_len; 1146 page_off = 0; 1147 i++; 1148 } 1149 1150 if (padding) { 1151 struct scatterlist pad_sg; 1152 1153 sg_init_one(&pad_sg, pad_bytes, padding); 1154 crypto_hash_update(hash, &pad_sg, padding); 1155 } 1156 crypto_hash_final(hash, (u8 *) &data_crc); 1157 1158 return data_crc; 1159 } 1160 1161 static void iscsit_do_crypto_hash_buf( 1162 struct hash_desc *hash, 1163 unsigned char *buf, 1164 u32 payload_length, 1165 u32 padding, 1166 u8 *pad_bytes, 1167 u8 *data_crc) 1168 { 1169 struct scatterlist sg; 1170 1171 crypto_hash_init(hash); 1172 1173 sg_init_one(&sg, buf, payload_length); 1174 crypto_hash_update(hash, &sg, payload_length); 1175 1176 if (padding) { 1177 sg_init_one(&sg, pad_bytes, padding); 1178 crypto_hash_update(hash, &sg, padding); 1179 } 1180 crypto_hash_final(hash, data_crc); 1181 } 1182 1183 static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf) 1184 { 1185 int iov_ret, ooo_cmdsn = 0, ret; 1186 u8 data_crc_failed = 0; 1187 u32 checksum, iov_count = 0, padding = 0, rx_got = 0; 1188 u32 rx_size = 0, payload_length; 1189 struct iscsi_cmd *cmd = NULL; 1190 struct se_cmd *se_cmd; 1191 struct iscsi_data *hdr; 1192 struct kvec *iov; 1193 unsigned long flags; 1194 1195 hdr = (struct iscsi_data *) buf; 1196 payload_length = ntoh24(hdr->dlength); 1197 hdr->itt = be32_to_cpu(hdr->itt); 1198 hdr->ttt = be32_to_cpu(hdr->ttt); 1199 hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn); 1200 hdr->datasn = be32_to_cpu(hdr->datasn); 1201 hdr->offset = be32_to_cpu(hdr->offset); 1202 1203 if (!payload_length) { 1204 pr_err("DataOUT payload is ZERO, protocol error.\n"); 1205 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1, 1206 buf, conn); 1207 } 1208 1209 /* iSCSI write */ 1210 
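	/*
	 * Update session-wide and per-node-ACL receive statistics for this
	 * incoming DataOUT payload before any further header validation.
	 */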
	spin_lock_bh(&conn->sess->session_stats_lock);
	conn->sess->rx_data_octets += payload_length;
	if (conn->sess->se_sess->se_node_acl) {
		spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock);
		conn->sess->se_sess->se_node_acl->write_bytes += payload_length;
		spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock);
	}
	spin_unlock_bh(&conn->sess->session_stats_lock);

	if (payload_length > conn->conn_ops->MaxRecvDataSegmentLength) {
		pr_err("DataSegmentLength: %u is greater than"
			" MaxRecvDataSegmentLength: %u\n", payload_length,
			conn->conn_ops->MaxRecvDataSegmentLength);
		return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
					buf, conn);
	}

	cmd = iscsit_find_cmd_from_itt_or_dump(conn, hdr->itt,
			payload_length);
	if (!cmd)
		return 0;

	pr_debug("Got DataOut ITT: 0x%08x, TTT: 0x%08x,"
		" DataSN: 0x%08x, Offset: %u, Length: %u, CID: %hu\n",
		hdr->itt, hdr->ttt, hdr->datasn, hdr->offset,
		payload_length, conn->cid);

	if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT) {
		pr_err("Command ITT: 0x%08x received DataOUT after"
			" last DataOUT received, dumping payload\n",
			cmd->init_task_tag);
		return iscsit_dump_data_payload(conn, payload_length, 1);
	}

	if (cmd->data_direction != DMA_TO_DEVICE) {
		pr_err("Command ITT: 0x%08x received DataOUT for a"
			" NON-WRITE command.\n", cmd->init_task_tag);
		return iscsit_add_reject_from_cmd(ISCSI_REASON_PROTOCOL_ERROR,
				1, 0, buf, cmd);
	}
	se_cmd = &cmd->se_cmd;
	iscsit_mod_dataout_timer(cmd);

	if ((hdr->offset + payload_length) > cmd->se_cmd.data_length) {
		pr_err("DataOut Offset: %u, Length %u greater than"
			" iSCSI Command EDTL %u, protocol error.\n",
			hdr->offset, payload_length, cmd->se_cmd.data_length);
		return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_INVALID,
				1, 0, buf, cmd);
	}

	if (cmd->unsolicited_data) {
		int dump_unsolicited_data = 0;

		if (conn->sess->sess_ops->InitialR2T) {
			pr_err("Received unexpected unsolicited data"
				" while InitialR2T=Yes, protocol error.\n");
			transport_send_check_condition_and_sense(&cmd->se_cmd,
					TCM_UNEXPECTED_UNSOLICITED_DATA, 0);
			return -1;
		}
		/*
		 * Special case for dealing with Unsolicited DataOUT
		 * and Unsupported SAM WRITE Opcodes and SE resource allocation
		 * failures;
		 */

		/* Something's amiss if we're not in WRITE_PENDING state... */
		spin_lock_irqsave(&se_cmd->t_state_lock, flags);
		WARN_ON(se_cmd->t_state != TRANSPORT_WRITE_PENDING);
		spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);

		spin_lock_irqsave(&se_cmd->t_state_lock, flags);
		if (!(se_cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) ||
		    (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION))
			dump_unsolicited_data = 1;
		spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);

		if (dump_unsolicited_data) {
			/*
			 * Check if a delayed TASK_ABORTED status needs to
			 * be sent now if the ISCSI_FLAG_CMD_FINAL has been
			 * received with the unsolicited data out.
1293 */ 1294 if (hdr->flags & ISCSI_FLAG_CMD_FINAL) 1295 iscsit_stop_dataout_timer(cmd); 1296 1297 transport_check_aborted_status(se_cmd, 1298 (hdr->flags & ISCSI_FLAG_CMD_FINAL)); 1299 return iscsit_dump_data_payload(conn, payload_length, 1); 1300 } 1301 } else { 1302 /* 1303 * For the normal solicited data path: 1304 * 1305 * Check for a delayed TASK_ABORTED status and dump any 1306 * incoming data out payload if one exists. Also, when the 1307 * ISCSI_FLAG_CMD_FINAL is set to denote the end of the current 1308 * data out sequence, we decrement outstanding_r2ts. Once 1309 * outstanding_r2ts reaches zero, go ahead and send the delayed 1310 * TASK_ABORTED status. 1311 */ 1312 if (se_cmd->transport_state & CMD_T_ABORTED) { 1313 if (hdr->flags & ISCSI_FLAG_CMD_FINAL) 1314 if (--cmd->outstanding_r2ts < 1) { 1315 iscsit_stop_dataout_timer(cmd); 1316 transport_check_aborted_status( 1317 se_cmd, 1); 1318 } 1319 1320 return iscsit_dump_data_payload(conn, payload_length, 1); 1321 } 1322 } 1323 /* 1324 * Preform DataSN, DataSequenceInOrder, DataPDUInOrder, and 1325 * within-command recovery checks before receiving the payload. 1326 */ 1327 ret = iscsit_check_pre_dataout(cmd, buf); 1328 if (ret == DATAOUT_WITHIN_COMMAND_RECOVERY) 1329 return 0; 1330 else if (ret == DATAOUT_CANNOT_RECOVER) 1331 return -1; 1332 1333 rx_size += payload_length; 1334 iov = &cmd->iov_data[0]; 1335 1336 iov_ret = iscsit_map_iovec(cmd, iov, hdr->offset, payload_length); 1337 if (iov_ret < 0) 1338 return -1; 1339 1340 iov_count += iov_ret; 1341 1342 padding = ((-payload_length) & 3); 1343 if (padding != 0) { 1344 iov[iov_count].iov_base = cmd->pad_bytes; 1345 iov[iov_count++].iov_len = padding; 1346 rx_size += padding; 1347 pr_debug("Receiving %u padding bytes.\n", padding); 1348 } 1349 1350 if (conn->conn_ops->DataDigest) { 1351 iov[iov_count].iov_base = &checksum; 1352 iov[iov_count++].iov_len = ISCSI_CRC_LEN; 1353 rx_size += ISCSI_CRC_LEN; 1354 } 1355 1356 rx_got = rx_data(conn, &cmd->iov_data[0], iov_count, rx_size); 1357 1358 iscsit_unmap_iovec(cmd); 1359 1360 if (rx_got != rx_size) 1361 return -1; 1362 1363 if (conn->conn_ops->DataDigest) { 1364 u32 data_crc; 1365 1366 data_crc = iscsit_do_crypto_hash_sg(&conn->conn_rx_hash, cmd, 1367 hdr->offset, payload_length, padding, 1368 cmd->pad_bytes); 1369 1370 if (checksum != data_crc) { 1371 pr_err("ITT: 0x%08x, Offset: %u, Length: %u," 1372 " DataSN: 0x%08x, CRC32C DataDigest 0x%08x" 1373 " does not match computed 0x%08x\n", 1374 hdr->itt, hdr->offset, payload_length, 1375 hdr->datasn, checksum, data_crc); 1376 data_crc_failed = 1; 1377 } else { 1378 pr_debug("Got CRC32C DataDigest 0x%08x for" 1379 " %u bytes of Data Out\n", checksum, 1380 payload_length); 1381 } 1382 } 1383 /* 1384 * Increment post receive data and CRC values or perform 1385 * within-command recovery. 1386 */ 1387 ret = iscsit_check_post_dataout(cmd, buf, data_crc_failed); 1388 if ((ret == DATAOUT_NORMAL) || (ret == DATAOUT_WITHIN_COMMAND_RECOVERY)) 1389 return 0; 1390 else if (ret == DATAOUT_SEND_R2T) { 1391 iscsit_set_dataout_sequence_values(cmd); 1392 iscsit_build_r2ts_for_cmd(cmd, conn, false); 1393 } else if (ret == DATAOUT_SEND_TO_TRANSPORT) { 1394 /* 1395 * Handle extra special case for out of order 1396 * Unsolicited Data Out. 
1397 */ 1398 spin_lock_bh(&cmd->istate_lock); 1399 ooo_cmdsn = (cmd->cmd_flags & ICF_OOO_CMDSN); 1400 cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT; 1401 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT; 1402 spin_unlock_bh(&cmd->istate_lock); 1403 1404 iscsit_stop_dataout_timer(cmd); 1405 if (ooo_cmdsn) 1406 return 0; 1407 target_execute_cmd(&cmd->se_cmd); 1408 return 0; 1409 } else /* DATAOUT_CANNOT_RECOVER */ 1410 return -1; 1411 1412 return 0; 1413 } 1414 1415 static int iscsit_handle_nop_out( 1416 struct iscsi_conn *conn, 1417 unsigned char *buf) 1418 { 1419 unsigned char *ping_data = NULL; 1420 int cmdsn_ret, niov = 0, ret = 0, rx_got, rx_size; 1421 u32 checksum, data_crc, padding = 0, payload_length; 1422 struct iscsi_cmd *cmd = NULL; 1423 struct kvec *iov = NULL; 1424 struct iscsi_nopout *hdr; 1425 1426 hdr = (struct iscsi_nopout *) buf; 1427 payload_length = ntoh24(hdr->dlength); 1428 hdr->itt = be32_to_cpu(hdr->itt); 1429 hdr->ttt = be32_to_cpu(hdr->ttt); 1430 hdr->cmdsn = be32_to_cpu(hdr->cmdsn); 1431 hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn); 1432 1433 if ((hdr->itt == 0xFFFFFFFF) && !(hdr->opcode & ISCSI_OP_IMMEDIATE)) { 1434 pr_err("NOPOUT ITT is reserved, but Immediate Bit is" 1435 " not set, protocol error.\n"); 1436 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1, 1437 buf, conn); 1438 } 1439 1440 if (payload_length > conn->conn_ops->MaxRecvDataSegmentLength) { 1441 pr_err("NOPOUT Ping Data DataSegmentLength: %u is" 1442 " greater than MaxRecvDataSegmentLength: %u, protocol" 1443 " error.\n", payload_length, 1444 conn->conn_ops->MaxRecvDataSegmentLength); 1445 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1, 1446 buf, conn); 1447 } 1448 1449 pr_debug("Got NOPOUT Ping %s ITT: 0x%08x, TTT: 0x%09x," 1450 " CmdSN: 0x%08x, ExpStatSN: 0x%08x, Length: %u\n", 1451 (hdr->itt == 0xFFFFFFFF) ? "Response" : "Request", 1452 hdr->itt, hdr->ttt, hdr->cmdsn, hdr->exp_statsn, 1453 payload_length); 1454 /* 1455 * This is not a response to a Unsolicited NopIN, which means 1456 * it can either be a NOPOUT ping request (with a valid ITT), 1457 * or a NOPOUT not requesting a NOPIN (with a reserved ITT). 1458 * Either way, make sure we allocate an struct iscsi_cmd, as both 1459 * can contain ping data. 1460 */ 1461 if (hdr->ttt == 0xFFFFFFFF) { 1462 cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); 1463 if (!cmd) 1464 return iscsit_add_reject( 1465 ISCSI_REASON_BOOKMARK_NO_RESOURCES, 1466 1, buf, conn); 1467 1468 cmd->iscsi_opcode = ISCSI_OP_NOOP_OUT; 1469 cmd->i_state = ISTATE_SEND_NOPIN; 1470 cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 
1471 1 : 0); 1472 conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt; 1473 cmd->targ_xfer_tag = 0xFFFFFFFF; 1474 cmd->cmd_sn = hdr->cmdsn; 1475 cmd->exp_stat_sn = hdr->exp_statsn; 1476 cmd->data_direction = DMA_NONE; 1477 } 1478 1479 if (payload_length && (hdr->ttt == 0xFFFFFFFF)) { 1480 rx_size = payload_length; 1481 ping_data = kzalloc(payload_length + 1, GFP_KERNEL); 1482 if (!ping_data) { 1483 pr_err("Unable to allocate memory for" 1484 " NOPOUT ping data.\n"); 1485 ret = -1; 1486 goto out; 1487 } 1488 1489 iov = &cmd->iov_misc[0]; 1490 iov[niov].iov_base = ping_data; 1491 iov[niov++].iov_len = payload_length; 1492 1493 padding = ((-payload_length) & 3); 1494 if (padding != 0) { 1495 pr_debug("Receiving %u additional bytes" 1496 " for padding.\n", padding); 1497 iov[niov].iov_base = &cmd->pad_bytes; 1498 iov[niov++].iov_len = padding; 1499 rx_size += padding; 1500 } 1501 if (conn->conn_ops->DataDigest) { 1502 iov[niov].iov_base = &checksum; 1503 iov[niov++].iov_len = ISCSI_CRC_LEN; 1504 rx_size += ISCSI_CRC_LEN; 1505 } 1506 1507 rx_got = rx_data(conn, &cmd->iov_misc[0], niov, rx_size); 1508 if (rx_got != rx_size) { 1509 ret = -1; 1510 goto out; 1511 } 1512 1513 if (conn->conn_ops->DataDigest) { 1514 iscsit_do_crypto_hash_buf(&conn->conn_rx_hash, 1515 ping_data, payload_length, 1516 padding, cmd->pad_bytes, 1517 (u8 *)&data_crc); 1518 1519 if (checksum != data_crc) { 1520 pr_err("Ping data CRC32C DataDigest" 1521 " 0x%08x does not match computed 0x%08x\n", 1522 checksum, data_crc); 1523 if (!conn->sess->sess_ops->ErrorRecoveryLevel) { 1524 pr_err("Unable to recover from" 1525 " NOPOUT Ping DataCRC failure while in" 1526 " ERL=0.\n"); 1527 ret = -1; 1528 goto out; 1529 } else { 1530 /* 1531 * Silently drop this PDU and let the 1532 * initiator plug the CmdSN gap. 1533 */ 1534 pr_debug("Dropping NOPOUT" 1535 " Command CmdSN: 0x%08x due to" 1536 " DataCRC error.\n", hdr->cmdsn); 1537 ret = 0; 1538 goto out; 1539 } 1540 } else { 1541 pr_debug("Got CRC32C DataDigest" 1542 " 0x%08x for %u bytes of ping data.\n", 1543 checksum, payload_length); 1544 } 1545 } 1546 1547 ping_data[payload_length] = '\0'; 1548 /* 1549 * Attach ping data to struct iscsi_cmd->buf_ptr. 1550 */ 1551 cmd->buf_ptr = ping_data; 1552 cmd->buf_ptr_size = payload_length; 1553 1554 pr_debug("Got %u bytes of NOPOUT ping" 1555 " data.\n", payload_length); 1556 pr_debug("Ping Data: \"%s\"\n", ping_data); 1557 } 1558 1559 if (hdr->itt != 0xFFFFFFFF) { 1560 if (!cmd) { 1561 pr_err("Checking CmdSN for NOPOUT," 1562 " but cmd is NULL!\n"); 1563 return -1; 1564 } 1565 /* 1566 * Initiator is expecting a NopIN ping reply, 1567 */ 1568 spin_lock_bh(&conn->cmd_lock); 1569 list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list); 1570 spin_unlock_bh(&conn->cmd_lock); 1571 1572 iscsit_ack_from_expstatsn(conn, hdr->exp_statsn); 1573 1574 if (hdr->opcode & ISCSI_OP_IMMEDIATE) { 1575 iscsit_add_cmd_to_response_queue(cmd, conn, 1576 cmd->i_state); 1577 return 0; 1578 } 1579 1580 cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn); 1581 if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) { 1582 ret = 0; 1583 goto ping_out; 1584 } 1585 if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) 1586 return iscsit_add_reject_from_cmd( 1587 ISCSI_REASON_PROTOCOL_ERROR, 1588 1, 0, buf, cmd); 1589 1590 return 0; 1591 } 1592 1593 if (hdr->ttt != 0xFFFFFFFF) { 1594 /* 1595 * This was a response to a unsolicited NOPIN ping. 
		 */
		cmd = iscsit_find_cmd_from_ttt(conn, hdr->ttt);
		if (!cmd)
			return -1;

		iscsit_stop_nopin_response_timer(conn);

		cmd->i_state = ISTATE_REMOVE;
		iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state);
		iscsit_start_nopin_timer(conn);
	} else {
		/*
		 * Initiator is not expecting a NOPIN in response.
		 * Just ignore for now.
		 *
		 * iSCSI v19-91 10.18
		 * "A NOP-OUT may also be used to confirm a changed
		 *  ExpStatSN if another PDU will not be available
		 *  for a long time."
		 */
		ret = 0;
		goto out;
	}

	return 0;
out:
	if (cmd)
		iscsit_release_cmd(cmd);
ping_out:
	kfree(ping_data);
	return ret;
}

static int iscsit_handle_task_mgt_cmd(
	struct iscsi_conn *conn,
	unsigned char *buf)
{
	struct iscsi_cmd *cmd;
	struct se_tmr_req *se_tmr;
	struct iscsi_tmr_req *tmr_req;
	struct iscsi_tm *hdr;
	int out_of_order_cmdsn = 0;
	int ret;
	u8 function;

	hdr = (struct iscsi_tm *) buf;
	hdr->itt = be32_to_cpu(hdr->itt);
	hdr->rtt = be32_to_cpu(hdr->rtt);
	hdr->cmdsn = be32_to_cpu(hdr->cmdsn);
	hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn);
	hdr->refcmdsn = be32_to_cpu(hdr->refcmdsn);
	hdr->exp_datasn = be32_to_cpu(hdr->exp_datasn);
	hdr->flags &= ~ISCSI_FLAG_CMD_FINAL;
	function = hdr->flags;

	pr_debug("Got Task Management Request ITT: 0x%08x, CmdSN:"
		" 0x%08x, Function: 0x%02x, RefTaskTag: 0x%08x, RefCmdSN:"
		" 0x%08x, CID: %hu\n", hdr->itt, hdr->cmdsn, function,
		hdr->rtt, hdr->refcmdsn, conn->cid);

	if ((function != ISCSI_TM_FUNC_ABORT_TASK) &&
	    ((function != ISCSI_TM_FUNC_TASK_REASSIGN) &&
	     (hdr->rtt != ISCSI_RESERVED_TAG))) {
		pr_err("RefTaskTag should be set to 0xFFFFFFFF.\n");
		hdr->rtt = ISCSI_RESERVED_TAG;
	}

	if ((function == ISCSI_TM_FUNC_TASK_REASSIGN) &&
	    !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
		pr_err("Task Management Request TASK_REASSIGN not"
			" issued as immediate command, bad iSCSI Initiator"
			" implementation\n");
		return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
					buf, conn);
	}
	if ((function != ISCSI_TM_FUNC_ABORT_TASK) &&
	    (hdr->refcmdsn != ISCSI_RESERVED_TAG))
		hdr->refcmdsn = ISCSI_RESERVED_TAG;

	cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
	if (!cmd)
		return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES,
					1, buf, conn);

	cmd->data_direction = DMA_NONE;

	cmd->tmr_req = kzalloc(sizeof(struct iscsi_tmr_req), GFP_KERNEL);
	if (!cmd->tmr_req) {
		pr_err("Unable to allocate memory for"
			" Task Management command!\n");
		return iscsit_add_reject_from_cmd(
				ISCSI_REASON_BOOKMARK_NO_RESOURCES,
				1, 1, buf, cmd);
	}

	/*
	 * TASK_REASSIGN for ERL=2 / connection stays inside of
	 * LIO-Target $FABRIC_MOD
	 */
	if (function != ISCSI_TM_FUNC_TASK_REASSIGN) {

		u8 tcm_function;
		int ret;

		transport_init_se_cmd(&cmd->se_cmd,
				&lio_target_fabric_configfs->tf_ops,
				conn->sess->se_sess, 0, DMA_NONE,
				MSG_SIMPLE_TAG, &cmd->sense_buffer[0]);

		switch (function) {
		case ISCSI_TM_FUNC_ABORT_TASK:
			tcm_function = TMR_ABORT_TASK;
			break;
		case ISCSI_TM_FUNC_ABORT_TASK_SET:
			tcm_function = TMR_ABORT_TASK_SET;
			break;
		case ISCSI_TM_FUNC_CLEAR_ACA:
			tcm_function = TMR_CLEAR_ACA;
			break;
		case
ISCSI_TM_FUNC_CLEAR_TASK_SET: 1716 tcm_function = TMR_CLEAR_TASK_SET; 1717 break; 1718 case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET: 1719 tcm_function = TMR_LUN_RESET; 1720 break; 1721 case ISCSI_TM_FUNC_TARGET_WARM_RESET: 1722 tcm_function = TMR_TARGET_WARM_RESET; 1723 break; 1724 case ISCSI_TM_FUNC_TARGET_COLD_RESET: 1725 tcm_function = TMR_TARGET_COLD_RESET; 1726 break; 1727 default: 1728 pr_err("Unknown iSCSI TMR Function:" 1729 " 0x%02x\n", function); 1730 return iscsit_add_reject_from_cmd( 1731 ISCSI_REASON_BOOKMARK_NO_RESOURCES, 1732 1, 1, buf, cmd); 1733 } 1734 1735 ret = core_tmr_alloc_req(&cmd->se_cmd, cmd->tmr_req, 1736 tcm_function, GFP_KERNEL); 1737 if (ret < 0) 1738 return iscsit_add_reject_from_cmd( 1739 ISCSI_REASON_BOOKMARK_NO_RESOURCES, 1740 1, 1, buf, cmd); 1741 1742 cmd->tmr_req->se_tmr_req = cmd->se_cmd.se_tmr_req; 1743 } 1744 1745 cmd->iscsi_opcode = ISCSI_OP_SCSI_TMFUNC; 1746 cmd->i_state = ISTATE_SEND_TASKMGTRSP; 1747 cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0); 1748 cmd->init_task_tag = hdr->itt; 1749 cmd->targ_xfer_tag = 0xFFFFFFFF; 1750 cmd->cmd_sn = hdr->cmdsn; 1751 cmd->exp_stat_sn = hdr->exp_statsn; 1752 se_tmr = cmd->se_cmd.se_tmr_req; 1753 tmr_req = cmd->tmr_req; 1754 /* 1755 * Locate the struct se_lun for all TMRs not related to ERL=2 TASK_REASSIGN 1756 */ 1757 if (function != ISCSI_TM_FUNC_TASK_REASSIGN) { 1758 ret = transport_lookup_tmr_lun(&cmd->se_cmd, 1759 scsilun_to_int(&hdr->lun)); 1760 if (ret < 0) { 1761 cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 1762 se_tmr->response = ISCSI_TMF_RSP_NO_LUN; 1763 goto attach; 1764 } 1765 } 1766 1767 switch (function) { 1768 case ISCSI_TM_FUNC_ABORT_TASK: 1769 se_tmr->response = iscsit_tmr_abort_task(cmd, buf); 1770 if (se_tmr->response != ISCSI_TMF_RSP_COMPLETE) { 1771 cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 1772 goto attach; 1773 } 1774 break; 1775 case ISCSI_TM_FUNC_ABORT_TASK_SET: 1776 case ISCSI_TM_FUNC_CLEAR_ACA: 1777 case ISCSI_TM_FUNC_CLEAR_TASK_SET: 1778 case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET: 1779 break; 1780 case ISCSI_TM_FUNC_TARGET_WARM_RESET: 1781 if (iscsit_tmr_task_warm_reset(conn, tmr_req, buf) < 0) { 1782 cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 1783 se_tmr->response = ISCSI_TMF_RSP_AUTH_FAILED; 1784 goto attach; 1785 } 1786 break; 1787 case ISCSI_TM_FUNC_TARGET_COLD_RESET: 1788 if (iscsit_tmr_task_cold_reset(conn, tmr_req, buf) < 0) { 1789 cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 1790 se_tmr->response = ISCSI_TMF_RSP_AUTH_FAILED; 1791 goto attach; 1792 } 1793 break; 1794 case ISCSI_TM_FUNC_TASK_REASSIGN: 1795 se_tmr->response = iscsit_tmr_task_reassign(cmd, buf); 1796 /* 1797 * Perform sanity checks on the ExpDataSN only if the 1798 * TASK_REASSIGN was successful. 
1799 */ 1800 if (se_tmr->response != ISCSI_TMF_RSP_COMPLETE) 1801 break; 1802 1803 if (iscsit_check_task_reassign_expdatasn(tmr_req, conn) < 0) 1804 return iscsit_add_reject_from_cmd( 1805 ISCSI_REASON_BOOKMARK_INVALID, 1, 1, 1806 buf, cmd); 1807 break; 1808 default: 1809 pr_err("Unknown TMR function: 0x%02x, protocol" 1810 " error.\n", function); 1811 cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 1812 se_tmr->response = ISCSI_TMF_RSP_NOT_SUPPORTED; 1813 goto attach; 1814 } 1815 1816 if ((function != ISCSI_TM_FUNC_TASK_REASSIGN) && 1817 (se_tmr->response == ISCSI_TMF_RSP_COMPLETE)) 1818 se_tmr->call_transport = 1; 1819 attach: 1820 spin_lock_bh(&conn->cmd_lock); 1821 list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list); 1822 spin_unlock_bh(&conn->cmd_lock); 1823 1824 if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) { 1825 int cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn); 1826 if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP) 1827 out_of_order_cmdsn = 1; 1828 else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) 1829 return 0; 1830 else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) 1831 return iscsit_add_reject_from_cmd( 1832 ISCSI_REASON_PROTOCOL_ERROR, 1833 1, 0, buf, cmd); 1834 } 1835 iscsit_ack_from_expstatsn(conn, hdr->exp_statsn); 1836 1837 if (out_of_order_cmdsn || !(hdr->opcode & ISCSI_OP_IMMEDIATE)) 1838 return 0; 1839 /* 1840 * Found the referenced task, send to transport for processing. 1841 */ 1842 if (se_tmr->call_transport) 1843 return transport_generic_handle_tmr(&cmd->se_cmd); 1844 1845 /* 1846 * Could not find the referenced LUN, task, or Task Management 1847 * command not authorized or supported. Change state and 1848 * let the tx_thread send the response. 1849 * 1850 * For connection recovery, this is also the default action for 1851 * TMR TASK_REASSIGN. 
	 */
	iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
	return 0;
}

/* #warning FIXME: Support Text Command parameters besides SendTargets */
static int iscsit_handle_text_cmd(
	struct iscsi_conn *conn,
	unsigned char *buf)
{
	char *text_ptr, *text_in;
	int cmdsn_ret, niov = 0, rx_got, rx_size;
	u32 checksum = 0, data_crc = 0, payload_length;
	u32 padding = 0, pad_bytes = 0, text_length = 0;
	struct iscsi_cmd *cmd;
	struct kvec iov[3];
	struct iscsi_text *hdr;

	hdr = (struct iscsi_text *) buf;
	payload_length = ntoh24(hdr->dlength);
	hdr->itt = be32_to_cpu(hdr->itt);
	hdr->ttt = be32_to_cpu(hdr->ttt);
	hdr->cmdsn = be32_to_cpu(hdr->cmdsn);
	hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn);

	if (payload_length > conn->conn_ops->MaxRecvDataSegmentLength) {
		pr_err("Unable to accept text parameter length: %u"
			" greater than MaxRecvDataSegmentLength %u.\n",
			payload_length, conn->conn_ops->MaxRecvDataSegmentLength);
		return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
					buf, conn);
	}

	pr_debug("Got Text Request: ITT: 0x%08x, CmdSN: 0x%08x,"
		" ExpStatSN: 0x%08x, Length: %u\n", hdr->itt, hdr->cmdsn,
		hdr->exp_statsn, payload_length);

	rx_size = text_length = payload_length;
	if (text_length) {
		text_in = kzalloc(text_length, GFP_KERNEL);
		if (!text_in) {
			pr_err("Unable to allocate memory for"
				" incoming text parameters\n");
			return -1;
		}

		memset(iov, 0, 3 * sizeof(struct kvec));
		iov[niov].iov_base = text_in;
		iov[niov++].iov_len = text_length;

		padding = ((-payload_length) & 3);
		if (padding != 0) {
			iov[niov].iov_base = &pad_bytes;
			iov[niov++].iov_len = padding;
			rx_size += padding;
			pr_debug("Receiving %u additional bytes"
				" for padding.\n", padding);
		}
		if (conn->conn_ops->DataDigest) {
			iov[niov].iov_base = &checksum;
			iov[niov++].iov_len = ISCSI_CRC_LEN;
			rx_size += ISCSI_CRC_LEN;
		}

		rx_got = rx_data(conn, &iov[0], niov, rx_size);
		if (rx_got != rx_size) {
			kfree(text_in);
			return -1;
		}

		if (conn->conn_ops->DataDigest) {
			iscsit_do_crypto_hash_buf(&conn->conn_rx_hash,
					text_in, text_length,
					padding, (u8 *)&pad_bytes,
					(u8 *)&data_crc);

			if (checksum != data_crc) {
				pr_err("Text data CRC32C DataDigest"
					" 0x%08x does not match computed"
					" 0x%08x\n", checksum, data_crc);
				if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
					pr_err("Unable to recover from"
						" Text Data digest failure while in"
						" ERL=0.\n");
					kfree(text_in);
					return -1;
				} else {
					/*
					 * Silently drop this PDU and let the
					 * initiator plug the CmdSN gap.
1942 */ 1943 pr_debug("Dropping Text" 1944 " Command CmdSN: 0x%08x due to" 1945 " DataCRC error.\n", hdr->cmdsn); 1946 kfree(text_in); 1947 return 0; 1948 } 1949 } else { 1950 pr_debug("Got CRC32C DataDigest" 1951 " 0x%08x for %u bytes of text data.\n", 1952 checksum, text_length); 1953 } 1954 } 1955 text_in[text_length - 1] = '\0'; 1956 pr_debug("Successfully read %d bytes of text" 1957 " data.\n", text_length); 1958 1959 if (strncmp("SendTargets", text_in, 11) != 0) { 1960 pr_err("Received Text Data that is not" 1961 " SendTargets, cannot continue.\n"); 1962 kfree(text_in); 1963 return -1; 1964 } 1965 text_ptr = strchr(text_in, '='); 1966 if (!text_ptr) { 1967 pr_err("No \"=\" separator found in Text Data," 1968 " cannot continue.\n"); 1969 kfree(text_in); 1970 return -1; 1971 } 1972 if (strncmp("=All", text_ptr, 4) != 0) { 1973 pr_err("Unable to locate All value for" 1974 " SendTargets key, cannot continue.\n"); 1975 kfree(text_in); 1976 return -1; 1977 } 1978 /*#warning Support SendTargets=(iSCSI Target Name/Nothing) values. */ 1979 kfree(text_in); 1980 } 1981 1982 cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); 1983 if (!cmd) 1984 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES, 1985 1, buf, conn); 1986 1987 cmd->iscsi_opcode = ISCSI_OP_TEXT; 1988 cmd->i_state = ISTATE_SEND_TEXTRSP; 1989 cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0); 1990 conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt; 1991 cmd->targ_xfer_tag = 0xFFFFFFFF; 1992 cmd->cmd_sn = hdr->cmdsn; 1993 cmd->exp_stat_sn = hdr->exp_statsn; 1994 cmd->data_direction = DMA_NONE; 1995 1996 spin_lock_bh(&conn->cmd_lock); 1997 list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list); 1998 spin_unlock_bh(&conn->cmd_lock); 1999 2000 iscsit_ack_from_expstatsn(conn, hdr->exp_statsn); 2001 2002 if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) { 2003 cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn); 2004 if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) 2005 return iscsit_add_reject_from_cmd( 2006 ISCSI_REASON_PROTOCOL_ERROR, 2007 1, 0, buf, cmd); 2008 2009 return 0; 2010 } 2011 2012 return iscsit_execute_cmd(cmd, 0); 2013 } 2014 2015 int iscsit_logout_closesession(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 2016 { 2017 struct iscsi_conn *conn_p; 2018 struct iscsi_session *sess = conn->sess; 2019 2020 pr_debug("Received logout request CLOSESESSION on CID: %hu" 2021 " for SID: %u.\n", conn->cid, conn->sess->sid); 2022 2023 atomic_set(&sess->session_logout, 1); 2024 atomic_set(&conn->conn_logout_remove, 1); 2025 conn->conn_logout_reason = ISCSI_LOGOUT_REASON_CLOSE_SESSION; 2026 2027 iscsit_inc_conn_usage_count(conn); 2028 iscsit_inc_session_usage_count(sess); 2029 2030 spin_lock_bh(&sess->conn_lock); 2031 list_for_each_entry(conn_p, &sess->sess_conn_list, conn_list) { 2032 if (conn_p->conn_state != TARG_CONN_STATE_LOGGED_IN) 2033 continue; 2034 2035 pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n"); 2036 conn_p->conn_state = TARG_CONN_STATE_IN_LOGOUT; 2037 } 2038 spin_unlock_bh(&sess->conn_lock); 2039 2040 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); 2041 2042 return 0; 2043 } 2044 2045 int iscsit_logout_closeconnection(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 2046 { 2047 struct iscsi_conn *l_conn; 2048 struct iscsi_session *sess = conn->sess; 2049 2050 pr_debug("Received logout request CLOSECONNECTION for CID:" 2051 " %hu on CID: %hu.\n", cmd->logout_cid, conn->cid); 2052 2053 /* 2054 * A Logout Request with a CLOSECONNECTION reason code for a CID 2055 * can arrive on a connection with a 
differing CID. 2056 */ 2057 if (conn->cid == cmd->logout_cid) { 2058 spin_lock_bh(&conn->state_lock); 2059 pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n"); 2060 conn->conn_state = TARG_CONN_STATE_IN_LOGOUT; 2061 2062 atomic_set(&conn->conn_logout_remove, 1); 2063 conn->conn_logout_reason = ISCSI_LOGOUT_REASON_CLOSE_CONNECTION; 2064 iscsit_inc_conn_usage_count(conn); 2065 2066 spin_unlock_bh(&conn->state_lock); 2067 } else { 2068 /* 2069 * Handle all different cid CLOSECONNECTION requests in 2070 * iscsit_logout_post_handler_diffcid() as to give enough 2071 * time for any non immediate command's CmdSN to be 2072 * acknowledged on the connection in question. 2073 * 2074 * Here we simply make sure the CID is still around. 2075 */ 2076 l_conn = iscsit_get_conn_from_cid(sess, 2077 cmd->logout_cid); 2078 if (!l_conn) { 2079 cmd->logout_response = ISCSI_LOGOUT_CID_NOT_FOUND; 2080 iscsit_add_cmd_to_response_queue(cmd, conn, 2081 cmd->i_state); 2082 return 0; 2083 } 2084 2085 iscsit_dec_conn_usage_count(l_conn); 2086 } 2087 2088 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); 2089 2090 return 0; 2091 } 2092 2093 int iscsit_logout_removeconnforrecovery(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 2094 { 2095 struct iscsi_session *sess = conn->sess; 2096 2097 pr_debug("Received explicit REMOVECONNFORRECOVERY logout for" 2098 " CID: %hu on CID: %hu.\n", cmd->logout_cid, conn->cid); 2099 2100 if (sess->sess_ops->ErrorRecoveryLevel != 2) { 2101 pr_err("Received Logout Request REMOVECONNFORRECOVERY" 2102 " while ERL!=2.\n"); 2103 cmd->logout_response = ISCSI_LOGOUT_RECOVERY_UNSUPPORTED; 2104 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); 2105 return 0; 2106 } 2107 2108 if (conn->cid == cmd->logout_cid) { 2109 pr_err("Received Logout Request REMOVECONNFORRECOVERY" 2110 " with CID: %hu on CID: %hu, implementation error.\n", 2111 cmd->logout_cid, conn->cid); 2112 cmd->logout_response = ISCSI_LOGOUT_CLEANUP_FAILED; 2113 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); 2114 return 0; 2115 } 2116 2117 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); 2118 2119 return 0; 2120 } 2121 2122 static int iscsit_handle_logout_cmd( 2123 struct iscsi_conn *conn, 2124 unsigned char *buf) 2125 { 2126 int cmdsn_ret, logout_remove = 0; 2127 u8 reason_code = 0; 2128 struct iscsi_cmd *cmd; 2129 struct iscsi_logout *hdr; 2130 struct iscsi_tiqn *tiqn = iscsit_snmp_get_tiqn(conn); 2131 2132 hdr = (struct iscsi_logout *) buf; 2133 reason_code = (hdr->flags & 0x7f); 2134 hdr->itt = be32_to_cpu(hdr->itt); 2135 hdr->cid = be16_to_cpu(hdr->cid); 2136 hdr->cmdsn = be32_to_cpu(hdr->cmdsn); 2137 hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn); 2138 2139 if (tiqn) { 2140 spin_lock(&tiqn->logout_stats.lock); 2141 if (reason_code == ISCSI_LOGOUT_REASON_CLOSE_SESSION) 2142 tiqn->logout_stats.normal_logouts++; 2143 else 2144 tiqn->logout_stats.abnormal_logouts++; 2145 spin_unlock(&tiqn->logout_stats.lock); 2146 } 2147 2148 pr_debug("Got Logout Request ITT: 0x%08x CmdSN: 0x%08x" 2149 " ExpStatSN: 0x%08x Reason: 0x%02x CID: %hu on CID: %hu\n", 2150 hdr->itt, hdr->cmdsn, hdr->exp_statsn, reason_code, 2151 hdr->cid, conn->cid); 2152 2153 if (conn->conn_state != TARG_CONN_STATE_LOGGED_IN) { 2154 pr_err("Received logout request on connection that" 2155 " is not in logged in state, ignoring request.\n"); 2156 return 0; 2157 } 2158 2159 cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); 2160 if (!cmd) 2161 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES, 1, 2162 buf, conn); 2163 2164 
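/*
 * Stash the Logout parameters (CID, reason code, CmdSN/ExpStatSN) on the
 * newly allocated command; iscsit_send_logout_response() later keys off
 * cmd->logout_reason and cmd->logout_response when building the reply.
 */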
cmd->iscsi_opcode = ISCSI_OP_LOGOUT; 2165 cmd->i_state = ISTATE_SEND_LOGOUTRSP; 2166 cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0); 2167 conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt; 2168 cmd->targ_xfer_tag = 0xFFFFFFFF; 2169 cmd->cmd_sn = hdr->cmdsn; 2170 cmd->exp_stat_sn = hdr->exp_statsn; 2171 cmd->logout_cid = hdr->cid; 2172 cmd->logout_reason = reason_code; 2173 cmd->data_direction = DMA_NONE; 2174 2175 /* 2176 * We need to sleep in these cases (by returning 1) until the Logout 2177 * Response gets sent in the tx thread. 2178 */ 2179 if ((reason_code == ISCSI_LOGOUT_REASON_CLOSE_SESSION) || 2180 ((reason_code == ISCSI_LOGOUT_REASON_CLOSE_CONNECTION) && 2181 (hdr->cid == conn->cid))) 2182 logout_remove = 1; 2183 2184 spin_lock_bh(&conn->cmd_lock); 2185 list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list); 2186 spin_unlock_bh(&conn->cmd_lock); 2187 2188 if (reason_code != ISCSI_LOGOUT_REASON_RECOVERY) 2189 iscsit_ack_from_expstatsn(conn, hdr->exp_statsn); 2190 2191 /* 2192 * Immediate commands are executed, well, immediately. 2193 * Non-Immediate Logout Commands are executed in CmdSN order. 2194 */ 2195 if (cmd->immediate_cmd) { 2196 int ret = iscsit_execute_cmd(cmd, 0); 2197 2198 if (ret < 0) 2199 return ret; 2200 } else { 2201 cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn); 2202 if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) { 2203 logout_remove = 0; 2204 } else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) { 2205 return iscsit_add_reject_from_cmd( 2206 ISCSI_REASON_PROTOCOL_ERROR, 2207 1, 0, buf, cmd); 2208 } 2209 } 2210 2211 return logout_remove; 2212 } 2213 2214 static int iscsit_handle_snack( 2215 struct iscsi_conn *conn, 2216 unsigned char *buf) 2217 { 2218 struct iscsi_snack *hdr; 2219 2220 hdr = (struct iscsi_snack *) buf; 2221 hdr->flags &= ~ISCSI_FLAG_CMD_FINAL; 2222 hdr->itt = be32_to_cpu(hdr->itt); 2223 hdr->ttt = be32_to_cpu(hdr->ttt); 2224 hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn); 2225 hdr->begrun = be32_to_cpu(hdr->begrun); 2226 hdr->runlength = be32_to_cpu(hdr->runlength); 2227 2228 pr_debug("Got ISCSI_INIT_SNACK, ITT: 0x%08x, ExpStatSN:" 2229 " 0x%08x, Type: 0x%02x, BegRun: 0x%08x, RunLength: 0x%08x," 2230 " CID: %hu\n", hdr->itt, hdr->exp_statsn, hdr->flags, 2231 hdr->begrun, hdr->runlength, conn->cid); 2232 2233 if (!conn->sess->sess_ops->ErrorRecoveryLevel) { 2234 pr_err("Initiator sent SNACK request while in" 2235 " ErrorRecoveryLevel=0.\n"); 2236 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1, 2237 buf, conn); 2238 } 2239 /* 2240 * SNACK_DATA and SNACK_R2T are both 0, so check which function to 2241 * call from inside iscsi_send_recovery_datain_or_r2t(). 
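 * (per RFC 3720 a SNACK type value of 0 is shared by Data SNACKs and
 * R2T SNACKs, which is why a single handler is invoked for that case
 * below)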
2242 */ 2243 switch (hdr->flags & ISCSI_FLAG_SNACK_TYPE_MASK) { 2244 case 0: 2245 return iscsit_handle_recovery_datain_or_r2t(conn, buf, 2246 hdr->itt, hdr->ttt, hdr->begrun, hdr->runlength); 2247 case ISCSI_FLAG_SNACK_TYPE_STATUS: 2248 return iscsit_handle_status_snack(conn, hdr->itt, hdr->ttt, 2249 hdr->begrun, hdr->runlength); 2250 case ISCSI_FLAG_SNACK_TYPE_DATA_ACK: 2251 return iscsit_handle_data_ack(conn, hdr->ttt, hdr->begrun, 2252 hdr->runlength); 2253 case ISCSI_FLAG_SNACK_TYPE_RDATA: 2254 /* FIXME: Support R-Data SNACK */ 2255 pr_err("R-Data SNACK Not Supported.\n"); 2256 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1, 2257 buf, conn); 2258 default: 2259 pr_err("Unknown SNACK type 0x%02x, protocol" 2260 " error.\n", hdr->flags & 0x0f); 2261 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1, 2262 buf, conn); 2263 } 2264 2265 return 0; 2266 } 2267 2268 static void iscsit_rx_thread_wait_for_tcp(struct iscsi_conn *conn) 2269 { 2270 if ((conn->sock->sk->sk_shutdown & SEND_SHUTDOWN) || 2271 (conn->sock->sk->sk_shutdown & RCV_SHUTDOWN)) { 2272 wait_for_completion_interruptible_timeout( 2273 &conn->rx_half_close_comp, 2274 ISCSI_RX_THREAD_TCP_TIMEOUT * HZ); 2275 } 2276 } 2277 2278 static int iscsit_handle_immediate_data( 2279 struct iscsi_cmd *cmd, 2280 unsigned char *buf, 2281 u32 length) 2282 { 2283 int iov_ret, rx_got = 0, rx_size = 0; 2284 u32 checksum, iov_count = 0, padding = 0; 2285 struct iscsi_conn *conn = cmd->conn; 2286 struct kvec *iov; 2287 2288 iov_ret = iscsit_map_iovec(cmd, cmd->iov_data, cmd->write_data_done, length); 2289 if (iov_ret < 0) 2290 return IMMEDIATE_DATA_CANNOT_RECOVER; 2291 2292 rx_size = length; 2293 iov_count = iov_ret; 2294 iov = &cmd->iov_data[0]; 2295 2296 padding = ((-length) & 3); 2297 if (padding != 0) { 2298 iov[iov_count].iov_base = cmd->pad_bytes; 2299 iov[iov_count++].iov_len = padding; 2300 rx_size += padding; 2301 } 2302 2303 if (conn->conn_ops->DataDigest) { 2304 iov[iov_count].iov_base = &checksum; 2305 iov[iov_count++].iov_len = ISCSI_CRC_LEN; 2306 rx_size += ISCSI_CRC_LEN; 2307 } 2308 2309 rx_got = rx_data(conn, &cmd->iov_data[0], iov_count, rx_size); 2310 2311 iscsit_unmap_iovec(cmd); 2312 2313 if (rx_got != rx_size) { 2314 iscsit_rx_thread_wait_for_tcp(conn); 2315 return IMMEDIATE_DATA_CANNOT_RECOVER; 2316 } 2317 2318 if (conn->conn_ops->DataDigest) { 2319 u32 data_crc; 2320 2321 data_crc = iscsit_do_crypto_hash_sg(&conn->conn_rx_hash, cmd, 2322 cmd->write_data_done, length, padding, 2323 cmd->pad_bytes); 2324 2325 if (checksum != data_crc) { 2326 pr_err("ImmediateData CRC32C DataDigest 0x%08x" 2327 " does not match computed 0x%08x\n", checksum, 2328 data_crc); 2329 2330 if (!conn->sess->sess_ops->ErrorRecoveryLevel) { 2331 pr_err("Unable to recover from" 2332 " Immediate Data digest failure while" 2333 " in ERL=0.\n"); 2334 iscsit_add_reject_from_cmd( 2335 ISCSI_REASON_DATA_DIGEST_ERROR, 2336 1, 0, buf, cmd); 2337 return IMMEDIATE_DATA_CANNOT_RECOVER; 2338 } else { 2339 iscsit_add_reject_from_cmd( 2340 ISCSI_REASON_DATA_DIGEST_ERROR, 2341 0, 0, buf, cmd); 2342 return IMMEDIATE_DATA_ERL1_CRC_FAILURE; 2343 } 2344 } else { 2345 pr_debug("Got CRC32C DataDigest 0x%08x for" 2346 " %u bytes of Immediate Data\n", checksum, 2347 length); 2348 } 2349 } 2350 2351 cmd->write_data_done += length; 2352 2353 if (cmd->write_data_done == cmd->se_cmd.data_length) { 2354 spin_lock_bh(&cmd->istate_lock); 2355 cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT; 2356 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT; 2357 spin_unlock_bh(&cmd->istate_lock); 2358 
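/*
 * ICF_GOT_LAST_DATAOUT is now set: the entire write payload arrived as
 * immediate data, so no further Data-Out PDUs are expected for this
 * command.
 */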
} 2359 2360 return IMMEDIATE_DATA_NORMAL_OPERATION; 2361 } 2362 2363 /* 2364 * Called with sess->conn_lock held. 2365 */ 2366 /* #warning iscsi_build_conn_drop_async_message() only sends out on connections 2367 with active network interface */ 2368 static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn) 2369 { 2370 struct iscsi_cmd *cmd; 2371 struct iscsi_conn *conn_p; 2372 2373 /* 2374 * Only send a Asynchronous Message on connections whos network 2375 * interface is still functional. 2376 */ 2377 list_for_each_entry(conn_p, &conn->sess->sess_conn_list, conn_list) { 2378 if (conn_p->conn_state == TARG_CONN_STATE_LOGGED_IN) { 2379 iscsit_inc_conn_usage_count(conn_p); 2380 break; 2381 } 2382 } 2383 2384 if (!conn_p) 2385 return; 2386 2387 cmd = iscsit_allocate_cmd(conn_p, GFP_KERNEL); 2388 if (!cmd) { 2389 iscsit_dec_conn_usage_count(conn_p); 2390 return; 2391 } 2392 2393 cmd->logout_cid = conn->cid; 2394 cmd->iscsi_opcode = ISCSI_OP_ASYNC_EVENT; 2395 cmd->i_state = ISTATE_SEND_ASYNCMSG; 2396 2397 spin_lock_bh(&conn_p->cmd_lock); 2398 list_add_tail(&cmd->i_conn_node, &conn_p->conn_cmd_list); 2399 spin_unlock_bh(&conn_p->cmd_lock); 2400 2401 iscsit_add_cmd_to_response_queue(cmd, conn_p, cmd->i_state); 2402 iscsit_dec_conn_usage_count(conn_p); 2403 } 2404 2405 static int iscsit_send_conn_drop_async_message( 2406 struct iscsi_cmd *cmd, 2407 struct iscsi_conn *conn) 2408 { 2409 struct iscsi_async *hdr; 2410 2411 cmd->tx_size = ISCSI_HDR_LEN; 2412 cmd->iscsi_opcode = ISCSI_OP_ASYNC_EVENT; 2413 2414 hdr = (struct iscsi_async *) cmd->pdu; 2415 hdr->opcode = ISCSI_OP_ASYNC_EVENT; 2416 hdr->flags = ISCSI_FLAG_CMD_FINAL; 2417 cmd->init_task_tag = 0xFFFFFFFF; 2418 cmd->targ_xfer_tag = 0xFFFFFFFF; 2419 put_unaligned_be64(0xFFFFFFFFFFFFFFFFULL, &hdr->rsvd4[0]); 2420 cmd->stat_sn = conn->stat_sn++; 2421 hdr->statsn = cpu_to_be32(cmd->stat_sn); 2422 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 2423 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); 2424 hdr->async_event = ISCSI_ASYNC_MSG_DROPPING_CONNECTION; 2425 hdr->param1 = cpu_to_be16(cmd->logout_cid); 2426 hdr->param2 = cpu_to_be16(conn->sess->sess_ops->DefaultTime2Wait); 2427 hdr->param3 = cpu_to_be16(conn->sess->sess_ops->DefaultTime2Retain); 2428 2429 if (conn->conn_ops->HeaderDigest) { 2430 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 2431 2432 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 2433 (unsigned char *)hdr, ISCSI_HDR_LEN, 2434 0, NULL, (u8 *)header_digest); 2435 2436 cmd->tx_size += ISCSI_CRC_LEN; 2437 pr_debug("Attaching CRC32C HeaderDigest to" 2438 " Async Message 0x%08x\n", *header_digest); 2439 } 2440 2441 cmd->iov_misc[0].iov_base = cmd->pdu; 2442 cmd->iov_misc[0].iov_len = cmd->tx_size; 2443 cmd->iov_misc_count = 1; 2444 2445 pr_debug("Sending Connection Dropped Async Message StatSN:" 2446 " 0x%08x, for CID: %hu on CID: %hu\n", cmd->stat_sn, 2447 cmd->logout_cid, conn->cid); 2448 return 0; 2449 } 2450 2451 static void iscsit_tx_thread_wait_for_tcp(struct iscsi_conn *conn) 2452 { 2453 if ((conn->sock->sk->sk_shutdown & SEND_SHUTDOWN) || 2454 (conn->sock->sk->sk_shutdown & RCV_SHUTDOWN)) { 2455 wait_for_completion_interruptible_timeout( 2456 &conn->tx_half_close_comp, 2457 ISCSI_TX_THREAD_TCP_TIMEOUT * HZ); 2458 } 2459 } 2460 2461 static int iscsit_send_data_in( 2462 struct iscsi_cmd *cmd, 2463 struct iscsi_conn *conn) 2464 { 2465 int iov_ret = 0, set_statsn = 0; 2466 u32 iov_count = 0, tx_size = 0; 2467 struct iscsi_datain datain; 2468 struct iscsi_datain_req *dr; 2469 struct iscsi_data_rsp 
*hdr; 2470 struct kvec *iov; 2471 int eodr = 0; 2472 int ret; 2473 2474 memset(&datain, 0, sizeof(struct iscsi_datain)); 2475 dr = iscsit_get_datain_values(cmd, &datain); 2476 if (!dr) { 2477 pr_err("iscsit_get_datain_values failed for ITT: 0x%08x\n", 2478 cmd->init_task_tag); 2479 return -1; 2480 } 2481 2482 /* 2483 * Be paranoid and double check the logic for now. 2484 */ 2485 if ((datain.offset + datain.length) > cmd->se_cmd.data_length) { 2486 pr_err("Command ITT: 0x%08x, datain.offset: %u and" 2487 " datain.length: %u exceeds cmd->data_length: %u\n", 2488 cmd->init_task_tag, datain.offset, datain.length, 2489 cmd->se_cmd.data_length); 2490 return -1; 2491 } 2492 2493 spin_lock_bh(&conn->sess->session_stats_lock); 2494 conn->sess->tx_data_octets += datain.length; 2495 if (conn->sess->se_sess->se_node_acl) { 2496 spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock); 2497 conn->sess->se_sess->se_node_acl->read_bytes += datain.length; 2498 spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock); 2499 } 2500 spin_unlock_bh(&conn->sess->session_stats_lock); 2501 /* 2502 * Special case for successfully execution w/ both DATAIN 2503 * and Sense Data. 2504 */ 2505 if ((datain.flags & ISCSI_FLAG_DATA_STATUS) && 2506 (cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)) 2507 datain.flags &= ~ISCSI_FLAG_DATA_STATUS; 2508 else { 2509 if ((dr->dr_complete == DATAIN_COMPLETE_NORMAL) || 2510 (dr->dr_complete == DATAIN_COMPLETE_CONNECTION_RECOVERY)) { 2511 iscsit_increment_maxcmdsn(cmd, conn->sess); 2512 cmd->stat_sn = conn->stat_sn++; 2513 set_statsn = 1; 2514 } else if (dr->dr_complete == 2515 DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY) 2516 set_statsn = 1; 2517 } 2518 2519 hdr = (struct iscsi_data_rsp *) cmd->pdu; 2520 memset(hdr, 0, ISCSI_HDR_LEN); 2521 hdr->opcode = ISCSI_OP_SCSI_DATA_IN; 2522 hdr->flags = datain.flags; 2523 if (hdr->flags & ISCSI_FLAG_DATA_STATUS) { 2524 if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) { 2525 hdr->flags |= ISCSI_FLAG_DATA_OVERFLOW; 2526 hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count); 2527 } else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) { 2528 hdr->flags |= ISCSI_FLAG_DATA_UNDERFLOW; 2529 hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count); 2530 } 2531 } 2532 hton24(hdr->dlength, datain.length); 2533 if (hdr->flags & ISCSI_FLAG_DATA_ACK) 2534 int_to_scsilun(cmd->se_cmd.orig_fe_lun, 2535 (struct scsi_lun *)&hdr->lun); 2536 else 2537 put_unaligned_le64(0xFFFFFFFFFFFFFFFFULL, &hdr->lun); 2538 2539 hdr->itt = cpu_to_be32(cmd->init_task_tag); 2540 hdr->ttt = (hdr->flags & ISCSI_FLAG_DATA_ACK) ? 2541 cpu_to_be32(cmd->targ_xfer_tag) : 2542 0xFFFFFFFF; 2543 hdr->statsn = (set_statsn) ? 
cpu_to_be32(cmd->stat_sn) : 2544 0xFFFFFFFF; 2545 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 2546 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); 2547 hdr->datasn = cpu_to_be32(datain.data_sn); 2548 hdr->offset = cpu_to_be32(datain.offset); 2549 2550 iov = &cmd->iov_data[0]; 2551 iov[iov_count].iov_base = cmd->pdu; 2552 iov[iov_count++].iov_len = ISCSI_HDR_LEN; 2553 tx_size += ISCSI_HDR_LEN; 2554 2555 if (conn->conn_ops->HeaderDigest) { 2556 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 2557 2558 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 2559 (unsigned char *)hdr, ISCSI_HDR_LEN, 2560 0, NULL, (u8 *)header_digest); 2561 2562 iov[0].iov_len += ISCSI_CRC_LEN; 2563 tx_size += ISCSI_CRC_LEN; 2564 2565 pr_debug("Attaching CRC32 HeaderDigest" 2566 " for DataIN PDU 0x%08x\n", *header_digest); 2567 } 2568 2569 iov_ret = iscsit_map_iovec(cmd, &cmd->iov_data[1], datain.offset, datain.length); 2570 if (iov_ret < 0) 2571 return -1; 2572 2573 iov_count += iov_ret; 2574 tx_size += datain.length; 2575 2576 cmd->padding = ((-datain.length) & 3); 2577 if (cmd->padding) { 2578 iov[iov_count].iov_base = cmd->pad_bytes; 2579 iov[iov_count++].iov_len = cmd->padding; 2580 tx_size += cmd->padding; 2581 2582 pr_debug("Attaching %u padding bytes\n", 2583 cmd->padding); 2584 } 2585 if (conn->conn_ops->DataDigest) { 2586 cmd->data_crc = iscsit_do_crypto_hash_sg(&conn->conn_tx_hash, cmd, 2587 datain.offset, datain.length, cmd->padding, cmd->pad_bytes); 2588 2589 iov[iov_count].iov_base = &cmd->data_crc; 2590 iov[iov_count++].iov_len = ISCSI_CRC_LEN; 2591 tx_size += ISCSI_CRC_LEN; 2592 2593 pr_debug("Attached CRC32C DataDigest %d bytes, crc" 2594 " 0x%08x\n", datain.length+cmd->padding, cmd->data_crc); 2595 } 2596 2597 cmd->iov_data_count = iov_count; 2598 cmd->tx_size = tx_size; 2599 2600 pr_debug("Built DataIN ITT: 0x%08x, StatSN: 0x%08x," 2601 " DataSN: 0x%08x, Offset: %u, Length: %u, CID: %hu\n", 2602 cmd->init_task_tag, ntohl(hdr->statsn), ntohl(hdr->datasn), 2603 ntohl(hdr->offset), datain.length, conn->cid); 2604 2605 /* sendpage is preferred but can't insert markers */ 2606 if (!conn->conn_ops->IFMarker) 2607 ret = iscsit_fe_sendpage_sg(cmd, conn); 2608 else 2609 ret = iscsit_send_tx_data(cmd, conn, 0); 2610 2611 iscsit_unmap_iovec(cmd); 2612 2613 if (ret < 0) { 2614 iscsit_tx_thread_wait_for_tcp(conn); 2615 return ret; 2616 } 2617 2618 if (dr->dr_complete) { 2619 eodr = (cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ? 2620 2 : 1; 2621 iscsit_free_datain_req(cmd, dr); 2622 } 2623 2624 return eodr; 2625 } 2626 2627 static int iscsit_send_logout_response( 2628 struct iscsi_cmd *cmd, 2629 struct iscsi_conn *conn) 2630 { 2631 int niov = 0, tx_size; 2632 struct iscsi_conn *logout_conn = NULL; 2633 struct iscsi_conn_recovery *cr = NULL; 2634 struct iscsi_session *sess = conn->sess; 2635 struct kvec *iov; 2636 struct iscsi_logout_rsp *hdr; 2637 /* 2638 * The actual shutting down of Sessions and/or Connections 2639 * for CLOSESESSION and CLOSECONNECTION Logout Requests 2640 * is done in scsi_logout_post_handler(). 
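 * (iscsit_logout_post_handler(), invoked from handle_response_queue()
 * in the TX thread only after this Logout Response PDU has actually
 * been transmitted)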
2641 */ 2642 switch (cmd->logout_reason) { 2643 case ISCSI_LOGOUT_REASON_CLOSE_SESSION: 2644 pr_debug("iSCSI session logout successful, setting" 2645 " logout response to ISCSI_LOGOUT_SUCCESS.\n"); 2646 cmd->logout_response = ISCSI_LOGOUT_SUCCESS; 2647 break; 2648 case ISCSI_LOGOUT_REASON_CLOSE_CONNECTION: 2649 if (cmd->logout_response == ISCSI_LOGOUT_CID_NOT_FOUND) 2650 break; 2651 /* 2652 * For CLOSECONNECTION logout requests carrying 2653 * a matching logout CID -> local CID, the reference 2654 * for the local CID will have been incremented in 2655 * iscsi_logout_closeconnection(). 2656 * 2657 * For CLOSECONNECTION logout requests carrying 2658 * a different CID than the connection it arrived 2659 * on, the connection responding to cmd->logout_cid 2660 * is stopped in iscsit_logout_post_handler_diffcid(). 2661 */ 2662 2663 pr_debug("iSCSI CID: %hu logout on CID: %hu" 2664 " successful.\n", cmd->logout_cid, conn->cid); 2665 cmd->logout_response = ISCSI_LOGOUT_SUCCESS; 2666 break; 2667 case ISCSI_LOGOUT_REASON_RECOVERY: 2668 if ((cmd->logout_response == ISCSI_LOGOUT_RECOVERY_UNSUPPORTED) || 2669 (cmd->logout_response == ISCSI_LOGOUT_CLEANUP_FAILED)) 2670 break; 2671 /* 2672 * If the connection is still active from our point of view 2673 * force connection recovery to occur. 2674 */ 2675 logout_conn = iscsit_get_conn_from_cid_rcfr(sess, 2676 cmd->logout_cid); 2677 if (logout_conn) { 2678 iscsit_connection_reinstatement_rcfr(logout_conn); 2679 iscsit_dec_conn_usage_count(logout_conn); 2680 } 2681 2682 cr = iscsit_get_inactive_connection_recovery_entry( 2683 conn->sess, cmd->logout_cid); 2684 if (!cr) { 2685 pr_err("Unable to locate CID: %hu for" 2686 " REMOVECONNFORRECOVERY Logout Request.\n", 2687 cmd->logout_cid); 2688 cmd->logout_response = ISCSI_LOGOUT_CID_NOT_FOUND; 2689 break; 2690 } 2691 2692 iscsit_discard_cr_cmds_by_expstatsn(cr, cmd->exp_stat_sn); 2693 2694 pr_debug("iSCSI REMOVECONNFORRECOVERY logout" 2695 " for recovery for CID: %hu on CID: %hu successful.\n", 2696 cmd->logout_cid, conn->cid); 2697 cmd->logout_response = ISCSI_LOGOUT_SUCCESS; 2698 break; 2699 default: 2700 pr_err("Unknown cmd->logout_reason: 0x%02x\n", 2701 cmd->logout_reason); 2702 return -1; 2703 } 2704 2705 tx_size = ISCSI_HDR_LEN; 2706 hdr = (struct iscsi_logout_rsp *)cmd->pdu; 2707 memset(hdr, 0, ISCSI_HDR_LEN); 2708 hdr->opcode = ISCSI_OP_LOGOUT_RSP; 2709 hdr->flags |= ISCSI_FLAG_CMD_FINAL; 2710 hdr->response = cmd->logout_response; 2711 hdr->itt = cpu_to_be32(cmd->init_task_tag); 2712 cmd->stat_sn = conn->stat_sn++; 2713 hdr->statsn = cpu_to_be32(cmd->stat_sn); 2714 2715 iscsit_increment_maxcmdsn(cmd, conn->sess); 2716 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 2717 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); 2718 2719 iov = &cmd->iov_misc[0]; 2720 iov[niov].iov_base = cmd->pdu; 2721 iov[niov++].iov_len = ISCSI_HDR_LEN; 2722 2723 if (conn->conn_ops->HeaderDigest) { 2724 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 2725 2726 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 2727 (unsigned char *)hdr, ISCSI_HDR_LEN, 2728 0, NULL, (u8 *)header_digest); 2729 2730 iov[0].iov_len += ISCSI_CRC_LEN; 2731 tx_size += ISCSI_CRC_LEN; 2732 pr_debug("Attaching CRC32C HeaderDigest to" 2733 " Logout Response 0x%08x\n", *header_digest); 2734 } 2735 cmd->iov_misc_count = niov; 2736 cmd->tx_size = tx_size; 2737 2738 pr_debug("Sending Logout Response ITT: 0x%08x StatSN:" 2739 " 0x%08x Response: 0x%02x CID: %hu on CID: %hu\n", 2740 cmd->init_task_tag, cmd->stat_sn, hdr->response, 2741 
cmd->logout_cid, conn->cid); 2742 2743 return 0; 2744 } 2745 2746 /* 2747 * Unsolicited NOPIN, either requesting a response or not. 2748 */ 2749 static int iscsit_send_unsolicited_nopin( 2750 struct iscsi_cmd *cmd, 2751 struct iscsi_conn *conn, 2752 int want_response) 2753 { 2754 int tx_size = ISCSI_HDR_LEN; 2755 struct iscsi_nopin *hdr; 2756 int ret; 2757 2758 hdr = (struct iscsi_nopin *) cmd->pdu; 2759 memset(hdr, 0, ISCSI_HDR_LEN); 2760 hdr->opcode = ISCSI_OP_NOOP_IN; 2761 hdr->flags |= ISCSI_FLAG_CMD_FINAL; 2762 hdr->itt = cpu_to_be32(cmd->init_task_tag); 2763 hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag); 2764 cmd->stat_sn = conn->stat_sn; 2765 hdr->statsn = cpu_to_be32(cmd->stat_sn); 2766 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 2767 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); 2768 2769 if (conn->conn_ops->HeaderDigest) { 2770 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 2771 2772 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 2773 (unsigned char *)hdr, ISCSI_HDR_LEN, 2774 0, NULL, (u8 *)header_digest); 2775 2776 tx_size += ISCSI_CRC_LEN; 2777 pr_debug("Attaching CRC32C HeaderDigest to" 2778 " NopIN 0x%08x\n", *header_digest); 2779 } 2780 2781 cmd->iov_misc[0].iov_base = cmd->pdu; 2782 cmd->iov_misc[0].iov_len = tx_size; 2783 cmd->iov_misc_count = 1; 2784 cmd->tx_size = tx_size; 2785 2786 pr_debug("Sending Unsolicited NOPIN TTT: 0x%08x StatSN:" 2787 " 0x%08x CID: %hu\n", hdr->ttt, cmd->stat_sn, conn->cid); 2788 2789 ret = iscsit_send_tx_data(cmd, conn, 1); 2790 if (ret < 0) { 2791 iscsit_tx_thread_wait_for_tcp(conn); 2792 return ret; 2793 } 2794 2795 spin_lock_bh(&cmd->istate_lock); 2796 cmd->i_state = want_response ? 2797 ISTATE_SENT_NOPIN_WANT_RESPONSE : ISTATE_SENT_STATUS; 2798 spin_unlock_bh(&cmd->istate_lock); 2799 2800 return 0; 2801 } 2802 2803 static int iscsit_send_nopin_response( 2804 struct iscsi_cmd *cmd, 2805 struct iscsi_conn *conn) 2806 { 2807 int niov = 0, tx_size; 2808 u32 padding = 0; 2809 struct kvec *iov; 2810 struct iscsi_nopin *hdr; 2811 2812 tx_size = ISCSI_HDR_LEN; 2813 hdr = (struct iscsi_nopin *) cmd->pdu; 2814 memset(hdr, 0, ISCSI_HDR_LEN); 2815 hdr->opcode = ISCSI_OP_NOOP_IN; 2816 hdr->flags |= ISCSI_FLAG_CMD_FINAL; 2817 hton24(hdr->dlength, cmd->buf_ptr_size); 2818 put_unaligned_le64(0xFFFFFFFFFFFFFFFFULL, &hdr->lun); 2819 hdr->itt = cpu_to_be32(cmd->init_task_tag); 2820 hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag); 2821 cmd->stat_sn = conn->stat_sn++; 2822 hdr->statsn = cpu_to_be32(cmd->stat_sn); 2823 2824 iscsit_increment_maxcmdsn(cmd, conn->sess); 2825 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 2826 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); 2827 2828 iov = &cmd->iov_misc[0]; 2829 iov[niov].iov_base = cmd->pdu; 2830 iov[niov++].iov_len = ISCSI_HDR_LEN; 2831 2832 if (conn->conn_ops->HeaderDigest) { 2833 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 2834 2835 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 2836 (unsigned char *)hdr, ISCSI_HDR_LEN, 2837 0, NULL, (u8 *)header_digest); 2838 2839 iov[0].iov_len += ISCSI_CRC_LEN; 2840 tx_size += ISCSI_CRC_LEN; 2841 pr_debug("Attaching CRC32C HeaderDigest" 2842 " to NopIn 0x%08x\n", *header_digest); 2843 } 2844 2845 /* 2846 * NOPOUT Ping Data is attached to struct iscsi_cmd->buf_ptr. 2847 * NOPOUT DataSegmentLength is at struct iscsi_cmd->buf_ptr_size. 
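 * The ping data at cmd->buf_ptr is echoed back verbatim below, padded
 * out to a 4-byte boundary and covered by a DataDigest when one has
 * been negotiated for the connection.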
2848 */ 2849 if (cmd->buf_ptr_size) { 2850 iov[niov].iov_base = cmd->buf_ptr; 2851 iov[niov++].iov_len = cmd->buf_ptr_size; 2852 tx_size += cmd->buf_ptr_size; 2853 2854 pr_debug("Echoing back %u bytes of ping" 2855 " data.\n", cmd->buf_ptr_size); 2856 2857 padding = ((-cmd->buf_ptr_size) & 3); 2858 if (padding != 0) { 2859 iov[niov].iov_base = &cmd->pad_bytes; 2860 iov[niov++].iov_len = padding; 2861 tx_size += padding; 2862 pr_debug("Attaching %u additional" 2863 " padding bytes.\n", padding); 2864 } 2865 if (conn->conn_ops->DataDigest) { 2866 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 2867 cmd->buf_ptr, cmd->buf_ptr_size, 2868 padding, (u8 *)&cmd->pad_bytes, 2869 (u8 *)&cmd->data_crc); 2870 2871 iov[niov].iov_base = &cmd->data_crc; 2872 iov[niov++].iov_len = ISCSI_CRC_LEN; 2873 tx_size += ISCSI_CRC_LEN; 2874 pr_debug("Attached DataDigest for %u" 2875 " bytes of ping data, CRC 0x%08x\n", 2876 cmd->buf_ptr_size, cmd->data_crc); 2877 } 2878 } 2879 2880 cmd->iov_misc_count = niov; 2881 cmd->tx_size = tx_size; 2882 2883 pr_debug("Sending NOPIN Response ITT: 0x%08x, TTT:" 2884 " 0x%08x, StatSN: 0x%08x, Length %u\n", cmd->init_task_tag, 2885 cmd->targ_xfer_tag, cmd->stat_sn, cmd->buf_ptr_size); 2886 2887 return 0; 2888 } 2889 2890 static int iscsit_send_r2t( 2891 struct iscsi_cmd *cmd, 2892 struct iscsi_conn *conn) 2893 { 2894 int tx_size = 0; 2895 struct iscsi_r2t *r2t; 2896 struct iscsi_r2t_rsp *hdr; 2897 int ret; 2898 2899 r2t = iscsit_get_r2t_from_list(cmd); 2900 if (!r2t) 2901 return -1; 2902 2903 hdr = (struct iscsi_r2t_rsp *) cmd->pdu; 2904 memset(hdr, 0, ISCSI_HDR_LEN); 2905 hdr->opcode = ISCSI_OP_R2T; 2906 hdr->flags |= ISCSI_FLAG_CMD_FINAL; 2907 int_to_scsilun(cmd->se_cmd.orig_fe_lun, 2908 (struct scsi_lun *)&hdr->lun); 2909 hdr->itt = cpu_to_be32(cmd->init_task_tag); 2910 spin_lock_bh(&conn->sess->ttt_lock); 2911 r2t->targ_xfer_tag = conn->sess->targ_xfer_tag++; 2912 if (r2t->targ_xfer_tag == 0xFFFFFFFF) 2913 r2t->targ_xfer_tag = conn->sess->targ_xfer_tag++; 2914 spin_unlock_bh(&conn->sess->ttt_lock); 2915 hdr->ttt = cpu_to_be32(r2t->targ_xfer_tag); 2916 hdr->statsn = cpu_to_be32(conn->stat_sn); 2917 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 2918 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); 2919 hdr->r2tsn = cpu_to_be32(r2t->r2t_sn); 2920 hdr->data_offset = cpu_to_be32(r2t->offset); 2921 hdr->data_length = cpu_to_be32(r2t->xfer_len); 2922 2923 cmd->iov_misc[0].iov_base = cmd->pdu; 2924 cmd->iov_misc[0].iov_len = ISCSI_HDR_LEN; 2925 tx_size += ISCSI_HDR_LEN; 2926 2927 if (conn->conn_ops->HeaderDigest) { 2928 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 2929 2930 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 2931 (unsigned char *)hdr, ISCSI_HDR_LEN, 2932 0, NULL, (u8 *)header_digest); 2933 2934 cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN; 2935 tx_size += ISCSI_CRC_LEN; 2936 pr_debug("Attaching CRC32 HeaderDigest for R2T" 2937 " PDU 0x%08x\n", *header_digest); 2938 } 2939 2940 pr_debug("Built %sR2T, ITT: 0x%08x, TTT: 0x%08x, StatSN:" 2941 " 0x%08x, R2TSN: 0x%08x, Offset: %u, DDTL: %u, CID: %hu\n", 2942 (!r2t->recovery_r2t) ? 
"" : "Recovery ", cmd->init_task_tag, 2943 r2t->targ_xfer_tag, ntohl(hdr->statsn), r2t->r2t_sn, 2944 r2t->offset, r2t->xfer_len, conn->cid); 2945 2946 cmd->iov_misc_count = 1; 2947 cmd->tx_size = tx_size; 2948 2949 spin_lock_bh(&cmd->r2t_lock); 2950 r2t->sent_r2t = 1; 2951 spin_unlock_bh(&cmd->r2t_lock); 2952 2953 ret = iscsit_send_tx_data(cmd, conn, 1); 2954 if (ret < 0) { 2955 iscsit_tx_thread_wait_for_tcp(conn); 2956 return ret; 2957 } 2958 2959 spin_lock_bh(&cmd->dataout_timeout_lock); 2960 iscsit_start_dataout_timer(cmd, conn); 2961 spin_unlock_bh(&cmd->dataout_timeout_lock); 2962 2963 return 0; 2964 } 2965 2966 /* 2967 * @recovery: If called from iscsi_task_reassign_complete_write() for 2968 * connection recovery. 2969 */ 2970 int iscsit_build_r2ts_for_cmd( 2971 struct iscsi_cmd *cmd, 2972 struct iscsi_conn *conn, 2973 bool recovery) 2974 { 2975 int first_r2t = 1; 2976 u32 offset = 0, xfer_len = 0; 2977 2978 spin_lock_bh(&cmd->r2t_lock); 2979 if (cmd->cmd_flags & ICF_SENT_LAST_R2T) { 2980 spin_unlock_bh(&cmd->r2t_lock); 2981 return 0; 2982 } 2983 2984 if (conn->sess->sess_ops->DataSequenceInOrder && 2985 !recovery) 2986 cmd->r2t_offset = max(cmd->r2t_offset, cmd->write_data_done); 2987 2988 while (cmd->outstanding_r2ts < conn->sess->sess_ops->MaxOutstandingR2T) { 2989 if (conn->sess->sess_ops->DataSequenceInOrder) { 2990 offset = cmd->r2t_offset; 2991 2992 if (first_r2t && recovery) { 2993 int new_data_end = offset + 2994 conn->sess->sess_ops->MaxBurstLength - 2995 cmd->next_burst_len; 2996 2997 if (new_data_end > cmd->se_cmd.data_length) 2998 xfer_len = cmd->se_cmd.data_length - offset; 2999 else 3000 xfer_len = 3001 conn->sess->sess_ops->MaxBurstLength - 3002 cmd->next_burst_len; 3003 } else { 3004 int new_data_end = offset + 3005 conn->sess->sess_ops->MaxBurstLength; 3006 3007 if (new_data_end > cmd->se_cmd.data_length) 3008 xfer_len = cmd->se_cmd.data_length - offset; 3009 else 3010 xfer_len = conn->sess->sess_ops->MaxBurstLength; 3011 } 3012 cmd->r2t_offset += xfer_len; 3013 3014 if (cmd->r2t_offset == cmd->se_cmd.data_length) 3015 cmd->cmd_flags |= ICF_SENT_LAST_R2T; 3016 } else { 3017 struct iscsi_seq *seq; 3018 3019 seq = iscsit_get_seq_holder_for_r2t(cmd); 3020 if (!seq) { 3021 spin_unlock_bh(&cmd->r2t_lock); 3022 return -1; 3023 } 3024 3025 offset = seq->offset; 3026 xfer_len = seq->xfer_len; 3027 3028 if (cmd->seq_send_order == cmd->seq_count) 3029 cmd->cmd_flags |= ICF_SENT_LAST_R2T; 3030 } 3031 cmd->outstanding_r2ts++; 3032 first_r2t = 0; 3033 3034 if (iscsit_add_r2t_to_list(cmd, offset, xfer_len, 0, 0) < 0) { 3035 spin_unlock_bh(&cmd->r2t_lock); 3036 return -1; 3037 } 3038 3039 if (cmd->cmd_flags & ICF_SENT_LAST_R2T) 3040 break; 3041 } 3042 spin_unlock_bh(&cmd->r2t_lock); 3043 3044 return 0; 3045 } 3046 3047 static int iscsit_send_status( 3048 struct iscsi_cmd *cmd, 3049 struct iscsi_conn *conn) 3050 { 3051 u8 iov_count = 0, recovery; 3052 u32 padding = 0, tx_size = 0; 3053 struct iscsi_scsi_rsp *hdr; 3054 struct kvec *iov; 3055 3056 recovery = (cmd->i_state != ISTATE_SEND_STATUS); 3057 if (!recovery) 3058 cmd->stat_sn = conn->stat_sn++; 3059 3060 spin_lock_bh(&conn->sess->session_stats_lock); 3061 conn->sess->rsp_pdus++; 3062 spin_unlock_bh(&conn->sess->session_stats_lock); 3063 3064 hdr = (struct iscsi_scsi_rsp *) cmd->pdu; 3065 memset(hdr, 0, ISCSI_HDR_LEN); 3066 hdr->opcode = ISCSI_OP_SCSI_CMD_RSP; 3067 hdr->flags |= ISCSI_FLAG_CMD_FINAL; 3068 if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) { 3069 hdr->flags |= ISCSI_FLAG_CMD_OVERFLOW; 3070 hdr->residual_count = 
cpu_to_be32(cmd->se_cmd.residual_count); 3071 } else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) { 3072 hdr->flags |= ISCSI_FLAG_CMD_UNDERFLOW; 3073 hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count); 3074 } 3075 hdr->response = cmd->iscsi_response; 3076 hdr->cmd_status = cmd->se_cmd.scsi_status; 3077 hdr->itt = cpu_to_be32(cmd->init_task_tag); 3078 hdr->statsn = cpu_to_be32(cmd->stat_sn); 3079 3080 iscsit_increment_maxcmdsn(cmd, conn->sess); 3081 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 3082 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); 3083 3084 iov = &cmd->iov_misc[0]; 3085 iov[iov_count].iov_base = cmd->pdu; 3086 iov[iov_count++].iov_len = ISCSI_HDR_LEN; 3087 tx_size += ISCSI_HDR_LEN; 3088 3089 /* 3090 * Attach SENSE DATA payload to iSCSI Response PDU 3091 */ 3092 if (cmd->se_cmd.sense_buffer && 3093 ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) || 3094 (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) { 3095 padding = -(cmd->se_cmd.scsi_sense_length) & 3; 3096 hton24(hdr->dlength, cmd->se_cmd.scsi_sense_length); 3097 iov[iov_count].iov_base = cmd->se_cmd.sense_buffer; 3098 iov[iov_count++].iov_len = 3099 (cmd->se_cmd.scsi_sense_length + padding); 3100 tx_size += cmd->se_cmd.scsi_sense_length; 3101 3102 if (padding) { 3103 memset(cmd->se_cmd.sense_buffer + 3104 cmd->se_cmd.scsi_sense_length, 0, padding); 3105 tx_size += padding; 3106 pr_debug("Adding %u bytes of padding to" 3107 " SENSE.\n", padding); 3108 } 3109 3110 if (conn->conn_ops->DataDigest) { 3111 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 3112 cmd->se_cmd.sense_buffer, 3113 (cmd->se_cmd.scsi_sense_length + padding), 3114 0, NULL, (u8 *)&cmd->data_crc); 3115 3116 iov[iov_count].iov_base = &cmd->data_crc; 3117 iov[iov_count++].iov_len = ISCSI_CRC_LEN; 3118 tx_size += ISCSI_CRC_LEN; 3119 3120 pr_debug("Attaching CRC32 DataDigest for" 3121 " SENSE, %u bytes CRC 0x%08x\n", 3122 (cmd->se_cmd.scsi_sense_length + padding), 3123 cmd->data_crc); 3124 } 3125 3126 pr_debug("Attaching SENSE DATA: %u bytes to iSCSI" 3127 " Response PDU\n", 3128 cmd->se_cmd.scsi_sense_length); 3129 } 3130 3131 if (conn->conn_ops->HeaderDigest) { 3132 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 3133 3134 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 3135 (unsigned char *)hdr, ISCSI_HDR_LEN, 3136 0, NULL, (u8 *)header_digest); 3137 3138 iov[0].iov_len += ISCSI_CRC_LEN; 3139 tx_size += ISCSI_CRC_LEN; 3140 pr_debug("Attaching CRC32 HeaderDigest for Response" 3141 " PDU 0x%08x\n", *header_digest); 3142 } 3143 3144 cmd->iov_misc_count = iov_count; 3145 cmd->tx_size = tx_size; 3146 3147 pr_debug("Built %sSCSI Response, ITT: 0x%08x, StatSN: 0x%08x," 3148 " Response: 0x%02x, SAM Status: 0x%02x, CID: %hu\n", 3149 (!recovery) ? 
"" : "Recovery ", cmd->init_task_tag, 3150 cmd->stat_sn, 0x00, cmd->se_cmd.scsi_status, conn->cid); 3151 3152 return 0; 3153 } 3154 3155 static u8 iscsit_convert_tcm_tmr_rsp(struct se_tmr_req *se_tmr) 3156 { 3157 switch (se_tmr->response) { 3158 case TMR_FUNCTION_COMPLETE: 3159 return ISCSI_TMF_RSP_COMPLETE; 3160 case TMR_TASK_DOES_NOT_EXIST: 3161 return ISCSI_TMF_RSP_NO_TASK; 3162 case TMR_LUN_DOES_NOT_EXIST: 3163 return ISCSI_TMF_RSP_NO_LUN; 3164 case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED: 3165 return ISCSI_TMF_RSP_NOT_SUPPORTED; 3166 case TMR_FUNCTION_AUTHORIZATION_FAILED: 3167 return ISCSI_TMF_RSP_AUTH_FAILED; 3168 case TMR_FUNCTION_REJECTED: 3169 default: 3170 return ISCSI_TMF_RSP_REJECTED; 3171 } 3172 } 3173 3174 static int iscsit_send_task_mgt_rsp( 3175 struct iscsi_cmd *cmd, 3176 struct iscsi_conn *conn) 3177 { 3178 struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req; 3179 struct iscsi_tm_rsp *hdr; 3180 u32 tx_size = 0; 3181 3182 hdr = (struct iscsi_tm_rsp *) cmd->pdu; 3183 memset(hdr, 0, ISCSI_HDR_LEN); 3184 hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP; 3185 hdr->flags = ISCSI_FLAG_CMD_FINAL; 3186 hdr->response = iscsit_convert_tcm_tmr_rsp(se_tmr); 3187 hdr->itt = cpu_to_be32(cmd->init_task_tag); 3188 cmd->stat_sn = conn->stat_sn++; 3189 hdr->statsn = cpu_to_be32(cmd->stat_sn); 3190 3191 iscsit_increment_maxcmdsn(cmd, conn->sess); 3192 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 3193 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); 3194 3195 cmd->iov_misc[0].iov_base = cmd->pdu; 3196 cmd->iov_misc[0].iov_len = ISCSI_HDR_LEN; 3197 tx_size += ISCSI_HDR_LEN; 3198 3199 if (conn->conn_ops->HeaderDigest) { 3200 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 3201 3202 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 3203 (unsigned char *)hdr, ISCSI_HDR_LEN, 3204 0, NULL, (u8 *)header_digest); 3205 3206 cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN; 3207 tx_size += ISCSI_CRC_LEN; 3208 pr_debug("Attaching CRC32 HeaderDigest for Task" 3209 " Mgmt Response PDU 0x%08x\n", *header_digest); 3210 } 3211 3212 cmd->iov_misc_count = 1; 3213 cmd->tx_size = tx_size; 3214 3215 pr_debug("Built Task Management Response ITT: 0x%08x," 3216 " StatSN: 0x%08x, Response: 0x%02x, CID: %hu\n", 3217 cmd->init_task_tag, cmd->stat_sn, hdr->response, conn->cid); 3218 3219 return 0; 3220 } 3221 3222 static bool iscsit_check_inaddr_any(struct iscsi_np *np) 3223 { 3224 bool ret = false; 3225 3226 if (np->np_sockaddr.ss_family == AF_INET6) { 3227 const struct sockaddr_in6 sin6 = { 3228 .sin6_addr = IN6ADDR_ANY_INIT }; 3229 struct sockaddr_in6 *sock_in6 = 3230 (struct sockaddr_in6 *)&np->np_sockaddr; 3231 3232 if (!memcmp(sock_in6->sin6_addr.s6_addr, 3233 sin6.sin6_addr.s6_addr, 16)) 3234 ret = true; 3235 } else { 3236 struct sockaddr_in * sock_in = 3237 (struct sockaddr_in *)&np->np_sockaddr; 3238 3239 if (sock_in->sin_addr.s_addr == INADDR_ANY) 3240 ret = true; 3241 } 3242 3243 return ret; 3244 } 3245 3246 #define SENDTARGETS_BUF_LIMIT 32768U 3247 3248 static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd) 3249 { 3250 char *payload = NULL; 3251 struct iscsi_conn *conn = cmd->conn; 3252 struct iscsi_portal_group *tpg; 3253 struct iscsi_tiqn *tiqn; 3254 struct iscsi_tpg_np *tpg_np; 3255 int buffer_len, end_of_buf = 0, len = 0, payload_len = 0; 3256 unsigned char buf[ISCSI_IQN_LEN+12]; /* iqn + "TargetName=" + \0 */ 3257 3258 buffer_len = max(conn->conn_ops->MaxRecvDataSegmentLength, 3259 SENDTARGETS_BUF_LIMIT); 3260 3261 payload = kzalloc(buffer_len, GFP_KERNEL); 3262 if (!payload) { 3263 
pr_err("Unable to allocate memory for sendtargets" 3264 " response.\n"); 3265 return -ENOMEM; 3266 } 3267 3268 spin_lock(&tiqn_lock); 3269 list_for_each_entry(tiqn, &g_tiqn_list, tiqn_list) { 3270 len = sprintf(buf, "TargetName=%s", tiqn->tiqn); 3271 len += 1; 3272 3273 if ((len + payload_len) > buffer_len) { 3274 spin_unlock(&tiqn->tiqn_tpg_lock); 3275 end_of_buf = 1; 3276 goto eob; 3277 } 3278 memcpy(payload + payload_len, buf, len); 3279 payload_len += len; 3280 3281 spin_lock(&tiqn->tiqn_tpg_lock); 3282 list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) { 3283 3284 spin_lock(&tpg->tpg_state_lock); 3285 if ((tpg->tpg_state == TPG_STATE_FREE) || 3286 (tpg->tpg_state == TPG_STATE_INACTIVE)) { 3287 spin_unlock(&tpg->tpg_state_lock); 3288 continue; 3289 } 3290 spin_unlock(&tpg->tpg_state_lock); 3291 3292 spin_lock(&tpg->tpg_np_lock); 3293 list_for_each_entry(tpg_np, &tpg->tpg_gnp_list, 3294 tpg_np_list) { 3295 struct iscsi_np *np = tpg_np->tpg_np; 3296 bool inaddr_any = iscsit_check_inaddr_any(np); 3297 3298 len = sprintf(buf, "TargetAddress=" 3299 "%s%s%s:%hu,%hu", 3300 (np->np_sockaddr.ss_family == AF_INET6) ? 3301 "[" : "", (inaddr_any == false) ? 3302 np->np_ip : conn->local_ip, 3303 (np->np_sockaddr.ss_family == AF_INET6) ? 3304 "]" : "", (inaddr_any == false) ? 3305 np->np_port : conn->local_port, 3306 tpg->tpgt); 3307 len += 1; 3308 3309 if ((len + payload_len) > buffer_len) { 3310 spin_unlock(&tpg->tpg_np_lock); 3311 spin_unlock(&tiqn->tiqn_tpg_lock); 3312 end_of_buf = 1; 3313 goto eob; 3314 } 3315 memcpy(payload + payload_len, buf, len); 3316 payload_len += len; 3317 } 3318 spin_unlock(&tpg->tpg_np_lock); 3319 } 3320 spin_unlock(&tiqn->tiqn_tpg_lock); 3321 eob: 3322 if (end_of_buf) 3323 break; 3324 } 3325 spin_unlock(&tiqn_lock); 3326 3327 cmd->buf_ptr = payload; 3328 3329 return payload_len; 3330 } 3331 3332 /* 3333 * FIXME: Add support for F_BIT and C_BIT when the length is longer than 3334 * MaxRecvDataSegmentLength. 
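 * (F and C are the Final and Continue flags of the Text Response PDU;
 * without them a SendTargets payload larger than one PDU cannot be
 * split across multiple Text Responses)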
3335 */ 3336 static int iscsit_send_text_rsp( 3337 struct iscsi_cmd *cmd, 3338 struct iscsi_conn *conn) 3339 { 3340 struct iscsi_text_rsp *hdr; 3341 struct kvec *iov; 3342 u32 padding = 0, tx_size = 0; 3343 int text_length, iov_count = 0; 3344 3345 text_length = iscsit_build_sendtargets_response(cmd); 3346 if (text_length < 0) 3347 return text_length; 3348 3349 padding = ((-text_length) & 3); 3350 if (padding != 0) { 3351 memset(cmd->buf_ptr + text_length, 0, padding); 3352 pr_debug("Attaching %u additional bytes for" 3353 " padding.\n", padding); 3354 } 3355 3356 hdr = (struct iscsi_text_rsp *) cmd->pdu; 3357 memset(hdr, 0, ISCSI_HDR_LEN); 3358 hdr->opcode = ISCSI_OP_TEXT_RSP; 3359 hdr->flags |= ISCSI_FLAG_CMD_FINAL; 3360 hton24(hdr->dlength, text_length); 3361 hdr->itt = cpu_to_be32(cmd->init_task_tag); 3362 hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag); 3363 cmd->stat_sn = conn->stat_sn++; 3364 hdr->statsn = cpu_to_be32(cmd->stat_sn); 3365 3366 iscsit_increment_maxcmdsn(cmd, conn->sess); 3367 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 3368 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); 3369 3370 iov = &cmd->iov_misc[0]; 3371 3372 iov[iov_count].iov_base = cmd->pdu; 3373 iov[iov_count++].iov_len = ISCSI_HDR_LEN; 3374 iov[iov_count].iov_base = cmd->buf_ptr; 3375 iov[iov_count++].iov_len = text_length + padding; 3376 3377 tx_size += (ISCSI_HDR_LEN + text_length + padding); 3378 3379 if (conn->conn_ops->HeaderDigest) { 3380 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 3381 3382 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 3383 (unsigned char *)hdr, ISCSI_HDR_LEN, 3384 0, NULL, (u8 *)header_digest); 3385 3386 iov[0].iov_len += ISCSI_CRC_LEN; 3387 tx_size += ISCSI_CRC_LEN; 3388 pr_debug("Attaching CRC32 HeaderDigest for" 3389 " Text Response PDU 0x%08x\n", *header_digest); 3390 } 3391 3392 if (conn->conn_ops->DataDigest) { 3393 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 3394 cmd->buf_ptr, (text_length + padding), 3395 0, NULL, (u8 *)&cmd->data_crc); 3396 3397 iov[iov_count].iov_base = &cmd->data_crc; 3398 iov[iov_count++].iov_len = ISCSI_CRC_LEN; 3399 tx_size += ISCSI_CRC_LEN; 3400 3401 pr_debug("Attaching DataDigest for %u bytes of text" 3402 " data, CRC 0x%08x\n", (text_length + padding), 3403 cmd->data_crc); 3404 } 3405 3406 cmd->iov_misc_count = iov_count; 3407 cmd->tx_size = tx_size; 3408 3409 pr_debug("Built Text Response: ITT: 0x%08x, StatSN: 0x%08x," 3410 " Length: %u, CID: %hu\n", cmd->init_task_tag, cmd->stat_sn, 3411 text_length, conn->cid); 3412 return 0; 3413 } 3414 3415 static int iscsit_send_reject( 3416 struct iscsi_cmd *cmd, 3417 struct iscsi_conn *conn) 3418 { 3419 u32 iov_count = 0, tx_size = 0; 3420 struct iscsi_reject *hdr; 3421 struct kvec *iov; 3422 3423 hdr = (struct iscsi_reject *) cmd->pdu; 3424 hdr->opcode = ISCSI_OP_REJECT; 3425 hdr->flags |= ISCSI_FLAG_CMD_FINAL; 3426 hton24(hdr->dlength, ISCSI_HDR_LEN); 3427 cmd->stat_sn = conn->stat_sn++; 3428 hdr->statsn = cpu_to_be32(cmd->stat_sn); 3429 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 3430 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); 3431 3432 iov = &cmd->iov_misc[0]; 3433 3434 iov[iov_count].iov_base = cmd->pdu; 3435 iov[iov_count++].iov_len = ISCSI_HDR_LEN; 3436 iov[iov_count].iov_base = cmd->buf_ptr; 3437 iov[iov_count++].iov_len = ISCSI_HDR_LEN; 3438 3439 tx_size = (ISCSI_HDR_LEN + ISCSI_HDR_LEN); 3440 3441 if (conn->conn_ops->HeaderDigest) { 3442 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 3443 3444 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 3445 
(unsigned char *)hdr, ISCSI_HDR_LEN, 3446 0, NULL, (u8 *)header_digest); 3447 3448 iov[0].iov_len += ISCSI_CRC_LEN; 3449 tx_size += ISCSI_CRC_LEN; 3450 pr_debug("Attaching CRC32 HeaderDigest for" 3451 " REJECT PDU 0x%08x\n", *header_digest); 3452 } 3453 3454 if (conn->conn_ops->DataDigest) { 3455 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 3456 (unsigned char *)cmd->buf_ptr, ISCSI_HDR_LEN, 3457 0, NULL, (u8 *)&cmd->data_crc); 3458 3459 iov[iov_count].iov_base = &cmd->data_crc; 3460 iov[iov_count++].iov_len = ISCSI_CRC_LEN; 3461 tx_size += ISCSI_CRC_LEN; 3462 pr_debug("Attaching CRC32 DataDigest for REJECT" 3463 " PDU 0x%08x\n", cmd->data_crc); 3464 } 3465 3466 cmd->iov_misc_count = iov_count; 3467 cmd->tx_size = tx_size; 3468 3469 pr_debug("Built Reject PDU StatSN: 0x%08x, Reason: 0x%02x," 3470 " CID: %hu\n", ntohl(hdr->statsn), hdr->reason, conn->cid); 3471 3472 return 0; 3473 } 3474 3475 void iscsit_thread_get_cpumask(struct iscsi_conn *conn) 3476 { 3477 struct iscsi_thread_set *ts = conn->thread_set; 3478 int ord, cpu; 3479 /* 3480 * thread_id is assigned from iscsit_global->ts_bitmap from 3481 * within iscsi_thread_set.c:iscsi_allocate_thread_sets() 3482 * 3483 * Here we use thread_id to determine which CPU that this 3484 * iSCSI connection's iscsi_thread_set will be scheduled to 3485 * execute upon. 3486 */ 3487 ord = ts->thread_id % cpumask_weight(cpu_online_mask); 3488 for_each_online_cpu(cpu) { 3489 if (ord-- == 0) { 3490 cpumask_set_cpu(cpu, conn->conn_cpumask); 3491 return; 3492 } 3493 } 3494 /* 3495 * This should never be reached.. 3496 */ 3497 dump_stack(); 3498 cpumask_setall(conn->conn_cpumask); 3499 } 3500 3501 static inline void iscsit_thread_check_cpumask( 3502 struct iscsi_conn *conn, 3503 struct task_struct *p, 3504 int mode) 3505 { 3506 char buf[128]; 3507 /* 3508 * mode == 1 signals iscsi_target_tx_thread() usage. 3509 * mode == 0 signals iscsi_target_rx_thread() usage. 3510 */ 3511 if (mode == 1) { 3512 if (!conn->conn_tx_reset_cpumask) 3513 return; 3514 conn->conn_tx_reset_cpumask = 0; 3515 } else { 3516 if (!conn->conn_rx_reset_cpumask) 3517 return; 3518 conn->conn_rx_reset_cpumask = 0; 3519 } 3520 /* 3521 * Update the CPU mask for this single kthread so that 3522 * both TX and RX kthreads are scheduled to run on the 3523 * same CPU. 
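 * conn_tx_reset_cpumask/conn_rx_reset_cpumask act as one-shot flags:
 * the flag was cleared above, and conn->conn_cpumask is now applied to
 * the current kthread via set_cpus_allowed_ptr() below.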
3524 */ 3525 memset(buf, 0, 128); 3526 cpumask_scnprintf(buf, 128, conn->conn_cpumask); 3527 set_cpus_allowed_ptr(p, conn->conn_cpumask); 3528 } 3529 3530 static int handle_immediate_queue(struct iscsi_conn *conn) 3531 { 3532 struct iscsi_queue_req *qr; 3533 struct iscsi_cmd *cmd; 3534 u8 state; 3535 int ret; 3536 3537 while ((qr = iscsit_get_cmd_from_immediate_queue(conn))) { 3538 atomic_set(&conn->check_immediate_queue, 0); 3539 cmd = qr->cmd; 3540 state = qr->state; 3541 kmem_cache_free(lio_qr_cache, qr); 3542 3543 switch (state) { 3544 case ISTATE_SEND_R2T: 3545 ret = iscsit_send_r2t(cmd, conn); 3546 if (ret < 0) 3547 goto err; 3548 break; 3549 case ISTATE_REMOVE: 3550 if (cmd->data_direction == DMA_TO_DEVICE) 3551 iscsit_stop_dataout_timer(cmd); 3552 3553 spin_lock_bh(&conn->cmd_lock); 3554 list_del(&cmd->i_conn_node); 3555 spin_unlock_bh(&conn->cmd_lock); 3556 3557 iscsit_free_cmd(cmd); 3558 continue; 3559 case ISTATE_SEND_NOPIN_WANT_RESPONSE: 3560 iscsit_mod_nopin_response_timer(conn); 3561 ret = iscsit_send_unsolicited_nopin(cmd, 3562 conn, 1); 3563 if (ret < 0) 3564 goto err; 3565 break; 3566 case ISTATE_SEND_NOPIN_NO_RESPONSE: 3567 ret = iscsit_send_unsolicited_nopin(cmd, 3568 conn, 0); 3569 if (ret < 0) 3570 goto err; 3571 break; 3572 default: 3573 pr_err("Unknown Opcode: 0x%02x ITT:" 3574 " 0x%08x, i_state: %d on CID: %hu\n", 3575 cmd->iscsi_opcode, cmd->init_task_tag, state, 3576 conn->cid); 3577 goto err; 3578 } 3579 } 3580 3581 return 0; 3582 3583 err: 3584 return -1; 3585 } 3586 3587 static int handle_response_queue(struct iscsi_conn *conn) 3588 { 3589 struct iscsi_queue_req *qr; 3590 struct iscsi_cmd *cmd; 3591 u8 state; 3592 int ret; 3593 3594 while ((qr = iscsit_get_cmd_from_response_queue(conn))) { 3595 cmd = qr->cmd; 3596 state = qr->state; 3597 kmem_cache_free(lio_qr_cache, qr); 3598 3599 check_rsp_state: 3600 switch (state) { 3601 case ISTATE_SEND_DATAIN: 3602 ret = iscsit_send_data_in(cmd, conn); 3603 if (ret < 0) 3604 goto err; 3605 else if (!ret) 3606 /* more drs */ 3607 goto check_rsp_state; 3608 else if (ret == 1) { 3609 /* all done */ 3610 spin_lock_bh(&cmd->istate_lock); 3611 cmd->i_state = ISTATE_SENT_STATUS; 3612 spin_unlock_bh(&cmd->istate_lock); 3613 continue; 3614 } else if (ret == 2) { 3615 /* Still must send status, 3616 SCF_TRANSPORT_TASK_SENSE was set */ 3617 spin_lock_bh(&cmd->istate_lock); 3618 cmd->i_state = ISTATE_SEND_STATUS; 3619 spin_unlock_bh(&cmd->istate_lock); 3620 state = ISTATE_SEND_STATUS; 3621 goto check_rsp_state; 3622 } 3623 3624 break; 3625 case ISTATE_SEND_STATUS: 3626 case ISTATE_SEND_STATUS_RECOVERY: 3627 ret = iscsit_send_status(cmd, conn); 3628 break; 3629 case ISTATE_SEND_LOGOUTRSP: 3630 ret = iscsit_send_logout_response(cmd, conn); 3631 break; 3632 case ISTATE_SEND_ASYNCMSG: 3633 ret = iscsit_send_conn_drop_async_message( 3634 cmd, conn); 3635 break; 3636 case ISTATE_SEND_NOPIN: 3637 ret = iscsit_send_nopin_response(cmd, conn); 3638 break; 3639 case ISTATE_SEND_REJECT: 3640 ret = iscsit_send_reject(cmd, conn); 3641 break; 3642 case ISTATE_SEND_TASKMGTRSP: 3643 ret = iscsit_send_task_mgt_rsp(cmd, conn); 3644 if (ret != 0) 3645 break; 3646 ret = iscsit_tmr_post_handler(cmd, conn); 3647 if (ret != 0) 3648 iscsit_fall_back_to_erl0(conn->sess); 3649 break; 3650 case ISTATE_SEND_TEXTRSP: 3651 ret = iscsit_send_text_rsp(cmd, conn); 3652 break; 3653 default: 3654 pr_err("Unknown Opcode: 0x%02x ITT:" 3655 " 0x%08x, i_state: %d on CID: %hu\n", 3656 cmd->iscsi_opcode, cmd->init_task_tag, 3657 state, conn->cid); 3658 goto err; 3659 } 3660 
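/*
 * For the states that fall through to here, the iscsit_send_*() helpers
 * above only build the response PDU and its iovecs; transmit the result
 * now and then update i_state to reflect what was just sent.
 */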
if (ret < 0) 3661 goto err; 3662 3663 if (iscsit_send_tx_data(cmd, conn, 1) < 0) { 3664 iscsit_tx_thread_wait_for_tcp(conn); 3665 iscsit_unmap_iovec(cmd); 3666 goto err; 3667 } 3668 iscsit_unmap_iovec(cmd); 3669 3670 switch (state) { 3671 case ISTATE_SEND_LOGOUTRSP: 3672 if (!iscsit_logout_post_handler(cmd, conn)) 3673 goto restart; 3674 /* fall through */ 3675 case ISTATE_SEND_STATUS: 3676 case ISTATE_SEND_ASYNCMSG: 3677 case ISTATE_SEND_NOPIN: 3678 case ISTATE_SEND_STATUS_RECOVERY: 3679 case ISTATE_SEND_TEXTRSP: 3680 case ISTATE_SEND_TASKMGTRSP: 3681 spin_lock_bh(&cmd->istate_lock); 3682 cmd->i_state = ISTATE_SENT_STATUS; 3683 spin_unlock_bh(&cmd->istate_lock); 3684 break; 3685 case ISTATE_SEND_REJECT: 3686 if (cmd->cmd_flags & ICF_REJECT_FAIL_CONN) { 3687 cmd->cmd_flags &= ~ICF_REJECT_FAIL_CONN; 3688 complete(&cmd->reject_comp); 3689 goto err; 3690 } 3691 complete(&cmd->reject_comp); 3692 break; 3693 default: 3694 pr_err("Unknown Opcode: 0x%02x ITT:" 3695 " 0x%08x, i_state: %d on CID: %hu\n", 3696 cmd->iscsi_opcode, cmd->init_task_tag, 3697 cmd->i_state, conn->cid); 3698 goto err; 3699 } 3700 3701 if (atomic_read(&conn->check_immediate_queue)) 3702 break; 3703 } 3704 3705 return 0; 3706 3707 err: 3708 return -1; 3709 restart: 3710 return -EAGAIN; 3711 } 3712 3713 int iscsi_target_tx_thread(void *arg) 3714 { 3715 int ret = 0; 3716 struct iscsi_conn *conn; 3717 struct iscsi_thread_set *ts = arg; 3718 /* 3719 * Allow ourselves to be interrupted by SIGINT so that a 3720 * connection recovery / failure event can be triggered externally. 3721 */ 3722 allow_signal(SIGINT); 3723 3724 restart: 3725 conn = iscsi_tx_thread_pre_handler(ts); 3726 if (!conn) 3727 goto out; 3728 3729 ret = 0; 3730 3731 while (!kthread_should_stop()) { 3732 /* 3733 * Ensure that both TX and RX per connection kthreads 3734 * are scheduled to run on the same CPU. 3735 */ 3736 iscsit_thread_check_cpumask(conn, current, 1); 3737 3738 schedule_timeout_interruptible(MAX_SCHEDULE_TIMEOUT); 3739 3740 if ((ts->status == ISCSI_THREAD_SET_RESET) || 3741 signal_pending(current)) 3742 goto transport_err; 3743 3744 ret = handle_immediate_queue(conn); 3745 if (ret < 0) 3746 goto transport_err; 3747 3748 ret = handle_response_queue(conn); 3749 if (ret == -EAGAIN) 3750 goto restart; 3751 else if (ret < 0) 3752 goto transport_err; 3753 } 3754 3755 transport_err: 3756 iscsit_take_action_for_connection_exit(conn); 3757 goto restart; 3758 out: 3759 return 0; 3760 } 3761 3762 int iscsi_target_rx_thread(void *arg) 3763 { 3764 int ret; 3765 u8 buffer[ISCSI_HDR_LEN], opcode; 3766 u32 checksum = 0, digest = 0; 3767 struct iscsi_conn *conn = NULL; 3768 struct iscsi_thread_set *ts = arg; 3769 struct kvec iov; 3770 /* 3771 * Allow ourselves to be interrupted by SIGINT so that a 3772 * connection recovery / failure event can be triggered externally. 3773 */ 3774 allow_signal(SIGINT); 3775 3776 restart: 3777 conn = iscsi_rx_thread_pre_handler(ts); 3778 if (!conn) 3779 goto out; 3780 3781 while (!kthread_should_stop()) { 3782 /* 3783 * Ensure that both TX and RX per connection kthreads 3784 * are scheduled to run on the same CPU. 
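 * (mode 0 selects the RX-side reset flag in
 * iscsit_thread_check_cpumask(); the TX thread passes mode 1)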
		 */
		iscsit_thread_check_cpumask(conn, current, 0);

		memset(buffer, 0, ISCSI_HDR_LEN);
		memset(&iov, 0, sizeof(struct kvec));

		iov.iov_base	= buffer;
		iov.iov_len	= ISCSI_HDR_LEN;

		ret = rx_data(conn, &iov, 1, ISCSI_HDR_LEN);
		if (ret != ISCSI_HDR_LEN) {
			iscsit_rx_thread_wait_for_tcp(conn);
			goto transport_err;
		}

		/*
		 * Set conn->bad_hdr for use with REJECT PDUs.
		 */
		memcpy(&conn->bad_hdr, &buffer, ISCSI_HDR_LEN);

		if (conn->conn_ops->HeaderDigest) {
			iov.iov_base	= &digest;
			iov.iov_len	= ISCSI_CRC_LEN;

			ret = rx_data(conn, &iov, 1, ISCSI_CRC_LEN);
			if (ret != ISCSI_CRC_LEN) {
				iscsit_rx_thread_wait_for_tcp(conn);
				goto transport_err;
			}

			iscsit_do_crypto_hash_buf(&conn->conn_rx_hash,
					buffer, ISCSI_HDR_LEN,
					0, NULL, (u8 *)&checksum);

			if (digest != checksum) {
				pr_err("HeaderDigest CRC32C failed,"
					" received 0x%08x, computed 0x%08x\n",
					digest, checksum);
				/*
				 * Set the PDU to 0xff so it will intentionally
				 * hit default in the switch below.
				 */
				memset(buffer, 0xff, ISCSI_HDR_LEN);
				spin_lock_bh(&conn->sess->session_stats_lock);
				conn->sess->conn_digest_errors++;
				spin_unlock_bh(&conn->sess->session_stats_lock);
			} else {
				pr_debug("Got HeaderDigest CRC32C"
						" 0x%08x\n", checksum);
			}
		}

		if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)
			goto transport_err;

		opcode = buffer[0] & ISCSI_OPCODE_MASK;

		if (conn->sess->sess_ops->SessionType &&
		   ((opcode != ISCSI_OP_TEXT) &&
		    (opcode != ISCSI_OP_LOGOUT))) {
			pr_err("Received illegal iSCSI Opcode: 0x%02x"
				" while in Discovery Session, rejecting.\n",
				opcode);
			iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
					buffer, conn);
			goto transport_err;
		}

		switch (opcode) {
		case ISCSI_OP_SCSI_CMD:
			if (iscsit_handle_scsi_cmd(conn, buffer) < 0)
				goto transport_err;
			break;
		case ISCSI_OP_SCSI_DATA_OUT:
			if (iscsit_handle_data_out(conn, buffer) < 0)
				goto transport_err;
			break;
		case ISCSI_OP_NOOP_OUT:
			if (iscsit_handle_nop_out(conn, buffer) < 0)
				goto transport_err;
			break;
		case ISCSI_OP_SCSI_TMFUNC:
			if (iscsit_handle_task_mgt_cmd(conn, buffer) < 0)
				goto transport_err;
			break;
		case ISCSI_OP_TEXT:
			if (iscsit_handle_text_cmd(conn, buffer) < 0)
				goto transport_err;
			break;
		case ISCSI_OP_LOGOUT:
			ret = iscsit_handle_logout_cmd(conn, buffer);
			if (ret > 0) {
				wait_for_completion_timeout(&conn->conn_logout_comp,
						SECONDS_FOR_LOGOUT_COMP * HZ);
				goto transport_err;
			} else if (ret < 0)
				goto transport_err;
			break;
		case ISCSI_OP_SNACK:
			if (iscsit_handle_snack(conn, buffer) < 0)
				goto transport_err;
			break;
		default:
			pr_err("Got unknown iSCSI OpCode: 0x%02x\n",
					opcode);
			if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
				pr_err("Cannot recover from unknown"
					" opcode while ERL=0, closing iSCSI"
					" connection.\n");
				goto transport_err;
			}
			if (!conn->conn_ops->OFMarker) {
				pr_err("Unable to recover from unknown"
					" opcode while OFMarker=No, closing"
					" iSCSI connection.\n");
				goto transport_err;
			}
			if (iscsit_recover_from_unknown_opcode(conn) < 0) {
				pr_err("Unable to recover from unknown"
					" opcode, closing iSCSI connection.\n");
				goto transport_err;
			}
			break;
		}
	}

transport_err:
	if (!signal_pending(current))
		atomic_set(&conn->transport_failed, 1);
	iscsit_take_action_for_connection_exit(conn);
	goto restart;
out:
	return 0;
}

static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
{
	struct iscsi_cmd *cmd = NULL, *cmd_tmp = NULL;
	struct iscsi_session *sess = conn->sess;
	/*
	 * We expect this function to only ever be called from either the RX
	 * or the TX thread context via iscsit_close_connection(), once the
	 * other context has been reset and has returned to its sleeping
	 * pre-handler state.
	 */
	spin_lock_bh(&conn->cmd_lock);
	list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_conn_node) {

		list_del(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		iscsit_increment_maxcmdsn(cmd, sess);

		iscsit_free_cmd(cmd);

		spin_lock_bh(&conn->cmd_lock);
	}
	spin_unlock_bh(&conn->cmd_lock);
}

static void iscsit_stop_timers_for_cmds(
	struct iscsi_conn *conn)
{
	struct iscsi_cmd *cmd;

	spin_lock_bh(&conn->cmd_lock);
	list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
		if (cmd->data_direction == DMA_TO_DEVICE)
			iscsit_stop_dataout_timer(cmd);
	}
	spin_unlock_bh(&conn->cmd_lock);
}

int iscsit_close_connection(
	struct iscsi_conn *conn)
{
	int conn_logout = (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT);
	struct iscsi_session *sess = conn->sess;

	pr_debug("Closing iSCSI connection CID %hu on SID:"
		" %u\n", conn->cid, sess->sid);
	/*
	 * Always complete conn_logout_comp just in case the RX Thread is
	 * sleeping and the logout response never got sent because the
	 * connection failed.
	 */
	complete(&conn->conn_logout_comp);

	iscsi_release_thread_set(conn);

	iscsit_stop_timers_for_cmds(conn);
	iscsit_stop_nopin_response_timer(conn);
	iscsit_stop_nopin_timer(conn);
	iscsit_free_queue_reqs_for_conn(conn);

	/*
	 * During Connection recovery drop unacknowledged out-of-order
	 * commands for this connection, and prepare the other commands
	 * for reallegiance.
	 *
	 * During normal operation clear the out-of-order commands (but
	 * do not free the struct iscsi_ooo_cmdsn's) and release all
	 * struct iscsi_cmds.
	 */
	if (atomic_read(&conn->connection_recovery)) {
		iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(conn);
		iscsit_prepare_cmds_for_realligance(conn);
	} else {
		iscsit_clear_ooo_cmdsns_for_conn(conn);
		iscsit_release_commands_from_conn(conn);
	}

	/*
	 * Handle decrementing the session or connection usage count if
	 * a logout response was not able to be sent because the
	 * connection failed.  Fall back to Session Recovery here.
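	 * A CLOSESESSION logout drops both the connection and the session
	 * usage counts; a CLOSECONNECTION logout drops only the connection
	 * usage count.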
	 */
	if (atomic_read(&conn->conn_logout_remove)) {
		if (conn->conn_logout_reason == ISCSI_LOGOUT_REASON_CLOSE_SESSION) {
			iscsit_dec_conn_usage_count(conn);
			iscsit_dec_session_usage_count(sess);
		}
		if (conn->conn_logout_reason == ISCSI_LOGOUT_REASON_CLOSE_CONNECTION)
			iscsit_dec_conn_usage_count(conn);

		atomic_set(&conn->conn_logout_remove, 0);
		atomic_set(&sess->session_reinstatement, 0);
		atomic_set(&sess->session_fall_back_to_erl0, 1);
	}

	spin_lock_bh(&sess->conn_lock);
	list_del(&conn->conn_list);

	/*
	 * Attempt to let the Initiator know this connection failed by
	 * sending a Connection Dropped Async Message on another
	 * active connection.
	 */
	if (atomic_read(&conn->connection_recovery))
		iscsit_build_conn_drop_async_message(conn);

	spin_unlock_bh(&sess->conn_lock);

	/*
	 * If connection reinstatement is being performed on this connection,
	 * complete the connection reinstatement completion that is being
	 * waited on in iscsit_cause_connection_reinstatement().
	 */
	spin_lock_bh(&conn->state_lock);
	if (atomic_read(&conn->sleep_on_conn_wait_comp)) {
		spin_unlock_bh(&conn->state_lock);
		complete(&conn->conn_wait_comp);
		wait_for_completion(&conn->conn_post_wait_comp);
		spin_lock_bh(&conn->state_lock);
	}

	/*
	 * If connection reinstatement is being performed on this connection
	 * by receiving a REMOVECONNFORRECOVERY logout request, complete the
	 * conn_wait_rcfr_comp completion that is being waited on in
	 * iscsit_connection_reinstatement_rcfr().
	 */
	if (atomic_read(&conn->connection_wait_rcfr)) {
		spin_unlock_bh(&conn->state_lock);
		complete(&conn->conn_wait_rcfr_comp);
		wait_for_completion(&conn->conn_post_wait_comp);
		spin_lock_bh(&conn->state_lock);
	}
	atomic_set(&conn->connection_reinstatement, 1);
	spin_unlock_bh(&conn->state_lock);

	/*
	 * If any other processes are accessing this connection pointer we
	 * must wait until they have completed.
	 */
	iscsit_check_conn_usage_count(conn);

	if (conn->conn_rx_hash.tfm)
		crypto_free_hash(conn->conn_rx_hash.tfm);
	if (conn->conn_tx_hash.tfm)
		crypto_free_hash(conn->conn_tx_hash.tfm);

	if (conn->conn_cpumask)
		free_cpumask_var(conn->conn_cpumask);

	kfree(conn->conn_ops);
	conn->conn_ops = NULL;

	if (conn->sock)
		sock_release(conn->sock);
	conn->thread_set = NULL;

	pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
	conn->conn_state = TARG_CONN_STATE_FREE;
	kfree(conn);

	spin_lock_bh(&sess->conn_lock);
	atomic_dec(&sess->nconn);
	pr_debug("Decremented iSCSI connection count to %hu from node:"
		" %s\n", atomic_read(&sess->nconn),
		sess->sess_ops->InitiatorName);
	/*
	 * Make sure that if one connection fails in a non-ERL=2 iSCSI
	 * Session that they all fail.
	 */
	if ((sess->sess_ops->ErrorRecoveryLevel != 2) && !conn_logout &&
	     !atomic_read(&sess->session_logout))
		atomic_set(&sess->session_fall_back_to_erl0, 1);

	/*
	 * If this was not the last connection in the session, and we are
	 * performing session reinstatement or falling back to ERL=0, call
	 * iscsit_stop_session() without sleeping to shut down the other
	 * active connections.
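	 * The session_stop_active flag below ensures that only the first
	 * failing connection invokes iscsit_stop_session() for this session.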
	 */
	if (atomic_read(&sess->nconn)) {
		if (!atomic_read(&sess->session_reinstatement) &&
		    !atomic_read(&sess->session_fall_back_to_erl0)) {
			spin_unlock_bh(&sess->conn_lock);
			return 0;
		}
		if (!atomic_read(&sess->session_stop_active)) {
			atomic_set(&sess->session_stop_active, 1);
			spin_unlock_bh(&sess->conn_lock);
			iscsit_stop_session(sess, 0, 0);
			return 0;
		}
		spin_unlock_bh(&sess->conn_lock);
		return 0;
	}

	/*
	 * If this was the last connection in the session, handle one of the
	 * following cases:
	 *
	 * Session Reinstatement is not being performed and we are falling
	 * back to ERL=0, so drop the final session reference with
	 * target_put_session().
	 *
	 * Session Logout was requested.  iscsit_close_session() will be
	 * called elsewhere.
	 *
	 * Session Continuation is not being performed, so start the
	 * Time2Retain handler and check if sleep_on_sess_wait_comp is active.
	 */
	if (!atomic_read(&sess->session_reinstatement) &&
	     atomic_read(&sess->session_fall_back_to_erl0)) {
		spin_unlock_bh(&sess->conn_lock);
		target_put_session(sess->se_sess);

		return 0;
	} else if (atomic_read(&sess->session_logout)) {
		pr_debug("Moving to TARG_SESS_STATE_FREE.\n");
		sess->session_state = TARG_SESS_STATE_FREE;
		spin_unlock_bh(&sess->conn_lock);

		if (atomic_read(&sess->sleep_on_sess_wait_comp))
			complete(&sess->session_wait_comp);

		return 0;
	} else {
		pr_debug("Moving to TARG_SESS_STATE_FAILED.\n");
		sess->session_state = TARG_SESS_STATE_FAILED;

		if (!atomic_read(&sess->session_continuation)) {
			spin_unlock_bh(&sess->conn_lock);
			iscsit_start_time2retain_handler(sess);
		} else
			spin_unlock_bh(&sess->conn_lock);

		if (atomic_read(&sess->sleep_on_sess_wait_comp))
			complete(&sess->session_wait_comp);

		return 0;
	}
	spin_unlock_bh(&sess->conn_lock);

	return 0;
}

int iscsit_close_session(struct iscsi_session *sess)
{
	struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess);
	struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;

	if (atomic_read(&sess->nconn)) {
		pr_err("%d connection(s) still exist for iSCSI session"
			" to %s\n", atomic_read(&sess->nconn),
			sess->sess_ops->InitiatorName);
		BUG();
	}

	spin_lock_bh(&se_tpg->session_lock);
	atomic_set(&sess->session_logout, 1);
	atomic_set(&sess->session_reinstatement, 1);
	iscsit_stop_time2retain_timer(sess);
	spin_unlock_bh(&se_tpg->session_lock);

	/*
	 * transport_deregister_session_configfs() will clear the
	 * struct se_node_acl->nacl_sess pointer now, as an iscsi_np process
	 * context can set it again with __transport_register_session() in
	 * iscsi_post_login_handler() after iscsit_stop_session() completes
	 * in iscsi_np context.
	 */
	transport_deregister_session_configfs(sess->se_sess);

	/*
	 * If any other processes are accessing this session pointer we must
	 * wait until they have completed.  If we are in an interrupt (the
	 * Time2Retain handler) and hold an active session usage count, we
	 * restart the timer and exit.
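	 * In process context it is safe to sleep inside iscsit_stop_session();
	 * from the Time2Retain timer we must instead rearm the timer and
	 * retry the release later.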
	 */
	if (!in_interrupt()) {
		if (iscsit_check_session_usage_count(sess) == 1)
			iscsit_stop_session(sess, 1, 1);
	} else {
		if (iscsit_check_session_usage_count(sess) == 2) {
			atomic_set(&sess->session_logout, 0);
			iscsit_start_time2retain_handler(sess);
			return 0;
		}
	}

	transport_deregister_session(sess->se_sess);

	if (sess->sess_ops->ErrorRecoveryLevel == 2)
		iscsit_free_connection_recovery_entires(sess);

	iscsit_free_all_ooo_cmdsns(sess);

	spin_lock_bh(&se_tpg->session_lock);
	pr_debug("Moving to TARG_SESS_STATE_FREE.\n");
	sess->session_state = TARG_SESS_STATE_FREE;
	pr_debug("Released iSCSI session from node: %s\n",
			sess->sess_ops->InitiatorName);
	tpg->nsessions--;
	if (tpg->tpg_tiqn)
		tpg->tpg_tiqn->tiqn_nsessions--;

	pr_debug("Decremented number of active iSCSI Sessions on"
		" iSCSI TPG: %hu to %u\n", tpg->tpgt, tpg->nsessions);

	spin_lock(&sess_idr_lock);
	idr_remove(&sess_idr, sess->session_index);
	spin_unlock(&sess_idr_lock);

	kfree(sess->sess_ops);
	sess->sess_ops = NULL;
	spin_unlock_bh(&se_tpg->session_lock);

	kfree(sess);
	return 0;
}

static void iscsit_logout_post_handler_closesession(
	struct iscsi_conn *conn)
{
	struct iscsi_session *sess = conn->sess;

	iscsi_set_thread_clear(conn, ISCSI_CLEAR_TX_THREAD);
	iscsi_set_thread_set_signal(conn, ISCSI_SIGNAL_TX_THREAD);

	atomic_set(&conn->conn_logout_remove, 0);
	complete(&conn->conn_logout_comp);

	iscsit_dec_conn_usage_count(conn);
	iscsit_stop_session(sess, 1, 1);
	iscsit_dec_session_usage_count(sess);
	target_put_session(sess->se_sess);
}

static void iscsit_logout_post_handler_samecid(
	struct iscsi_conn *conn)
{
	iscsi_set_thread_clear(conn, ISCSI_CLEAR_TX_THREAD);
	iscsi_set_thread_set_signal(conn, ISCSI_SIGNAL_TX_THREAD);

	atomic_set(&conn->conn_logout_remove, 0);
	complete(&conn->conn_logout_comp);

	iscsit_cause_connection_reinstatement(conn, 1);
	iscsit_dec_conn_usage_count(conn);
}

static void iscsit_logout_post_handler_diffcid(
	struct iscsi_conn *conn,
	u16 cid)
{
	struct iscsi_conn *l_conn = NULL, *conn_tmp;
	struct iscsi_session *sess = conn->sess;

	if (!sess)
		return;

	spin_lock_bh(&sess->conn_lock);
	list_for_each_entry(conn_tmp, &sess->sess_conn_list, conn_list) {
		if (conn_tmp->cid == cid) {
			iscsit_inc_conn_usage_count(conn_tmp);
			l_conn = conn_tmp;
			break;
		}
	}
	spin_unlock_bh(&sess->conn_lock);

	if (!l_conn)
		return;

	if (l_conn->sock)
		l_conn->sock->ops->shutdown(l_conn->sock, RCV_SHUTDOWN);

	spin_lock_bh(&l_conn->state_lock);
	pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n");
	l_conn->conn_state = TARG_CONN_STATE_IN_LOGOUT;
	spin_unlock_bh(&l_conn->state_lock);

	iscsit_cause_connection_reinstatement(l_conn, 1);
	iscsit_dec_conn_usage_count(l_conn);
}

/*
 * Return of 0 causes the TX thread to restart.
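 * A non-zero return lets handle_response_queue() proceed with the normal
 * post-send i_state update for the Logout Response.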
 */
static int iscsit_logout_post_handler(
	struct iscsi_cmd *cmd,
	struct iscsi_conn *conn)
{
	int ret = 0;

	switch (cmd->logout_reason) {
	case ISCSI_LOGOUT_REASON_CLOSE_SESSION:
		switch (cmd->logout_response) {
		case ISCSI_LOGOUT_SUCCESS:
		case ISCSI_LOGOUT_CLEANUP_FAILED:
		default:
			iscsit_logout_post_handler_closesession(conn);
			break;
		}
		ret = 0;
		break;
	case ISCSI_LOGOUT_REASON_CLOSE_CONNECTION:
		if (conn->cid == cmd->logout_cid) {
			switch (cmd->logout_response) {
			case ISCSI_LOGOUT_SUCCESS:
			case ISCSI_LOGOUT_CLEANUP_FAILED:
			default:
				iscsit_logout_post_handler_samecid(conn);
				break;
			}
			ret = 0;
		} else {
			switch (cmd->logout_response) {
			case ISCSI_LOGOUT_SUCCESS:
				iscsit_logout_post_handler_diffcid(conn,
					cmd->logout_cid);
				break;
			case ISCSI_LOGOUT_CID_NOT_FOUND:
			case ISCSI_LOGOUT_CLEANUP_FAILED:
			default:
				break;
			}
			ret = 1;
		}
		break;
	case ISCSI_LOGOUT_REASON_RECOVERY:
		switch (cmd->logout_response) {
		case ISCSI_LOGOUT_SUCCESS:
		case ISCSI_LOGOUT_CID_NOT_FOUND:
		case ISCSI_LOGOUT_RECOVERY_UNSUPPORTED:
		case ISCSI_LOGOUT_CLEANUP_FAILED:
		default:
			break;
		}
		ret = 1;
		break;
	default:
		break;
	}
	return ret;
}

void iscsit_fail_session(struct iscsi_session *sess)
{
	struct iscsi_conn *conn;

	spin_lock_bh(&sess->conn_lock);
	list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
		pr_debug("Moving to TARG_CONN_STATE_CLEANUP_WAIT.\n");
		conn->conn_state = TARG_CONN_STATE_CLEANUP_WAIT;
	}
	spin_unlock_bh(&sess->conn_lock);

	pr_debug("Moving to TARG_SESS_STATE_FAILED.\n");
	sess->session_state = TARG_SESS_STATE_FAILED;
}

int iscsit_free_session(struct iscsi_session *sess)
{
	u16 conn_count = atomic_read(&sess->nconn);
	struct iscsi_conn *conn, *conn_tmp = NULL;
	int is_last;

	spin_lock_bh(&sess->conn_lock);
	atomic_set(&sess->sleep_on_sess_wait_comp, 1);

	list_for_each_entry_safe(conn, conn_tmp, &sess->sess_conn_list,
			conn_list) {
		if (conn_count == 0)
			break;

		if (list_is_last(&conn->conn_list, &sess->sess_conn_list)) {
			is_last = 1;
		} else {
			iscsit_inc_conn_usage_count(conn_tmp);
			is_last = 0;
		}
		iscsit_inc_conn_usage_count(conn);

		spin_unlock_bh(&sess->conn_lock);
		iscsit_cause_connection_reinstatement(conn, 1);
		spin_lock_bh(&sess->conn_lock);

		iscsit_dec_conn_usage_count(conn);
		if (is_last == 0)
			iscsit_dec_conn_usage_count(conn_tmp);

		conn_count--;
	}

	if (atomic_read(&sess->nconn)) {
		spin_unlock_bh(&sess->conn_lock);
		wait_for_completion(&sess->session_wait_comp);
	} else
		spin_unlock_bh(&sess->conn_lock);

	target_put_session(sess->se_sess);
	return 0;
}

void iscsit_stop_session(
	struct iscsi_session *sess,
	int session_sleep,
	int connection_sleep)
{
	u16 conn_count = atomic_read(&sess->nconn);
	struct iscsi_conn *conn, *conn_tmp = NULL;
	int is_last;

	spin_lock_bh(&sess->conn_lock);
	if (session_sleep)
		atomic_set(&sess->sleep_on_sess_wait_comp, 1);

	if (connection_sleep) {
		list_for_each_entry_safe(conn, conn_tmp, &sess->sess_conn_list,
				conn_list) {
			if (conn_count == 0)
				break;

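			/*
			 * Hold an extra reference on the next list entry
			 * (unless this is the last one) so it cannot go away
			 * while sess->conn_lock is dropped for the
			 * reinstatement call below.
			 */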
			if (list_is_last(&conn->conn_list, &sess->sess_conn_list)) {
				is_last = 1;
			} else {
				iscsit_inc_conn_usage_count(conn_tmp);
				is_last = 0;
			}
			iscsit_inc_conn_usage_count(conn);

			spin_unlock_bh(&sess->conn_lock);
			iscsit_cause_connection_reinstatement(conn, 1);
			spin_lock_bh(&sess->conn_lock);

			iscsit_dec_conn_usage_count(conn);
			if (is_last == 0)
				iscsit_dec_conn_usage_count(conn_tmp);
			conn_count--;
		}
	} else {
		list_for_each_entry(conn, &sess->sess_conn_list, conn_list)
			iscsit_cause_connection_reinstatement(conn, 0);
	}

	if (session_sleep && atomic_read(&sess->nconn)) {
		spin_unlock_bh(&sess->conn_lock);
		wait_for_completion(&sess->session_wait_comp);
	} else
		spin_unlock_bh(&sess->conn_lock);
}

int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
{
	struct iscsi_session *sess;
	struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
	struct se_session *se_sess, *se_sess_tmp;
	int session_count = 0;

	spin_lock_bh(&se_tpg->session_lock);
	if (tpg->nsessions && !force) {
		spin_unlock_bh(&se_tpg->session_lock);
		return -1;
	}

	list_for_each_entry_safe(se_sess, se_sess_tmp, &se_tpg->tpg_sess_list,
			sess_list) {
		sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;

		spin_lock(&sess->conn_lock);
		if (atomic_read(&sess->session_fall_back_to_erl0) ||
		    atomic_read(&sess->session_logout) ||
		   (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
			spin_unlock(&sess->conn_lock);
			continue;
		}
		atomic_set(&sess->session_reinstatement, 1);
		spin_unlock(&sess->conn_lock);
		spin_unlock_bh(&se_tpg->session_lock);

		iscsit_free_session(sess);
		spin_lock_bh(&se_tpg->session_lock);

		session_count++;
	}
	spin_unlock_bh(&se_tpg->session_lock);

	pr_debug("Released %d iSCSI Session(s) from Target Portal"
		" Group: %hu\n", session_count, tpg->tpgt);
	return 0;
}

MODULE_DESCRIPTION("iSCSI-Target Driver for mainline target infrastructure");
MODULE_VERSION("4.1.x");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(iscsi_target_init_module);
module_exit(iscsi_target_cleanup_module);