/*******************************************************************************
 * This file contains main functions related to the iSCSI Target Core Driver.
 *
 * © Copyright 2007-2011 RisingTide Systems LLC.
 *
 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
 *
 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 ******************************************************************************/

#include <linux/string.h>
#include <linux/kthread.h>
#include <linux/crypto.h>
#include <linux/completion.h>
#include <linux/module.h>
#include <asm/unaligned.h>
#include <scsi/scsi_device.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_tmr.h>
#include <target/target_core_transport.h>

#include "iscsi_target_core.h"
#include "iscsi_target_parameters.h"
#include "iscsi_target_seq_pdu_list.h"
#include "iscsi_target_tq.h"
#include "iscsi_target_configfs.h"
#include "iscsi_target_datain_values.h"
#include "iscsi_target_erl0.h"
#include "iscsi_target_erl1.h"
#include "iscsi_target_erl2.h"
#include "iscsi_target_login.h"
#include "iscsi_target_tmr.h"
#include "iscsi_target_tpg.h"
#include "iscsi_target_util.h"
#include "iscsi_target.h"
#include "iscsi_target_device.h"
#include "iscsi_target_stat.h"

static LIST_HEAD(g_tiqn_list);
static LIST_HEAD(g_np_list);
static DEFINE_SPINLOCK(tiqn_lock);
static DEFINE_SPINLOCK(np_lock);

static struct idr tiqn_idr;
struct idr sess_idr;
struct mutex auth_id_lock;
spinlock_t sess_idr_lock;

struct iscsit_global *iscsit_global;

struct kmem_cache *lio_cmd_cache;
struct kmem_cache *lio_qr_cache;
struct kmem_cache *lio_dr_cache;
struct kmem_cache *lio_ooo_cache;
struct kmem_cache *lio_r2t_cache;

static int iscsit_handle_immediate_data(struct iscsi_cmd *,
			unsigned char *buf, u32);
static int iscsit_logout_post_handler(struct iscsi_cmd *, struct iscsi_conn *);

struct iscsi_tiqn *iscsit_get_tiqn_for_login(unsigned char *buf)
{
	struct iscsi_tiqn *tiqn = NULL;

	spin_lock(&tiqn_lock);
	list_for_each_entry(tiqn, &g_tiqn_list, tiqn_list) {
		if (!strcmp(tiqn->tiqn, buf)) {

			spin_lock(&tiqn->tiqn_state_lock);
			if (tiqn->tiqn_state == TIQN_STATE_ACTIVE) {
				tiqn->tiqn_access_count++;
				spin_unlock(&tiqn->tiqn_state_lock);
				spin_unlock(&tiqn_lock);
				return tiqn;
			}
			spin_unlock(&tiqn->tiqn_state_lock);
		}
	}
	spin_unlock(&tiqn_lock);

	return NULL;
}

static int iscsit_set_tiqn_shutdown(struct iscsi_tiqn *tiqn)
{
	spin_lock(&tiqn->tiqn_state_lock);
	if (tiqn->tiqn_state == TIQN_STATE_ACTIVE) {
		tiqn->tiqn_state = TIQN_STATE_SHUTDOWN;
		spin_unlock(&tiqn->tiqn_state_lock);
		return 0;
	}
	spin_unlock(&tiqn->tiqn_state_lock);

	return -1;
}

void iscsit_put_tiqn_for_login(struct iscsi_tiqn *tiqn)
{
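	/* Drop the tiqn_access_count reference taken in iscsit_get_tiqn_for_login(). */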
	spin_lock(&tiqn->tiqn_state_lock);
	tiqn->tiqn_access_count--;
	spin_unlock(&tiqn->tiqn_state_lock);
}

/*
 * Note that IQN formatting is expected to be done in userspace, and
 * no explicit IQN format checks are done here.
 */
struct iscsi_tiqn *iscsit_add_tiqn(unsigned char *buf)
{
	struct iscsi_tiqn *tiqn = NULL;
	int ret;

	if (strlen(buf) >= ISCSI_IQN_LEN) {
		pr_err("Target IQN exceeds %d bytes\n",
				ISCSI_IQN_LEN);
		return ERR_PTR(-EINVAL);
	}

	tiqn = kzalloc(sizeof(struct iscsi_tiqn), GFP_KERNEL);
	if (!tiqn) {
		pr_err("Unable to allocate struct iscsi_tiqn\n");
		return ERR_PTR(-ENOMEM);
	}

	sprintf(tiqn->tiqn, "%s", buf);
	INIT_LIST_HEAD(&tiqn->tiqn_list);
	INIT_LIST_HEAD(&tiqn->tiqn_tpg_list);
	spin_lock_init(&tiqn->tiqn_state_lock);
	spin_lock_init(&tiqn->tiqn_tpg_lock);
	spin_lock_init(&tiqn->sess_err_stats.lock);
	spin_lock_init(&tiqn->login_stats.lock);
	spin_lock_init(&tiqn->logout_stats.lock);

	if (!idr_pre_get(&tiqn_idr, GFP_KERNEL)) {
		pr_err("idr_pre_get() for tiqn_idr failed\n");
		kfree(tiqn);
		return ERR_PTR(-ENOMEM);
	}
	tiqn->tiqn_state = TIQN_STATE_ACTIVE;

	spin_lock(&tiqn_lock);
	ret = idr_get_new(&tiqn_idr, NULL, &tiqn->tiqn_index);
	if (ret < 0) {
		pr_err("idr_get_new() failed for tiqn->tiqn_index\n");
		spin_unlock(&tiqn_lock);
		kfree(tiqn);
		return ERR_PTR(ret);
	}
	list_add_tail(&tiqn->tiqn_list, &g_tiqn_list);
	spin_unlock(&tiqn_lock);

	pr_debug("CORE[0] - Added iSCSI Target IQN: %s\n", tiqn->tiqn);

	return tiqn;

}

static void iscsit_wait_for_tiqn(struct iscsi_tiqn *tiqn)
{
	/*
	 * Wait for accesses to said struct iscsi_tiqn to end.
	 */
	spin_lock(&tiqn->tiqn_state_lock);
	while (tiqn->tiqn_access_count != 0) {
		spin_unlock(&tiqn->tiqn_state_lock);
		msleep(10);
		spin_lock(&tiqn->tiqn_state_lock);
	}
	spin_unlock(&tiqn->tiqn_state_lock);
}

void iscsit_del_tiqn(struct iscsi_tiqn *tiqn)
{
	/*
	 * iscsit_set_tiqn_shutdown sets tiqn->tiqn_state = TIQN_STATE_SHUTDOWN
	 * while holding tiqn->tiqn_state_lock. This means that all subsequent
	 * attempts to access this struct iscsi_tiqn will fail from both transport
	 * fabric and control code paths.
	 */
	if (iscsit_set_tiqn_shutdown(tiqn) < 0) {
		pr_err("iscsit_set_tiqn_shutdown() failed\n");
		return;
	}

	iscsit_wait_for_tiqn(tiqn);

	spin_lock(&tiqn_lock);
	list_del(&tiqn->tiqn_list);
	idr_remove(&tiqn_idr, tiqn->tiqn_index);
	spin_unlock(&tiqn_lock);

	pr_debug("CORE[0] - Deleted iSCSI Target IQN: %s\n",
			tiqn->tiqn);
	kfree(tiqn);
}

int iscsit_access_np(struct iscsi_np *np, struct iscsi_portal_group *tpg)
{
	int ret;
	/*
	 * Determine if the network portal is accepting storage traffic.
	 */
	spin_lock_bh(&np->np_thread_lock);
	if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
		spin_unlock_bh(&np->np_thread_lock);
		return -1;
	}
	if (np->np_login_tpg) {
		pr_err("np->np_login_tpg is not NULL!\n");
		spin_unlock_bh(&np->np_thread_lock);
		return -1;
	}
	spin_unlock_bh(&np->np_thread_lock);
	/*
	 * Determine if the portal group is accepting storage traffic.
227 */ 228 spin_lock_bh(&tpg->tpg_state_lock); 229 if (tpg->tpg_state != TPG_STATE_ACTIVE) { 230 spin_unlock_bh(&tpg->tpg_state_lock); 231 return -1; 232 } 233 spin_unlock_bh(&tpg->tpg_state_lock); 234 235 /* 236 * Here we serialize access across the TIQN+TPG Tuple. 237 */ 238 ret = mutex_lock_interruptible(&tpg->np_login_lock); 239 if ((ret != 0) || signal_pending(current)) 240 return -1; 241 242 spin_lock_bh(&np->np_thread_lock); 243 np->np_login_tpg = tpg; 244 spin_unlock_bh(&np->np_thread_lock); 245 246 return 0; 247 } 248 249 int iscsit_deaccess_np(struct iscsi_np *np, struct iscsi_portal_group *tpg) 250 { 251 struct iscsi_tiqn *tiqn = tpg->tpg_tiqn; 252 253 spin_lock_bh(&np->np_thread_lock); 254 np->np_login_tpg = NULL; 255 spin_unlock_bh(&np->np_thread_lock); 256 257 mutex_unlock(&tpg->np_login_lock); 258 259 if (tiqn) 260 iscsit_put_tiqn_for_login(tiqn); 261 262 return 0; 263 } 264 265 static struct iscsi_np *iscsit_get_np( 266 struct __kernel_sockaddr_storage *sockaddr, 267 int network_transport) 268 { 269 struct sockaddr_in *sock_in, *sock_in_e; 270 struct sockaddr_in6 *sock_in6, *sock_in6_e; 271 struct iscsi_np *np; 272 int ip_match = 0; 273 u16 port; 274 275 spin_lock_bh(&np_lock); 276 list_for_each_entry(np, &g_np_list, np_list) { 277 spin_lock(&np->np_thread_lock); 278 if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) { 279 spin_unlock(&np->np_thread_lock); 280 continue; 281 } 282 283 if (sockaddr->ss_family == AF_INET6) { 284 sock_in6 = (struct sockaddr_in6 *)sockaddr; 285 sock_in6_e = (struct sockaddr_in6 *)&np->np_sockaddr; 286 287 if (!memcmp((void *)&sock_in6->sin6_addr.in6_u, 288 (void *)&sock_in6_e->sin6_addr.in6_u, 289 sizeof(struct in6_addr))) 290 ip_match = 1; 291 292 port = ntohs(sock_in6->sin6_port); 293 } else { 294 sock_in = (struct sockaddr_in *)sockaddr; 295 sock_in_e = (struct sockaddr_in *)&np->np_sockaddr; 296 297 if (sock_in->sin_addr.s_addr == 298 sock_in_e->sin_addr.s_addr) 299 ip_match = 1; 300 301 port = ntohs(sock_in->sin_port); 302 } 303 304 if ((ip_match == 1) && (np->np_port == port) && 305 (np->np_network_transport == network_transport)) { 306 /* 307 * Increment the np_exports reference count now to 308 * prevent iscsit_del_np() below from being called 309 * while iscsi_tpg_add_network_portal() is called. 310 */ 311 np->np_exports++; 312 spin_unlock(&np->np_thread_lock); 313 spin_unlock_bh(&np_lock); 314 return np; 315 } 316 spin_unlock(&np->np_thread_lock); 317 } 318 spin_unlock_bh(&np_lock); 319 320 return NULL; 321 } 322 323 struct iscsi_np *iscsit_add_np( 324 struct __kernel_sockaddr_storage *sockaddr, 325 char *ip_str, 326 int network_transport) 327 { 328 struct sockaddr_in *sock_in; 329 struct sockaddr_in6 *sock_in6; 330 struct iscsi_np *np; 331 int ret; 332 /* 333 * Locate the existing struct iscsi_np if already active.. 
334 */ 335 np = iscsit_get_np(sockaddr, network_transport); 336 if (np) 337 return np; 338 339 np = kzalloc(sizeof(struct iscsi_np), GFP_KERNEL); 340 if (!np) { 341 pr_err("Unable to allocate memory for struct iscsi_np\n"); 342 return ERR_PTR(-ENOMEM); 343 } 344 345 np->np_flags |= NPF_IP_NETWORK; 346 if (sockaddr->ss_family == AF_INET6) { 347 sock_in6 = (struct sockaddr_in6 *)sockaddr; 348 snprintf(np->np_ip, IPV6_ADDRESS_SPACE, "%s", ip_str); 349 np->np_port = ntohs(sock_in6->sin6_port); 350 } else { 351 sock_in = (struct sockaddr_in *)sockaddr; 352 sprintf(np->np_ip, "%s", ip_str); 353 np->np_port = ntohs(sock_in->sin_port); 354 } 355 356 np->np_network_transport = network_transport; 357 spin_lock_init(&np->np_thread_lock); 358 init_completion(&np->np_restart_comp); 359 INIT_LIST_HEAD(&np->np_list); 360 361 ret = iscsi_target_setup_login_socket(np, sockaddr); 362 if (ret != 0) { 363 kfree(np); 364 return ERR_PTR(ret); 365 } 366 367 np->np_thread = kthread_run(iscsi_target_login_thread, np, "iscsi_np"); 368 if (IS_ERR(np->np_thread)) { 369 pr_err("Unable to create kthread: iscsi_np\n"); 370 ret = PTR_ERR(np->np_thread); 371 kfree(np); 372 return ERR_PTR(ret); 373 } 374 /* 375 * Increment the np_exports reference count now to prevent 376 * iscsit_del_np() below from being run while a new call to 377 * iscsi_tpg_add_network_portal() for a matching iscsi_np is 378 * active. We don't need to hold np->np_thread_lock at this 379 * point because iscsi_np has not been added to g_np_list yet. 380 */ 381 np->np_exports = 1; 382 383 spin_lock_bh(&np_lock); 384 list_add_tail(&np->np_list, &g_np_list); 385 spin_unlock_bh(&np_lock); 386 387 pr_debug("CORE[0] - Added Network Portal: %s:%hu on %s\n", 388 np->np_ip, np->np_port, (np->np_network_transport == ISCSI_TCP) ? 389 "TCP" : "SCTP"); 390 391 return np; 392 } 393 394 int iscsit_reset_np_thread( 395 struct iscsi_np *np, 396 struct iscsi_tpg_np *tpg_np, 397 struct iscsi_portal_group *tpg) 398 { 399 spin_lock_bh(&np->np_thread_lock); 400 if (tpg && tpg_np) { 401 /* 402 * The reset operation need only be performed when the 403 * passed struct iscsi_portal_group has a login in progress 404 * to one of the network portals. 405 */ 406 if (tpg_np->tpg_np->np_login_tpg != tpg) { 407 spin_unlock_bh(&np->np_thread_lock); 408 return 0; 409 } 410 } 411 if (np->np_thread_state == ISCSI_NP_THREAD_INACTIVE) { 412 spin_unlock_bh(&np->np_thread_lock); 413 return 0; 414 } 415 np->np_thread_state = ISCSI_NP_THREAD_RESET; 416 417 if (np->np_thread) { 418 spin_unlock_bh(&np->np_thread_lock); 419 send_sig(SIGINT, np->np_thread, 1); 420 wait_for_completion(&np->np_restart_comp); 421 spin_lock_bh(&np->np_thread_lock); 422 } 423 spin_unlock_bh(&np->np_thread_lock); 424 425 return 0; 426 } 427 428 int iscsit_del_np_comm(struct iscsi_np *np) 429 { 430 if (!np->np_socket) 431 return 0; 432 433 /* 434 * Some network transports allocate their own struct sock->file, 435 * see if we need to free any additional allocated resources. 
	 */
	if (np->np_flags & NPF_SCTP_STRUCT_FILE) {
		kfree(np->np_socket->file);
		np->np_socket->file = NULL;
	}

	sock_release(np->np_socket);
	return 0;
}

int iscsit_del_np(struct iscsi_np *np)
{
	spin_lock_bh(&np->np_thread_lock);
	np->np_exports--;
	if (np->np_exports) {
		spin_unlock_bh(&np->np_thread_lock);
		return 0;
	}
	np->np_thread_state = ISCSI_NP_THREAD_SHUTDOWN;
	spin_unlock_bh(&np->np_thread_lock);

	if (np->np_thread) {
		/*
		 * We need to send the signal to wakeup Linux/Net
		 * which may be sleeping in sock_accept().
		 */
		send_sig(SIGINT, np->np_thread, 1);
		kthread_stop(np->np_thread);
	}
	iscsit_del_np_comm(np);

	spin_lock_bh(&np_lock);
	list_del(&np->np_list);
	spin_unlock_bh(&np_lock);

	pr_debug("CORE[0] - Removed Network Portal: %s:%hu on %s\n",
		np->np_ip, np->np_port, (np->np_network_transport == ISCSI_TCP) ?
		"TCP" : "SCTP");

	kfree(np);
	return 0;
}

static int __init iscsi_target_init_module(void)
{
	int ret = 0;

	pr_debug("iSCSI-Target "ISCSIT_VERSION"\n");

	iscsit_global = kzalloc(sizeof(struct iscsit_global), GFP_KERNEL);
	if (!iscsit_global) {
		pr_err("Unable to allocate memory for iscsit_global\n");
		return -1;
	}
	mutex_init(&auth_id_lock);
	spin_lock_init(&sess_idr_lock);
	idr_init(&tiqn_idr);
	idr_init(&sess_idr);

	ret = iscsi_target_register_configfs();
	if (ret < 0)
		goto out;

	ret = iscsi_thread_set_init();
	if (ret < 0)
		goto configfs_out;

	if (iscsi_allocate_thread_sets(TARGET_THREAD_SET_COUNT) !=
			TARGET_THREAD_SET_COUNT) {
		pr_err("iscsi_allocate_thread_sets() returned"
			" unexpected value!\n");
		goto ts_out1;
	}

	lio_cmd_cache = kmem_cache_create("lio_cmd_cache",
			sizeof(struct iscsi_cmd), __alignof__(struct iscsi_cmd),
			0, NULL);
	if (!lio_cmd_cache) {
		pr_err("Unable to kmem_cache_create() for"
			" lio_cmd_cache\n");
		goto ts_out2;
	}

	lio_qr_cache = kmem_cache_create("lio_qr_cache",
			sizeof(struct iscsi_queue_req),
			__alignof__(struct iscsi_queue_req), 0, NULL);
	if (!lio_qr_cache) {
		pr_err("Unable to kmem_cache_create() for"
			" lio_qr_cache\n");
		goto cmd_out;
	}

	lio_dr_cache = kmem_cache_create("lio_dr_cache",
			sizeof(struct iscsi_datain_req),
			__alignof__(struct iscsi_datain_req), 0, NULL);
	if (!lio_dr_cache) {
		pr_err("Unable to kmem_cache_create() for"
			" lio_dr_cache\n");
		goto qr_out;
	}

	lio_ooo_cache = kmem_cache_create("lio_ooo_cache",
			sizeof(struct iscsi_ooo_cmdsn),
			__alignof__(struct iscsi_ooo_cmdsn), 0, NULL);
	if (!lio_ooo_cache) {
		pr_err("Unable to kmem_cache_create() for"
			" lio_ooo_cache\n");
		goto dr_out;
	}

	lio_r2t_cache = kmem_cache_create("lio_r2t_cache",
			sizeof(struct iscsi_r2t), __alignof__(struct iscsi_r2t),
			0, NULL);
	if (!lio_r2t_cache) {
		pr_err("Unable to kmem_cache_create() for"
			" lio_r2t_cache\n");
		goto ooo_out;
	}

	if (iscsit_load_discovery_tpg() < 0)
		goto r2t_out;

	return ret;
r2t_out:
	kmem_cache_destroy(lio_r2t_cache);
ooo_out:
	kmem_cache_destroy(lio_ooo_cache);
dr_out:
	kmem_cache_destroy(lio_dr_cache);
qr_out:
	kmem_cache_destroy(lio_qr_cache);
cmd_out:
	kmem_cache_destroy(lio_cmd_cache);
ts_out2:
	iscsi_deallocate_thread_sets();
ts_out1:
	iscsi_thread_set_free();
configfs_out: 574 iscsi_target_deregister_configfs(); 575 out: 576 kfree(iscsit_global); 577 return -ENOMEM; 578 } 579 580 static void __exit iscsi_target_cleanup_module(void) 581 { 582 iscsi_deallocate_thread_sets(); 583 iscsi_thread_set_free(); 584 iscsit_release_discovery_tpg(); 585 kmem_cache_destroy(lio_cmd_cache); 586 kmem_cache_destroy(lio_qr_cache); 587 kmem_cache_destroy(lio_dr_cache); 588 kmem_cache_destroy(lio_ooo_cache); 589 kmem_cache_destroy(lio_r2t_cache); 590 591 iscsi_target_deregister_configfs(); 592 593 kfree(iscsit_global); 594 } 595 596 int iscsit_add_reject( 597 u8 reason, 598 int fail_conn, 599 unsigned char *buf, 600 struct iscsi_conn *conn) 601 { 602 struct iscsi_cmd *cmd; 603 struct iscsi_reject *hdr; 604 int ret; 605 606 cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); 607 if (!cmd) 608 return -1; 609 610 cmd->iscsi_opcode = ISCSI_OP_REJECT; 611 if (fail_conn) 612 cmd->cmd_flags |= ICF_REJECT_FAIL_CONN; 613 614 hdr = (struct iscsi_reject *) cmd->pdu; 615 hdr->reason = reason; 616 617 cmd->buf_ptr = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL); 618 if (!cmd->buf_ptr) { 619 pr_err("Unable to allocate memory for cmd->buf_ptr\n"); 620 iscsit_release_cmd(cmd); 621 return -1; 622 } 623 memcpy(cmd->buf_ptr, buf, ISCSI_HDR_LEN); 624 625 spin_lock_bh(&conn->cmd_lock); 626 list_add_tail(&cmd->i_list, &conn->conn_cmd_list); 627 spin_unlock_bh(&conn->cmd_lock); 628 629 cmd->i_state = ISTATE_SEND_REJECT; 630 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); 631 632 ret = wait_for_completion_interruptible(&cmd->reject_comp); 633 if (ret != 0) 634 return -1; 635 636 return (!fail_conn) ? 0 : -1; 637 } 638 639 int iscsit_add_reject_from_cmd( 640 u8 reason, 641 int fail_conn, 642 int add_to_conn, 643 unsigned char *buf, 644 struct iscsi_cmd *cmd) 645 { 646 struct iscsi_conn *conn; 647 struct iscsi_reject *hdr; 648 int ret; 649 650 if (!cmd->conn) { 651 pr_err("cmd->conn is NULL for ITT: 0x%08x\n", 652 cmd->init_task_tag); 653 return -1; 654 } 655 conn = cmd->conn; 656 657 cmd->iscsi_opcode = ISCSI_OP_REJECT; 658 if (fail_conn) 659 cmd->cmd_flags |= ICF_REJECT_FAIL_CONN; 660 661 hdr = (struct iscsi_reject *) cmd->pdu; 662 hdr->reason = reason; 663 664 cmd->buf_ptr = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL); 665 if (!cmd->buf_ptr) { 666 pr_err("Unable to allocate memory for cmd->buf_ptr\n"); 667 iscsit_release_cmd(cmd); 668 return -1; 669 } 670 memcpy(cmd->buf_ptr, buf, ISCSI_HDR_LEN); 671 672 if (add_to_conn) { 673 spin_lock_bh(&conn->cmd_lock); 674 list_add_tail(&cmd->i_list, &conn->conn_cmd_list); 675 spin_unlock_bh(&conn->cmd_lock); 676 } 677 678 cmd->i_state = ISTATE_SEND_REJECT; 679 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); 680 681 ret = wait_for_completion_interruptible(&cmd->reject_comp); 682 if (ret != 0) 683 return -1; 684 685 return (!fail_conn) ? 0 : -1; 686 } 687 688 /* 689 * Map some portion of the allocated scatterlist to an iovec, suitable for 690 * kernel sockets to copy data in/out. This handles both pages and slab-allocated 691 * buffers, since we have been tricky and mapped t_mem_sg to the buffer in 692 * either case (see iscsit_alloc_buffs) 693 */ 694 static int iscsit_map_iovec( 695 struct iscsi_cmd *cmd, 696 struct kvec *iov, 697 u32 data_offset, 698 u32 data_length) 699 { 700 u32 i = 0; 701 struct scatterlist *sg; 702 unsigned int page_off; 703 704 /* 705 * We have a private mapping of the allocated pages in t_mem_sg. 706 * At this point, we also know each contains a page. 
707 */ 708 sg = &cmd->t_mem_sg[data_offset / PAGE_SIZE]; 709 page_off = (data_offset % PAGE_SIZE); 710 711 cmd->first_data_sg = sg; 712 cmd->first_data_sg_off = page_off; 713 714 while (data_length) { 715 u32 cur_len = min_t(u32, data_length, sg->length - page_off); 716 717 iov[i].iov_base = kmap(sg_page(sg)) + sg->offset + page_off; 718 iov[i].iov_len = cur_len; 719 720 data_length -= cur_len; 721 page_off = 0; 722 sg = sg_next(sg); 723 i++; 724 } 725 726 cmd->kmapped_nents = i; 727 728 return i; 729 } 730 731 static void iscsit_unmap_iovec(struct iscsi_cmd *cmd) 732 { 733 u32 i; 734 struct scatterlist *sg; 735 736 sg = cmd->first_data_sg; 737 738 for (i = 0; i < cmd->kmapped_nents; i++) 739 kunmap(sg_page(&sg[i])); 740 } 741 742 static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn) 743 { 744 struct iscsi_cmd *cmd; 745 746 conn->exp_statsn = exp_statsn; 747 748 spin_lock_bh(&conn->cmd_lock); 749 list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) { 750 spin_lock(&cmd->istate_lock); 751 if ((cmd->i_state == ISTATE_SENT_STATUS) && 752 (cmd->stat_sn < exp_statsn)) { 753 cmd->i_state = ISTATE_REMOVE; 754 spin_unlock(&cmd->istate_lock); 755 iscsit_add_cmd_to_immediate_queue(cmd, conn, 756 cmd->i_state); 757 continue; 758 } 759 spin_unlock(&cmd->istate_lock); 760 } 761 spin_unlock_bh(&conn->cmd_lock); 762 } 763 764 static int iscsit_allocate_iovecs(struct iscsi_cmd *cmd) 765 { 766 u32 iov_count = (cmd->se_cmd.t_data_nents == 0) ? 1 : 767 cmd->se_cmd.t_data_nents; 768 769 iov_count += ISCSI_IOV_DATA_BUFFER; 770 771 cmd->iov_data = kzalloc(iov_count * sizeof(struct kvec), GFP_KERNEL); 772 if (!cmd->iov_data) { 773 pr_err("Unable to allocate cmd->iov_data\n"); 774 return -ENOMEM; 775 } 776 777 cmd->orig_iov_data_count = iov_count; 778 return 0; 779 } 780 781 static int iscsit_alloc_buffs(struct iscsi_cmd *cmd) 782 { 783 struct scatterlist *sgl; 784 u32 length = cmd->se_cmd.data_length; 785 int nents = DIV_ROUND_UP(length, PAGE_SIZE); 786 int i = 0, ret; 787 /* 788 * If no SCSI payload is present, allocate the default iovecs used for 789 * iSCSI PDU Header 790 */ 791 if (!length) 792 return iscsit_allocate_iovecs(cmd); 793 794 sgl = kzalloc(sizeof(*sgl) * nents, GFP_KERNEL); 795 if (!sgl) 796 return -ENOMEM; 797 798 sg_init_table(sgl, nents); 799 800 while (length) { 801 int buf_size = min_t(int, length, PAGE_SIZE); 802 struct page *page; 803 804 page = alloc_page(GFP_KERNEL | __GFP_ZERO); 805 if (!page) 806 goto page_alloc_failed; 807 808 sg_set_page(&sgl[i], page, buf_size, 0); 809 810 length -= buf_size; 811 i++; 812 } 813 814 cmd->t_mem_sg = sgl; 815 cmd->t_mem_sg_nents = nents; 816 817 /* BIDI ops not supported */ 818 819 /* Tell the core about our preallocated memory */ 820 transport_generic_map_mem_to_cmd(&cmd->se_cmd, sgl, nents, NULL, 0); 821 /* 822 * Allocate iovecs for SCSI payload after transport_generic_map_mem_to_cmd 823 * so that cmd->se_cmd.t_tasks_se_num has been set. 
	 */
	ret = iscsit_allocate_iovecs(cmd);
	if (ret < 0)
		goto page_alloc_failed;

	return 0;

page_alloc_failed:
	/* Free only the pages that were successfully allocated above. */
	while (i > 0)
		__free_page(sg_page(&sgl[--i]));
	kfree(sgl);
	cmd->t_mem_sg = NULL;
	return -ENOMEM;
}

static int iscsit_handle_scsi_cmd(
	struct iscsi_conn *conn,
	unsigned char *buf)
{
	int data_direction, cmdsn_ret = 0, immed_ret, ret, transport_ret;
	int dump_immediate_data = 0, send_check_condition = 0, payload_length;
	struct iscsi_cmd *cmd = NULL;
	struct iscsi_scsi_req *hdr;

	spin_lock_bh(&conn->sess->session_stats_lock);
	conn->sess->cmd_pdus++;
	if (conn->sess->se_sess->se_node_acl) {
		spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock);
		conn->sess->se_sess->se_node_acl->num_cmds++;
		spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock);
	}
	spin_unlock_bh(&conn->sess->session_stats_lock);

	hdr = (struct iscsi_scsi_req *) buf;
	payload_length = ntoh24(hdr->dlength);
	hdr->itt = be32_to_cpu(hdr->itt);
	hdr->data_length = be32_to_cpu(hdr->data_length);
	hdr->cmdsn = be32_to_cpu(hdr->cmdsn);
	hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn);

	/* FIXME: Add checks for AdditionalHeaderSegment */

	if (!(hdr->flags & ISCSI_FLAG_CMD_WRITE) &&
	    !(hdr->flags & ISCSI_FLAG_CMD_FINAL)) {
		pr_err("ISCSI_FLAG_CMD_WRITE & ISCSI_FLAG_CMD_FINAL"
				" not set. Bad iSCSI Initiator.\n");
		return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1,
				buf, conn);
	}

	if (((hdr->flags & ISCSI_FLAG_CMD_READ) ||
	     (hdr->flags & ISCSI_FLAG_CMD_WRITE)) && !hdr->data_length) {
		/*
		 * VMware ESX v3.0 uses a modified Cisco Initiator (v3.4.2)
		 * that adds support for RESERVE/RELEASE. There is a bug in
		 * that functionality which sets the R/W bits when neither
		 * CDB carries any READ or WRITE data payloads.
		 */
		if ((hdr->cdb[0] == 0x16) || (hdr->cdb[0] == 0x17)) {
			hdr->flags &= ~ISCSI_FLAG_CMD_READ;
			hdr->flags &= ~ISCSI_FLAG_CMD_WRITE;
			goto done;
		}

		pr_err("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE"
			" set when Expected Data Transfer Length is 0 for"
			" CDB: 0x%02x. Bad iSCSI Initiator.\n", hdr->cdb[0]);
		return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1,
				buf, conn);
	}
done:

	if (!(hdr->flags & ISCSI_FLAG_CMD_READ) &&
	    !(hdr->flags & ISCSI_FLAG_CMD_WRITE) && (hdr->data_length != 0)) {
		pr_err("ISCSI_FLAG_CMD_READ and/or ISCSI_FLAG_CMD_WRITE"
			" MUST be set if Expected Data Transfer Length is not 0."
902 " Bad iSCSI Initiator\n"); 903 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1, 904 buf, conn); 905 } 906 907 if ((hdr->flags & ISCSI_FLAG_CMD_READ) && 908 (hdr->flags & ISCSI_FLAG_CMD_WRITE)) { 909 pr_err("Bidirectional operations not supported!\n"); 910 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1, 911 buf, conn); 912 } 913 914 if (hdr->opcode & ISCSI_OP_IMMEDIATE) { 915 pr_err("Illegally set Immediate Bit in iSCSI Initiator" 916 " Scsi Command PDU.\n"); 917 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1, 918 buf, conn); 919 } 920 921 if (payload_length && !conn->sess->sess_ops->ImmediateData) { 922 pr_err("ImmediateData=No but DataSegmentLength=%u," 923 " protocol error.\n", payload_length); 924 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1, 925 buf, conn); 926 } 927 928 if ((hdr->data_length == payload_length) && 929 (!(hdr->flags & ISCSI_FLAG_CMD_FINAL))) { 930 pr_err("Expected Data Transfer Length and Length of" 931 " Immediate Data are the same, but ISCSI_FLAG_CMD_FINAL" 932 " bit is not set protocol error\n"); 933 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1, 934 buf, conn); 935 } 936 937 if (payload_length > hdr->data_length) { 938 pr_err("DataSegmentLength: %u is greater than" 939 " EDTL: %u, protocol error.\n", payload_length, 940 hdr->data_length); 941 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1, 942 buf, conn); 943 } 944 945 if (payload_length > conn->conn_ops->MaxRecvDataSegmentLength) { 946 pr_err("DataSegmentLength: %u is greater than" 947 " MaxRecvDataSegmentLength: %u, protocol error.\n", 948 payload_length, conn->conn_ops->MaxRecvDataSegmentLength); 949 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1, 950 buf, conn); 951 } 952 953 if (payload_length > conn->sess->sess_ops->FirstBurstLength) { 954 pr_err("DataSegmentLength: %u is greater than" 955 " FirstBurstLength: %u, protocol error.\n", 956 payload_length, conn->sess->sess_ops->FirstBurstLength); 957 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1, 958 buf, conn); 959 } 960 961 data_direction = (hdr->flags & ISCSI_FLAG_CMD_WRITE) ? DMA_TO_DEVICE : 962 (hdr->flags & ISCSI_FLAG_CMD_READ) ? DMA_FROM_DEVICE : 963 DMA_NONE; 964 965 cmd = iscsit_allocate_se_cmd(conn, hdr->data_length, data_direction, 966 (hdr->flags & ISCSI_FLAG_CMD_ATTR_MASK)); 967 if (!cmd) 968 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES, 1, 969 buf, conn); 970 971 pr_debug("Got SCSI Command, ITT: 0x%08x, CmdSN: 0x%08x," 972 " ExpXferLen: %u, Length: %u, CID: %hu\n", hdr->itt, 973 hdr->cmdsn, hdr->data_length, payload_length, conn->cid); 974 975 cmd->iscsi_opcode = ISCSI_OP_SCSI_CMD; 976 cmd->i_state = ISTATE_NEW_CMD; 977 cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0); 978 cmd->immediate_data = (payload_length) ? 1 : 0; 979 cmd->unsolicited_data = ((!(hdr->flags & ISCSI_FLAG_CMD_FINAL) && 980 (hdr->flags & ISCSI_FLAG_CMD_WRITE)) ? 
			1 : 0);
	if (cmd->unsolicited_data)
		cmd->cmd_flags |= ICF_NON_IMMEDIATE_UNSOLICITED_DATA;

	conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
	if (hdr->flags & ISCSI_FLAG_CMD_READ) {
		spin_lock_bh(&conn->sess->ttt_lock);
		cmd->targ_xfer_tag = conn->sess->targ_xfer_tag++;
		if (cmd->targ_xfer_tag == 0xFFFFFFFF)
			cmd->targ_xfer_tag = conn->sess->targ_xfer_tag++;
		spin_unlock_bh(&conn->sess->ttt_lock);
	} else if (hdr->flags & ISCSI_FLAG_CMD_WRITE)
		cmd->targ_xfer_tag = 0xFFFFFFFF;
	cmd->cmd_sn = hdr->cmdsn;
	cmd->exp_stat_sn = hdr->exp_statsn;
	cmd->first_burst_len = payload_length;

	if (cmd->data_direction == DMA_FROM_DEVICE) {
		struct iscsi_datain_req *dr;

		dr = iscsit_allocate_datain_req();
		if (!dr)
			return iscsit_add_reject_from_cmd(
					ISCSI_REASON_BOOKMARK_NO_RESOURCES,
					1, 1, buf, cmd);

		iscsit_attach_datain_req(cmd, dr);
	}

	/*
	 * The CDB is going to an se_device_t.
	 */
	ret = iscsit_get_lun_for_cmd(cmd, hdr->cdb,
				get_unaligned_le64(&hdr->lun));
	if (ret < 0) {
		if (cmd->se_cmd.scsi_sense_reason == TCM_NON_EXISTENT_LUN) {
			pr_debug("Responding to non-acl'ed,"
				" non-existent or non-exported iSCSI LUN:"
				" 0x%016Lx\n", get_unaligned_le64(&hdr->lun));
		}
		if (ret == PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES)
			return iscsit_add_reject_from_cmd(
					ISCSI_REASON_BOOKMARK_NO_RESOURCES,
					1, 1, buf, cmd);

		send_check_condition = 1;
		goto attach_cmd;
	}
	/*
	 * The Initiator Node has access to the LUN (the addressing method
	 * is handled inside of iscsit_get_lun_for_cmd()). Now it's time to
	 * allocate 1->N transport tasks (depending on sector count and
	 * maximum request size the physical HBA(s) can handle).
	 */
	transport_ret = transport_generic_allocate_tasks(&cmd->se_cmd, hdr->cdb);
	if (transport_ret == -ENOMEM) {
		return iscsit_add_reject_from_cmd(
				ISCSI_REASON_BOOKMARK_NO_RESOURCES,
				1, 1, buf, cmd);
	} else if (transport_ret == -EINVAL) {
		/*
		 * Unsupported SAM Opcode. CHECK_CONDITION will be sent
		 * in iscsit_execute_cmd() during the CmdSN OOO Execution
		 * Mechanism.
		 */
		send_check_condition = 1;
	} else {
		if (iscsit_decide_list_to_build(cmd, payload_length) < 0)
			return iscsit_add_reject_from_cmd(
				ISCSI_REASON_BOOKMARK_NO_RESOURCES,
				1, 1, buf, cmd);
	}

attach_cmd:
	spin_lock_bh(&conn->cmd_lock);
	list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
	spin_unlock_bh(&conn->cmd_lock);
	/*
	 * Check if we need to delay processing because of ALUA
	 * Active/NonOptimized primary access state.
	 */
	core_alua_check_nonop_delay(&cmd->se_cmd);
	/*
	 * Allocate and setup SGL used with transport_generic_map_mem_to_cmd(),
	 * and also call iscsit_allocate_iovecs().
	 */
	ret = iscsit_alloc_buffs(cmd);
	if (ret < 0)
		return iscsit_add_reject_from_cmd(
				ISCSI_REASON_BOOKMARK_NO_RESOURCES,
				1, 1, buf, cmd);
	/*
	 * Check the CmdSN against ExpCmdSN/MaxCmdSN here if
	 * the Immediate Bit is not set, and no Immediate
	 * Data is attached.
	 *
	 * A PDU/CmdSN carrying Immediate Data can only
	 * be processed after the DataCRC has passed.
	 * If the DataCRC fails, the CmdSN MUST NOT
	 * be acknowledged.
(See below) 1080 */ 1081 if (!cmd->immediate_data) { 1082 cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn); 1083 if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) 1084 return 0; 1085 else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) 1086 return iscsit_add_reject_from_cmd( 1087 ISCSI_REASON_PROTOCOL_ERROR, 1088 1, 0, buf, cmd); 1089 } 1090 1091 iscsit_ack_from_expstatsn(conn, hdr->exp_statsn); 1092 1093 /* 1094 * If no Immediate Data is attached, it's OK to return now. 1095 */ 1096 if (!cmd->immediate_data) { 1097 if (send_check_condition) 1098 return 0; 1099 1100 if (cmd->unsolicited_data) { 1101 iscsit_set_dataout_sequence_values(cmd); 1102 1103 spin_lock_bh(&cmd->dataout_timeout_lock); 1104 iscsit_start_dataout_timer(cmd, cmd->conn); 1105 spin_unlock_bh(&cmd->dataout_timeout_lock); 1106 } 1107 1108 return 0; 1109 } 1110 1111 /* 1112 * Early CHECK_CONDITIONs never make it to the transport processing 1113 * thread. They are processed in CmdSN order by 1114 * iscsit_check_received_cmdsn() below. 1115 */ 1116 if (send_check_condition) { 1117 immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION; 1118 dump_immediate_data = 1; 1119 goto after_immediate_data; 1120 } 1121 /* 1122 * Call directly into transport_generic_new_cmd() to perform 1123 * the backend memory allocation. 1124 */ 1125 ret = transport_generic_new_cmd(&cmd->se_cmd); 1126 if ((ret < 0) || (cmd->se_cmd.se_cmd_flags & SCF_SE_CMD_FAILED)) { 1127 immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION; 1128 dump_immediate_data = 1; 1129 goto after_immediate_data; 1130 } 1131 1132 immed_ret = iscsit_handle_immediate_data(cmd, buf, payload_length); 1133 after_immediate_data: 1134 if (immed_ret == IMMEDIATE_DATA_NORMAL_OPERATION) { 1135 /* 1136 * A PDU/CmdSN carrying Immediate Data passed 1137 * DataCRC, check against ExpCmdSN/MaxCmdSN if 1138 * Immediate Bit is not set. 1139 */ 1140 cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn); 1141 /* 1142 * Special case for Unsupported SAM WRITE Opcodes 1143 * and ImmediateData=Yes. 1144 */ 1145 if (dump_immediate_data) { 1146 if (iscsit_dump_data_payload(conn, payload_length, 1) < 0) 1147 return -1; 1148 } else if (cmd->unsolicited_data) { 1149 iscsit_set_dataout_sequence_values(cmd); 1150 1151 spin_lock_bh(&cmd->dataout_timeout_lock); 1152 iscsit_start_dataout_timer(cmd, cmd->conn); 1153 spin_unlock_bh(&cmd->dataout_timeout_lock); 1154 } 1155 1156 if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) 1157 return iscsit_add_reject_from_cmd( 1158 ISCSI_REASON_PROTOCOL_ERROR, 1159 1, 0, buf, cmd); 1160 1161 } else if (immed_ret == IMMEDIATE_DATA_ERL1_CRC_FAILURE) { 1162 /* 1163 * Immediate Data failed DataCRC and ERL>=1, 1164 * silently drop this PDU and let the initiator 1165 * plug the CmdSN gap. 1166 * 1167 * FIXME: Send Unsolicited NOPIN with reserved 1168 * TTT here to help the initiator figure out 1169 * the missing CmdSN, although they should be 1170 * intelligent enough to determine the missing 1171 * CmdSN and issue a retry to plug the sequence. 
1172 */ 1173 cmd->i_state = ISTATE_REMOVE; 1174 iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state); 1175 } else /* immed_ret == IMMEDIATE_DATA_CANNOT_RECOVER */ 1176 return -1; 1177 1178 return 0; 1179 } 1180 1181 static u32 iscsit_do_crypto_hash_sg( 1182 struct hash_desc *hash, 1183 struct iscsi_cmd *cmd, 1184 u32 data_offset, 1185 u32 data_length, 1186 u32 padding, 1187 u8 *pad_bytes) 1188 { 1189 u32 data_crc; 1190 u32 i; 1191 struct scatterlist *sg; 1192 unsigned int page_off; 1193 1194 crypto_hash_init(hash); 1195 1196 sg = cmd->first_data_sg; 1197 page_off = cmd->first_data_sg_off; 1198 1199 i = 0; 1200 while (data_length) { 1201 u32 cur_len = min_t(u32, data_length, (sg[i].length - page_off)); 1202 1203 crypto_hash_update(hash, &sg[i], cur_len); 1204 1205 data_length -= cur_len; 1206 page_off = 0; 1207 i++; 1208 } 1209 1210 if (padding) { 1211 struct scatterlist pad_sg; 1212 1213 sg_init_one(&pad_sg, pad_bytes, padding); 1214 crypto_hash_update(hash, &pad_sg, padding); 1215 } 1216 crypto_hash_final(hash, (u8 *) &data_crc); 1217 1218 return data_crc; 1219 } 1220 1221 static void iscsit_do_crypto_hash_buf( 1222 struct hash_desc *hash, 1223 unsigned char *buf, 1224 u32 payload_length, 1225 u32 padding, 1226 u8 *pad_bytes, 1227 u8 *data_crc) 1228 { 1229 struct scatterlist sg; 1230 1231 crypto_hash_init(hash); 1232 1233 sg_init_one(&sg, (u8 *)buf, payload_length); 1234 crypto_hash_update(hash, &sg, payload_length); 1235 1236 if (padding) { 1237 sg_init_one(&sg, pad_bytes, padding); 1238 crypto_hash_update(hash, &sg, padding); 1239 } 1240 crypto_hash_final(hash, data_crc); 1241 } 1242 1243 static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf) 1244 { 1245 int iov_ret, ooo_cmdsn = 0, ret; 1246 u8 data_crc_failed = 0; 1247 u32 checksum, iov_count = 0, padding = 0, rx_got = 0; 1248 u32 rx_size = 0, payload_length; 1249 struct iscsi_cmd *cmd = NULL; 1250 struct se_cmd *se_cmd; 1251 struct iscsi_data *hdr; 1252 struct kvec *iov; 1253 unsigned long flags; 1254 1255 hdr = (struct iscsi_data *) buf; 1256 payload_length = ntoh24(hdr->dlength); 1257 hdr->itt = be32_to_cpu(hdr->itt); 1258 hdr->ttt = be32_to_cpu(hdr->ttt); 1259 hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn); 1260 hdr->datasn = be32_to_cpu(hdr->datasn); 1261 hdr->offset = be32_to_cpu(hdr->offset); 1262 1263 if (!payload_length) { 1264 pr_err("DataOUT payload is ZERO, protocol error.\n"); 1265 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1, 1266 buf, conn); 1267 } 1268 1269 /* iSCSI write */ 1270 spin_lock_bh(&conn->sess->session_stats_lock); 1271 conn->sess->rx_data_octets += payload_length; 1272 if (conn->sess->se_sess->se_node_acl) { 1273 spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock); 1274 conn->sess->se_sess->se_node_acl->write_bytes += payload_length; 1275 spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock); 1276 } 1277 spin_unlock_bh(&conn->sess->session_stats_lock); 1278 1279 if (payload_length > conn->conn_ops->MaxRecvDataSegmentLength) { 1280 pr_err("DataSegmentLength: %u is greater than" 1281 " MaxRecvDataSegmentLength: %u\n", payload_length, 1282 conn->conn_ops->MaxRecvDataSegmentLength); 1283 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1, 1284 buf, conn); 1285 } 1286 1287 cmd = iscsit_find_cmd_from_itt_or_dump(conn, hdr->itt, 1288 payload_length); 1289 if (!cmd) 1290 return 0; 1291 1292 pr_debug("Got DataOut ITT: 0x%08x, TTT: 0x%08x," 1293 " DataSN: 0x%08x, Offset: %u, Length: %u, CID: %hu\n", 1294 hdr->itt, hdr->ttt, hdr->datasn, hdr->offset, 1295 
		payload_length, conn->cid);

	if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT) {
		pr_err("Command ITT: 0x%08x received DataOUT after"
			" last DataOUT received, dumping payload\n",
			cmd->init_task_tag);
		return iscsit_dump_data_payload(conn, payload_length, 1);
	}

	if (cmd->data_direction != DMA_TO_DEVICE) {
		pr_err("Command ITT: 0x%08x received DataOUT for a"
			" NON-WRITE command.\n", cmd->init_task_tag);
		return iscsit_add_reject_from_cmd(ISCSI_REASON_PROTOCOL_ERROR,
				1, 0, buf, cmd);
	}
	se_cmd = &cmd->se_cmd;
	iscsit_mod_dataout_timer(cmd);

	if ((hdr->offset + payload_length) > cmd->data_length) {
		pr_err("DataOut Offset: %u, Length %u greater than"
			" iSCSI Command EDTL %u, protocol error.\n",
			hdr->offset, payload_length, cmd->data_length);
		return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_INVALID,
				1, 0, buf, cmd);
	}

	if (cmd->unsolicited_data) {
		int dump_unsolicited_data = 0;

		if (conn->sess->sess_ops->InitialR2T) {
			pr_err("Received unexpected unsolicited data"
				" while InitialR2T=Yes, protocol error.\n");
			transport_send_check_condition_and_sense(&cmd->se_cmd,
					TCM_UNEXPECTED_UNSOLICITED_DATA, 0);
			return -1;
		}
		/*
		 * Special case for dealing with Unsolicited DataOUT
		 * and Unsupported SAM WRITE Opcodes and SE resource allocation
		 * failures.
		 */

		/* Something's amiss if we're not in WRITE_PENDING state... */
		spin_lock_irqsave(&se_cmd->t_state_lock, flags);
		WARN_ON(se_cmd->t_state != TRANSPORT_WRITE_PENDING);
		spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);

		spin_lock_irqsave(&se_cmd->t_state_lock, flags);
		if (!(se_cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) ||
		    (se_cmd->se_cmd_flags & SCF_SE_CMD_FAILED))
			dump_unsolicited_data = 1;
		spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);

		if (dump_unsolicited_data) {
			/*
			 * Check if a delayed TASK_ABORTED status needs to
			 * be sent now if the ISCSI_FLAG_CMD_FINAL has been
			 * received with the unsolicited data out.
			 */
			if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
				iscsit_stop_dataout_timer(cmd);

			transport_check_aborted_status(se_cmd,
					(hdr->flags & ISCSI_FLAG_CMD_FINAL));
			return iscsit_dump_data_payload(conn, payload_length, 1);
		}
	} else {
		/*
		 * For the normal solicited data path:
		 *
		 * Check for a delayed TASK_ABORTED status and dump any
		 * incoming data out payload if one exists. Also, when the
		 * ISCSI_FLAG_CMD_FINAL is set to denote the end of the current
		 * data out sequence, we decrement outstanding_r2ts. Once
		 * outstanding_r2ts reaches zero, go ahead and send the delayed
		 * TASK_ABORTED status.
		 */
		if (atomic_read(&se_cmd->t_transport_aborted) != 0) {
			if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
				if (--cmd->outstanding_r2ts < 1) {
					iscsit_stop_dataout_timer(cmd);
					transport_check_aborted_status(
							se_cmd, 1);
				}

			return iscsit_dump_data_payload(conn, payload_length, 1);
		}
	}
	/*
	 * Perform DataSN, DataSequenceInOrder, DataPDUInOrder, and
	 * within-command recovery checks before receiving the payload.
1386 */ 1387 ret = iscsit_check_pre_dataout(cmd, buf); 1388 if (ret == DATAOUT_WITHIN_COMMAND_RECOVERY) 1389 return 0; 1390 else if (ret == DATAOUT_CANNOT_RECOVER) 1391 return -1; 1392 1393 rx_size += payload_length; 1394 iov = &cmd->iov_data[0]; 1395 1396 iov_ret = iscsit_map_iovec(cmd, iov, hdr->offset, payload_length); 1397 if (iov_ret < 0) 1398 return -1; 1399 1400 iov_count += iov_ret; 1401 1402 padding = ((-payload_length) & 3); 1403 if (padding != 0) { 1404 iov[iov_count].iov_base = cmd->pad_bytes; 1405 iov[iov_count++].iov_len = padding; 1406 rx_size += padding; 1407 pr_debug("Receiving %u padding bytes.\n", padding); 1408 } 1409 1410 if (conn->conn_ops->DataDigest) { 1411 iov[iov_count].iov_base = &checksum; 1412 iov[iov_count++].iov_len = ISCSI_CRC_LEN; 1413 rx_size += ISCSI_CRC_LEN; 1414 } 1415 1416 rx_got = rx_data(conn, &cmd->iov_data[0], iov_count, rx_size); 1417 1418 iscsit_unmap_iovec(cmd); 1419 1420 if (rx_got != rx_size) 1421 return -1; 1422 1423 if (conn->conn_ops->DataDigest) { 1424 u32 data_crc; 1425 1426 data_crc = iscsit_do_crypto_hash_sg(&conn->conn_rx_hash, cmd, 1427 hdr->offset, payload_length, padding, 1428 cmd->pad_bytes); 1429 1430 if (checksum != data_crc) { 1431 pr_err("ITT: 0x%08x, Offset: %u, Length: %u," 1432 " DataSN: 0x%08x, CRC32C DataDigest 0x%08x" 1433 " does not match computed 0x%08x\n", 1434 hdr->itt, hdr->offset, payload_length, 1435 hdr->datasn, checksum, data_crc); 1436 data_crc_failed = 1; 1437 } else { 1438 pr_debug("Got CRC32C DataDigest 0x%08x for" 1439 " %u bytes of Data Out\n", checksum, 1440 payload_length); 1441 } 1442 } 1443 /* 1444 * Increment post receive data and CRC values or perform 1445 * within-command recovery. 1446 */ 1447 ret = iscsit_check_post_dataout(cmd, buf, data_crc_failed); 1448 if ((ret == DATAOUT_NORMAL) || (ret == DATAOUT_WITHIN_COMMAND_RECOVERY)) 1449 return 0; 1450 else if (ret == DATAOUT_SEND_R2T) { 1451 iscsit_set_dataout_sequence_values(cmd); 1452 iscsit_build_r2ts_for_cmd(cmd, conn, 0); 1453 } else if (ret == DATAOUT_SEND_TO_TRANSPORT) { 1454 /* 1455 * Handle extra special case for out of order 1456 * Unsolicited Data Out. 1457 */ 1458 spin_lock_bh(&cmd->istate_lock); 1459 ooo_cmdsn = (cmd->cmd_flags & ICF_OOO_CMDSN); 1460 cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT; 1461 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT; 1462 spin_unlock_bh(&cmd->istate_lock); 1463 1464 iscsit_stop_dataout_timer(cmd); 1465 return (!ooo_cmdsn) ? 
		transport_generic_handle_data(
				&cmd->se_cmd) : 0;
	} else /* DATAOUT_CANNOT_RECOVER */
		return -1;

	return 0;
}

static int iscsit_handle_nop_out(
	struct iscsi_conn *conn,
	unsigned char *buf)
{
	unsigned char *ping_data = NULL;
	int cmdsn_ret, niov = 0, ret = 0, rx_got, rx_size;
	u32 checksum, data_crc, padding = 0, payload_length;
	u64 lun;
	struct iscsi_cmd *cmd = NULL;
	struct kvec *iov = NULL;
	struct iscsi_nopout *hdr;

	hdr = (struct iscsi_nopout *) buf;
	payload_length = ntoh24(hdr->dlength);
	lun = get_unaligned_le64(&hdr->lun);
	hdr->itt = be32_to_cpu(hdr->itt);
	hdr->ttt = be32_to_cpu(hdr->ttt);
	hdr->cmdsn = be32_to_cpu(hdr->cmdsn);
	hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn);

	if ((hdr->itt == 0xFFFFFFFF) && !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
		pr_err("NOPOUT ITT is reserved, but Immediate Bit is"
			" not set, protocol error.\n");
		return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
				buf, conn);
	}

	if (payload_length > conn->conn_ops->MaxRecvDataSegmentLength) {
		pr_err("NOPOUT Ping Data DataSegmentLength: %u is"
			" greater than MaxRecvDataSegmentLength: %u, protocol"
			" error.\n", payload_length,
			conn->conn_ops->MaxRecvDataSegmentLength);
		return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
				buf, conn);
	}

	pr_debug("Got NOPOUT Ping %s ITT: 0x%08x, TTT: 0x%08x,"
		" CmdSN: 0x%08x, ExpStatSN: 0x%08x, Length: %u\n",
		(hdr->itt == 0xFFFFFFFF) ? "Response" : "Request",
		hdr->itt, hdr->ttt, hdr->cmdsn, hdr->exp_statsn,
		payload_length);
	/*
	 * This is not a response to an unsolicited NopIN, which means
	 * it can either be a NOPOUT ping request (with a valid ITT),
	 * or a NOPOUT not requesting a NOPIN (with a reserved ITT).
	 * Either way, make sure we allocate a struct iscsi_cmd, as both
	 * can contain ping data.
	 */
	if (hdr->ttt == 0xFFFFFFFF) {
		cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
		if (!cmd)
			return iscsit_add_reject(
					ISCSI_REASON_BOOKMARK_NO_RESOURCES,
					1, buf, conn);

		cmd->iscsi_opcode = ISCSI_OP_NOOP_OUT;
		cmd->i_state = ISTATE_SEND_NOPIN;
		cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ?
1531 1 : 0); 1532 conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt; 1533 cmd->targ_xfer_tag = 0xFFFFFFFF; 1534 cmd->cmd_sn = hdr->cmdsn; 1535 cmd->exp_stat_sn = hdr->exp_statsn; 1536 cmd->data_direction = DMA_NONE; 1537 } 1538 1539 if (payload_length && (hdr->ttt == 0xFFFFFFFF)) { 1540 rx_size = payload_length; 1541 ping_data = kzalloc(payload_length + 1, GFP_KERNEL); 1542 if (!ping_data) { 1543 pr_err("Unable to allocate memory for" 1544 " NOPOUT ping data.\n"); 1545 ret = -1; 1546 goto out; 1547 } 1548 1549 iov = &cmd->iov_misc[0]; 1550 iov[niov].iov_base = ping_data; 1551 iov[niov++].iov_len = payload_length; 1552 1553 padding = ((-payload_length) & 3); 1554 if (padding != 0) { 1555 pr_debug("Receiving %u additional bytes" 1556 " for padding.\n", padding); 1557 iov[niov].iov_base = &cmd->pad_bytes; 1558 iov[niov++].iov_len = padding; 1559 rx_size += padding; 1560 } 1561 if (conn->conn_ops->DataDigest) { 1562 iov[niov].iov_base = &checksum; 1563 iov[niov++].iov_len = ISCSI_CRC_LEN; 1564 rx_size += ISCSI_CRC_LEN; 1565 } 1566 1567 rx_got = rx_data(conn, &cmd->iov_misc[0], niov, rx_size); 1568 if (rx_got != rx_size) { 1569 ret = -1; 1570 goto out; 1571 } 1572 1573 if (conn->conn_ops->DataDigest) { 1574 iscsit_do_crypto_hash_buf(&conn->conn_rx_hash, 1575 ping_data, payload_length, 1576 padding, cmd->pad_bytes, 1577 (u8 *)&data_crc); 1578 1579 if (checksum != data_crc) { 1580 pr_err("Ping data CRC32C DataDigest" 1581 " 0x%08x does not match computed 0x%08x\n", 1582 checksum, data_crc); 1583 if (!conn->sess->sess_ops->ErrorRecoveryLevel) { 1584 pr_err("Unable to recover from" 1585 " NOPOUT Ping DataCRC failure while in" 1586 " ERL=0.\n"); 1587 ret = -1; 1588 goto out; 1589 } else { 1590 /* 1591 * Silently drop this PDU and let the 1592 * initiator plug the CmdSN gap. 1593 */ 1594 pr_debug("Dropping NOPOUT" 1595 " Command CmdSN: 0x%08x due to" 1596 " DataCRC error.\n", hdr->cmdsn); 1597 ret = 0; 1598 goto out; 1599 } 1600 } else { 1601 pr_debug("Got CRC32C DataDigest" 1602 " 0x%08x for %u bytes of ping data.\n", 1603 checksum, payload_length); 1604 } 1605 } 1606 1607 ping_data[payload_length] = '\0'; 1608 /* 1609 * Attach ping data to struct iscsi_cmd->buf_ptr. 1610 */ 1611 cmd->buf_ptr = (void *)ping_data; 1612 cmd->buf_ptr_size = payload_length; 1613 1614 pr_debug("Got %u bytes of NOPOUT ping" 1615 " data.\n", payload_length); 1616 pr_debug("Ping Data: \"%s\"\n", ping_data); 1617 } 1618 1619 if (hdr->itt != 0xFFFFFFFF) { 1620 if (!cmd) { 1621 pr_err("Checking CmdSN for NOPOUT," 1622 " but cmd is NULL!\n"); 1623 return -1; 1624 } 1625 /* 1626 * Initiator is expecting a NopIN ping reply, 1627 */ 1628 spin_lock_bh(&conn->cmd_lock); 1629 list_add_tail(&cmd->i_list, &conn->conn_cmd_list); 1630 spin_unlock_bh(&conn->cmd_lock); 1631 1632 iscsit_ack_from_expstatsn(conn, hdr->exp_statsn); 1633 1634 if (hdr->opcode & ISCSI_OP_IMMEDIATE) { 1635 iscsit_add_cmd_to_response_queue(cmd, conn, 1636 cmd->i_state); 1637 return 0; 1638 } 1639 1640 cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn); 1641 if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) { 1642 ret = 0; 1643 goto ping_out; 1644 } 1645 if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) 1646 return iscsit_add_reject_from_cmd( 1647 ISCSI_REASON_PROTOCOL_ERROR, 1648 1, 0, buf, cmd); 1649 1650 return 0; 1651 } 1652 1653 if (hdr->ttt != 0xFFFFFFFF) { 1654 /* 1655 * This was a response to a unsolicited NOPIN ping. 
		 */
		cmd = iscsit_find_cmd_from_ttt(conn, hdr->ttt);
		if (!cmd)
			return -1;

		iscsit_stop_nopin_response_timer(conn);

		cmd->i_state = ISTATE_REMOVE;
		iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state);
		iscsit_start_nopin_timer(conn);
	} else {
		/*
		 * Initiator is not expecting a NOPIN in response.
		 * Just ignore for now.
		 *
		 * iSCSI v19-91 10.18
		 * "A NOP-OUT may also be used to confirm a changed
		 *  ExpStatSN if another PDU will not be available
		 *  for a long time."
		 */
		ret = 0;
		goto out;
	}

	return 0;
out:
	if (cmd)
		iscsit_release_cmd(cmd);
ping_out:
	kfree(ping_data);
	return ret;
}

static int iscsit_handle_task_mgt_cmd(
	struct iscsi_conn *conn,
	unsigned char *buf)
{
	struct iscsi_cmd *cmd;
	struct se_tmr_req *se_tmr;
	struct iscsi_tmr_req *tmr_req;
	struct iscsi_tm *hdr;
	u32 payload_length;
	int out_of_order_cmdsn = 0;
	int ret;
	u8 function;

	hdr = (struct iscsi_tm *) buf;
	payload_length = ntoh24(hdr->dlength);
	hdr->itt = be32_to_cpu(hdr->itt);
	hdr->rtt = be32_to_cpu(hdr->rtt);
	hdr->cmdsn = be32_to_cpu(hdr->cmdsn);
	hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn);
	hdr->refcmdsn = be32_to_cpu(hdr->refcmdsn);
	hdr->exp_datasn = be32_to_cpu(hdr->exp_datasn);
	hdr->flags &= ~ISCSI_FLAG_CMD_FINAL;
	function = hdr->flags;

	pr_debug("Got Task Management Request ITT: 0x%08x, CmdSN:"
		" 0x%08x, Function: 0x%02x, RefTaskTag: 0x%08x, RefCmdSN:"
		" 0x%08x, CID: %hu\n", hdr->itt, hdr->cmdsn, function,
		hdr->rtt, hdr->refcmdsn, conn->cid);

	if ((function != ISCSI_TM_FUNC_ABORT_TASK) &&
	    ((function != ISCSI_TM_FUNC_TASK_REASSIGN) &&
	     (hdr->rtt != ISCSI_RESERVED_TAG))) {
		pr_err("RefTaskTag should be set to 0xFFFFFFFF.\n");
		hdr->rtt = ISCSI_RESERVED_TAG;
	}

	if ((function == ISCSI_TM_FUNC_TASK_REASSIGN) &&
	    !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
		pr_err("Task Management Request TASK_REASSIGN not"
			" issued as immediate command, bad iSCSI Initiator"
			" implementation\n");
		return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
				buf, conn);
	}
	if ((function != ISCSI_TM_FUNC_ABORT_TASK) &&
	    (hdr->refcmdsn != ISCSI_RESERVED_TAG))
		hdr->refcmdsn = ISCSI_RESERVED_TAG;

	cmd = iscsit_allocate_se_cmd_for_tmr(conn, function);
	if (!cmd)
		return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES,
					1, buf, conn);

	cmd->iscsi_opcode = ISCSI_OP_SCSI_TMFUNC;
	cmd->i_state = ISTATE_SEND_TASKMGTRSP;
	cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ?
1 : 0); 1745 cmd->init_task_tag = hdr->itt; 1746 cmd->targ_xfer_tag = 0xFFFFFFFF; 1747 cmd->cmd_sn = hdr->cmdsn; 1748 cmd->exp_stat_sn = hdr->exp_statsn; 1749 se_tmr = cmd->se_cmd.se_tmr_req; 1750 tmr_req = cmd->tmr_req; 1751 /* 1752 * Locate the struct se_lun for all TMRs not related to ERL=2 TASK_REASSIGN 1753 */ 1754 if (function != ISCSI_TM_FUNC_TASK_REASSIGN) { 1755 ret = iscsit_get_lun_for_tmr(cmd, 1756 get_unaligned_le64(&hdr->lun)); 1757 if (ret < 0) { 1758 cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 1759 se_tmr->response = ISCSI_TMF_RSP_NO_LUN; 1760 goto attach; 1761 } 1762 } 1763 1764 switch (function) { 1765 case ISCSI_TM_FUNC_ABORT_TASK: 1766 se_tmr->response = iscsit_tmr_abort_task(cmd, buf); 1767 if (se_tmr->response != ISCSI_TMF_RSP_COMPLETE) { 1768 cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 1769 goto attach; 1770 } 1771 break; 1772 case ISCSI_TM_FUNC_ABORT_TASK_SET: 1773 case ISCSI_TM_FUNC_CLEAR_ACA: 1774 case ISCSI_TM_FUNC_CLEAR_TASK_SET: 1775 case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET: 1776 break; 1777 case ISCSI_TM_FUNC_TARGET_WARM_RESET: 1778 if (iscsit_tmr_task_warm_reset(conn, tmr_req, buf) < 0) { 1779 cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 1780 se_tmr->response = ISCSI_TMF_RSP_AUTH_FAILED; 1781 goto attach; 1782 } 1783 break; 1784 case ISCSI_TM_FUNC_TARGET_COLD_RESET: 1785 if (iscsit_tmr_task_cold_reset(conn, tmr_req, buf) < 0) { 1786 cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 1787 se_tmr->response = ISCSI_TMF_RSP_AUTH_FAILED; 1788 goto attach; 1789 } 1790 break; 1791 case ISCSI_TM_FUNC_TASK_REASSIGN: 1792 se_tmr->response = iscsit_tmr_task_reassign(cmd, buf); 1793 /* 1794 * Perform sanity checks on the ExpDataSN only if the 1795 * TASK_REASSIGN was successful. 1796 */ 1797 if (se_tmr->response != ISCSI_TMF_RSP_COMPLETE) 1798 break; 1799 1800 if (iscsit_check_task_reassign_expdatasn(tmr_req, conn) < 0) 1801 return iscsit_add_reject_from_cmd( 1802 ISCSI_REASON_BOOKMARK_INVALID, 1, 1, 1803 buf, cmd); 1804 break; 1805 default: 1806 pr_err("Unknown TMR function: 0x%02x, protocol" 1807 " error.\n", function); 1808 cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; 1809 se_tmr->response = ISCSI_TMF_RSP_NOT_SUPPORTED; 1810 goto attach; 1811 } 1812 1813 if ((function != ISCSI_TM_FUNC_TASK_REASSIGN) && 1814 (se_tmr->response == ISCSI_TMF_RSP_COMPLETE)) 1815 se_tmr->call_transport = 1; 1816 attach: 1817 spin_lock_bh(&conn->cmd_lock); 1818 list_add_tail(&cmd->i_list, &conn->conn_cmd_list); 1819 spin_unlock_bh(&conn->cmd_lock); 1820 1821 if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) { 1822 int cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn); 1823 if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP) 1824 out_of_order_cmdsn = 1; 1825 else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) 1826 return 0; 1827 else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) 1828 return iscsit_add_reject_from_cmd( 1829 ISCSI_REASON_PROTOCOL_ERROR, 1830 1, 0, buf, cmd); 1831 } 1832 iscsit_ack_from_expstatsn(conn, hdr->exp_statsn); 1833 1834 if (out_of_order_cmdsn || !(hdr->opcode & ISCSI_OP_IMMEDIATE)) 1835 return 0; 1836 /* 1837 * Found the referenced task, send to transport for processing. 1838 */ 1839 if (se_tmr->call_transport) 1840 return transport_generic_handle_tmr(&cmd->se_cmd); 1841 1842 /* 1843 * Could not find the referenced LUN, task, or Task Management 1844 * command not authorized or supported. Change state and 1845 * let the tx_thread send the response. 1846 * 1847 * For connection recovery, this is also the default action for 1848 * TMR TASK_REASSIGN. 
1849 */ 1850 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); 1851 return 0; 1852 } 1853 1854 /* #warning FIXME: Support Text Command parameters besides SendTargets */ 1855 static int iscsit_handle_text_cmd( 1856 struct iscsi_conn *conn, 1857 unsigned char *buf) 1858 { 1859 char *text_ptr, *text_in; 1860 int cmdsn_ret, niov = 0, rx_got, rx_size; 1861 u32 checksum = 0, data_crc = 0, payload_length; 1862 u32 padding = 0, pad_bytes = 0, text_length = 0; 1863 struct iscsi_cmd *cmd; 1864 struct kvec iov[3]; 1865 struct iscsi_text *hdr; 1866 1867 hdr = (struct iscsi_text *) buf; 1868 payload_length = ntoh24(hdr->dlength); 1869 hdr->itt = be32_to_cpu(hdr->itt); 1870 hdr->ttt = be32_to_cpu(hdr->ttt); 1871 hdr->cmdsn = be32_to_cpu(hdr->cmdsn); 1872 hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn); 1873 1874 if (payload_length > conn->conn_ops->MaxRecvDataSegmentLength) { 1875 pr_err("Unable to accept text parameter length: %u" 1876 "greater than MaxRecvDataSegmentLength %u.\n", 1877 payload_length, conn->conn_ops->MaxRecvDataSegmentLength); 1878 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1, 1879 buf, conn); 1880 } 1881 1882 pr_debug("Got Text Request: ITT: 0x%08x, CmdSN: 0x%08x," 1883 " ExpStatSN: 0x%08x, Length: %u\n", hdr->itt, hdr->cmdsn, 1884 hdr->exp_statsn, payload_length); 1885 1886 rx_size = text_length = payload_length; 1887 if (text_length) { 1888 text_in = kzalloc(text_length, GFP_KERNEL); 1889 if (!text_in) { 1890 pr_err("Unable to allocate memory for" 1891 " incoming text parameters\n"); 1892 return -1; 1893 } 1894 1895 memset(iov, 0, 3 * sizeof(struct kvec)); 1896 iov[niov].iov_base = text_in; 1897 iov[niov++].iov_len = text_length; 1898 1899 padding = ((-payload_length) & 3); 1900 if (padding != 0) { 1901 iov[niov].iov_base = &pad_bytes; 1902 iov[niov++].iov_len = padding; 1903 rx_size += padding; 1904 pr_debug("Receiving %u additional bytes" 1905 " for padding.\n", padding); 1906 } 1907 if (conn->conn_ops->DataDigest) { 1908 iov[niov].iov_base = &checksum; 1909 iov[niov++].iov_len = ISCSI_CRC_LEN; 1910 rx_size += ISCSI_CRC_LEN; 1911 } 1912 1913 rx_got = rx_data(conn, &iov[0], niov, rx_size); 1914 if (rx_got != rx_size) { 1915 kfree(text_in); 1916 return -1; 1917 } 1918 1919 if (conn->conn_ops->DataDigest) { 1920 iscsit_do_crypto_hash_buf(&conn->conn_rx_hash, 1921 text_in, text_length, 1922 padding, (u8 *)&pad_bytes, 1923 (u8 *)&data_crc); 1924 1925 if (checksum != data_crc) { 1926 pr_err("Text data CRC32C DataDigest" 1927 " 0x%08x does not match computed" 1928 " 0x%08x\n", checksum, data_crc); 1929 if (!conn->sess->sess_ops->ErrorRecoveryLevel) { 1930 pr_err("Unable to recover from" 1931 " Text Data digest failure while in" 1932 " ERL=0.\n"); 1933 kfree(text_in); 1934 return -1; 1935 } else { 1936 /* 1937 * Silently drop this PDU and let the 1938 * initiator plug the CmdSN gap. 
1939 */ 1940 pr_debug("Dropping Text" 1941 " Command CmdSN: 0x%08x due to" 1942 " DataCRC error.\n", hdr->cmdsn); 1943 kfree(text_in); 1944 return 0; 1945 } 1946 } else { 1947 pr_debug("Got CRC32C DataDigest" 1948 " 0x%08x for %u bytes of text data.\n", 1949 checksum, text_length); 1950 } 1951 } 1952 text_in[text_length - 1] = '\0'; 1953 pr_debug("Successfully read %d bytes of text" 1954 " data.\n", text_length); 1955 1956 if (strncmp("SendTargets", text_in, 11) != 0) { 1957 pr_err("Received Text Data that is not" 1958 " SendTargets, cannot continue.\n"); 1959 kfree(text_in); 1960 return -1; 1961 } 1962 text_ptr = strchr(text_in, '='); 1963 if (!text_ptr) { 1964 pr_err("No \"=\" separator found in Text Data," 1965 " cannot continue.\n"); 1966 kfree(text_in); 1967 return -1; 1968 } 1969 if (strncmp("=All", text_ptr, 4) != 0) { 1970 pr_err("Unable to locate All value for" 1971 " SendTargets key, cannot continue.\n"); 1972 kfree(text_in); 1973 return -1; 1974 } 1975 /*#warning Support SendTargets=(iSCSI Target Name/Nothing) values. */ 1976 kfree(text_in); 1977 } 1978 1979 cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); 1980 if (!cmd) 1981 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES, 1982 1, buf, conn); 1983 1984 cmd->iscsi_opcode = ISCSI_OP_TEXT; 1985 cmd->i_state = ISTATE_SEND_TEXTRSP; 1986 cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0); 1987 conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt; 1988 cmd->targ_xfer_tag = 0xFFFFFFFF; 1989 cmd->cmd_sn = hdr->cmdsn; 1990 cmd->exp_stat_sn = hdr->exp_statsn; 1991 cmd->data_direction = DMA_NONE; 1992 1993 spin_lock_bh(&conn->cmd_lock); 1994 list_add_tail(&cmd->i_list, &conn->conn_cmd_list); 1995 spin_unlock_bh(&conn->cmd_lock); 1996 1997 iscsit_ack_from_expstatsn(conn, hdr->exp_statsn); 1998 1999 if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) { 2000 cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn); 2001 if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) 2002 return iscsit_add_reject_from_cmd( 2003 ISCSI_REASON_PROTOCOL_ERROR, 2004 1, 0, buf, cmd); 2005 2006 return 0; 2007 } 2008 2009 return iscsit_execute_cmd(cmd, 0); 2010 } 2011 2012 int iscsit_logout_closesession(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 2013 { 2014 struct iscsi_conn *conn_p; 2015 struct iscsi_session *sess = conn->sess; 2016 2017 pr_debug("Received logout request CLOSESESSION on CID: %hu" 2018 " for SID: %u.\n", conn->cid, conn->sess->sid); 2019 2020 atomic_set(&sess->session_logout, 1); 2021 atomic_set(&conn->conn_logout_remove, 1); 2022 conn->conn_logout_reason = ISCSI_LOGOUT_REASON_CLOSE_SESSION; 2023 2024 iscsit_inc_conn_usage_count(conn); 2025 iscsit_inc_session_usage_count(sess); 2026 2027 spin_lock_bh(&sess->conn_lock); 2028 list_for_each_entry(conn_p, &sess->sess_conn_list, conn_list) { 2029 if (conn_p->conn_state != TARG_CONN_STATE_LOGGED_IN) 2030 continue; 2031 2032 pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n"); 2033 conn_p->conn_state = TARG_CONN_STATE_IN_LOGOUT; 2034 } 2035 spin_unlock_bh(&sess->conn_lock); 2036 2037 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); 2038 2039 return 0; 2040 } 2041 2042 int iscsit_logout_closeconnection(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 2043 { 2044 struct iscsi_conn *l_conn; 2045 struct iscsi_session *sess = conn->sess; 2046 2047 pr_debug("Received logout request CLOSECONNECTION for CID:" 2048 " %hu on CID: %hu.\n", cmd->logout_cid, conn->cid); 2049 2050 /* 2051 * A Logout Request with a CLOSECONNECTION reason code for a CID 2052 * can arrive on a connection with a 
differing CID. 2053 */ 2054 if (conn->cid == cmd->logout_cid) { 2055 spin_lock_bh(&conn->state_lock); 2056 pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n"); 2057 conn->conn_state = TARG_CONN_STATE_IN_LOGOUT; 2058 2059 atomic_set(&conn->conn_logout_remove, 1); 2060 conn->conn_logout_reason = ISCSI_LOGOUT_REASON_CLOSE_CONNECTION; 2061 iscsit_inc_conn_usage_count(conn); 2062 2063 spin_unlock_bh(&conn->state_lock); 2064 } else { 2065 /* 2066 * Handle all different cid CLOSECONNECTION requests in 2067 * iscsit_logout_post_handler_diffcid() as to give enough 2068 * time for any non immediate command's CmdSN to be 2069 * acknowledged on the connection in question. 2070 * 2071 * Here we simply make sure the CID is still around. 2072 */ 2073 l_conn = iscsit_get_conn_from_cid(sess, 2074 cmd->logout_cid); 2075 if (!l_conn) { 2076 cmd->logout_response = ISCSI_LOGOUT_CID_NOT_FOUND; 2077 iscsit_add_cmd_to_response_queue(cmd, conn, 2078 cmd->i_state); 2079 return 0; 2080 } 2081 2082 iscsit_dec_conn_usage_count(l_conn); 2083 } 2084 2085 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); 2086 2087 return 0; 2088 } 2089 2090 int iscsit_logout_removeconnforrecovery(struct iscsi_cmd *cmd, struct iscsi_conn *conn) 2091 { 2092 struct iscsi_session *sess = conn->sess; 2093 2094 pr_debug("Received explicit REMOVECONNFORRECOVERY logout for" 2095 " CID: %hu on CID: %hu.\n", cmd->logout_cid, conn->cid); 2096 2097 if (sess->sess_ops->ErrorRecoveryLevel != 2) { 2098 pr_err("Received Logout Request REMOVECONNFORRECOVERY" 2099 " while ERL!=2.\n"); 2100 cmd->logout_response = ISCSI_LOGOUT_RECOVERY_UNSUPPORTED; 2101 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); 2102 return 0; 2103 } 2104 2105 if (conn->cid == cmd->logout_cid) { 2106 pr_err("Received Logout Request REMOVECONNFORRECOVERY" 2107 " with CID: %hu on CID: %hu, implementation error.\n", 2108 cmd->logout_cid, conn->cid); 2109 cmd->logout_response = ISCSI_LOGOUT_CLEANUP_FAILED; 2110 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); 2111 return 0; 2112 } 2113 2114 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); 2115 2116 return 0; 2117 } 2118 2119 static int iscsit_handle_logout_cmd( 2120 struct iscsi_conn *conn, 2121 unsigned char *buf) 2122 { 2123 int cmdsn_ret, logout_remove = 0; 2124 u8 reason_code = 0; 2125 struct iscsi_cmd *cmd; 2126 struct iscsi_logout *hdr; 2127 struct iscsi_tiqn *tiqn = iscsit_snmp_get_tiqn(conn); 2128 2129 hdr = (struct iscsi_logout *) buf; 2130 reason_code = (hdr->flags & 0x7f); 2131 hdr->itt = be32_to_cpu(hdr->itt); 2132 hdr->cid = be16_to_cpu(hdr->cid); 2133 hdr->cmdsn = be32_to_cpu(hdr->cmdsn); 2134 hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn); 2135 2136 if (tiqn) { 2137 spin_lock(&tiqn->logout_stats.lock); 2138 if (reason_code == ISCSI_LOGOUT_REASON_CLOSE_SESSION) 2139 tiqn->logout_stats.normal_logouts++; 2140 else 2141 tiqn->logout_stats.abnormal_logouts++; 2142 spin_unlock(&tiqn->logout_stats.lock); 2143 } 2144 2145 pr_debug("Got Logout Request ITT: 0x%08x CmdSN: 0x%08x" 2146 " ExpStatSN: 0x%08x Reason: 0x%02x CID: %hu on CID: %hu\n", 2147 hdr->itt, hdr->cmdsn, hdr->exp_statsn, reason_code, 2148 hdr->cid, conn->cid); 2149 2150 if (conn->conn_state != TARG_CONN_STATE_LOGGED_IN) { 2151 pr_err("Received logout request on connection that" 2152 " is not in logged in state, ignoring request.\n"); 2153 return 0; 2154 } 2155 2156 cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); 2157 if (!cmd) 2158 return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES, 1, 2159 buf, conn); 2160 2161 
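	/*
	 * Populate the new command from the Logout Request header.  The
	 * Target Transfer Tag is set to 0xFFFFFFFF, the reserved "no TTT"
	 * value from RFC 3720, since a Logout exchange never carries one.
	 */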
cmd->iscsi_opcode = ISCSI_OP_LOGOUT; 2162 cmd->i_state = ISTATE_SEND_LOGOUTRSP; 2163 cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0); 2164 conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt; 2165 cmd->targ_xfer_tag = 0xFFFFFFFF; 2166 cmd->cmd_sn = hdr->cmdsn; 2167 cmd->exp_stat_sn = hdr->exp_statsn; 2168 cmd->logout_cid = hdr->cid; 2169 cmd->logout_reason = reason_code; 2170 cmd->data_direction = DMA_NONE; 2171 2172 /* 2173 * We need to sleep in these cases (by returning 1) until the Logout 2174 * Response gets sent in the tx thread. 2175 */ 2176 if ((reason_code == ISCSI_LOGOUT_REASON_CLOSE_SESSION) || 2177 ((reason_code == ISCSI_LOGOUT_REASON_CLOSE_CONNECTION) && 2178 (hdr->cid == conn->cid))) 2179 logout_remove = 1; 2180 2181 spin_lock_bh(&conn->cmd_lock); 2182 list_add_tail(&cmd->i_list, &conn->conn_cmd_list); 2183 spin_unlock_bh(&conn->cmd_lock); 2184 2185 if (reason_code != ISCSI_LOGOUT_REASON_RECOVERY) 2186 iscsit_ack_from_expstatsn(conn, hdr->exp_statsn); 2187 2188 /* 2189 * Immediate commands are executed, well, immediately. 2190 * Non-Immediate Logout Commands are executed in CmdSN order. 2191 */ 2192 if (hdr->opcode & ISCSI_OP_IMMEDIATE) { 2193 int ret = iscsit_execute_cmd(cmd, 0); 2194 2195 if (ret < 0) 2196 return ret; 2197 } else { 2198 cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn); 2199 if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) { 2200 logout_remove = 0; 2201 } else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) { 2202 return iscsit_add_reject_from_cmd( 2203 ISCSI_REASON_PROTOCOL_ERROR, 2204 1, 0, buf, cmd); 2205 } 2206 } 2207 2208 return logout_remove; 2209 } 2210 2211 static int iscsit_handle_snack( 2212 struct iscsi_conn *conn, 2213 unsigned char *buf) 2214 { 2215 u32 unpacked_lun; 2216 u64 lun; 2217 struct iscsi_snack *hdr; 2218 2219 hdr = (struct iscsi_snack *) buf; 2220 hdr->flags &= ~ISCSI_FLAG_CMD_FINAL; 2221 lun = get_unaligned_le64(&hdr->lun); 2222 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun); 2223 hdr->itt = be32_to_cpu(hdr->itt); 2224 hdr->ttt = be32_to_cpu(hdr->ttt); 2225 hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn); 2226 hdr->begrun = be32_to_cpu(hdr->begrun); 2227 hdr->runlength = be32_to_cpu(hdr->runlength); 2228 2229 pr_debug("Got ISCSI_INIT_SNACK, ITT: 0x%08x, ExpStatSN:" 2230 " 0x%08x, Type: 0x%02x, BegRun: 0x%08x, RunLength: 0x%08x," 2231 " CID: %hu\n", hdr->itt, hdr->exp_statsn, hdr->flags, 2232 hdr->begrun, hdr->runlength, conn->cid); 2233 2234 if (!conn->sess->sess_ops->ErrorRecoveryLevel) { 2235 pr_err("Initiator sent SNACK request while in" 2236 " ErrorRecoveryLevel=0.\n"); 2237 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1, 2238 buf, conn); 2239 } 2240 /* 2241 * SNACK_DATA and SNACK_R2T are both 0, so check which function to 2242 * call from inside iscsi_send_recovery_datain_or_r2t(). 
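 *
 * The SNACK Type lives in the low bits of the flags byte; the switch
 * below masks it with ISCSI_FLAG_SNACK_TYPE_MASK to tell Data/R2T,
 * Status, DataACK and R-Data SNACK requests apart.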
2243 */ 2244 switch (hdr->flags & ISCSI_FLAG_SNACK_TYPE_MASK) { 2245 case 0: 2246 return iscsit_handle_recovery_datain_or_r2t(conn, buf, 2247 hdr->itt, hdr->ttt, hdr->begrun, hdr->runlength); 2248 case ISCSI_FLAG_SNACK_TYPE_STATUS: 2249 return iscsit_handle_status_snack(conn, hdr->itt, hdr->ttt, 2250 hdr->begrun, hdr->runlength); 2251 case ISCSI_FLAG_SNACK_TYPE_DATA_ACK: 2252 return iscsit_handle_data_ack(conn, hdr->ttt, hdr->begrun, 2253 hdr->runlength); 2254 case ISCSI_FLAG_SNACK_TYPE_RDATA: 2255 /* FIXME: Support R-Data SNACK */ 2256 pr_err("R-Data SNACK Not Supported.\n"); 2257 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1, 2258 buf, conn); 2259 default: 2260 pr_err("Unknown SNACK type 0x%02x, protocol" 2261 " error.\n", hdr->flags & 0x0f); 2262 return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1, 2263 buf, conn); 2264 } 2265 2266 return 0; 2267 } 2268 2269 static void iscsit_rx_thread_wait_for_tcp(struct iscsi_conn *conn) 2270 { 2271 if ((conn->sock->sk->sk_shutdown & SEND_SHUTDOWN) || 2272 (conn->sock->sk->sk_shutdown & RCV_SHUTDOWN)) { 2273 wait_for_completion_interruptible_timeout( 2274 &conn->rx_half_close_comp, 2275 ISCSI_RX_THREAD_TCP_TIMEOUT * HZ); 2276 } 2277 } 2278 2279 static int iscsit_handle_immediate_data( 2280 struct iscsi_cmd *cmd, 2281 unsigned char *buf, 2282 u32 length) 2283 { 2284 int iov_ret, rx_got = 0, rx_size = 0; 2285 u32 checksum, iov_count = 0, padding = 0; 2286 struct iscsi_conn *conn = cmd->conn; 2287 struct kvec *iov; 2288 2289 iov_ret = iscsit_map_iovec(cmd, cmd->iov_data, cmd->write_data_done, length); 2290 if (iov_ret < 0) 2291 return IMMEDIATE_DATA_CANNOT_RECOVER; 2292 2293 rx_size = length; 2294 iov_count = iov_ret; 2295 iov = &cmd->iov_data[0]; 2296 2297 padding = ((-length) & 3); 2298 if (padding != 0) { 2299 iov[iov_count].iov_base = cmd->pad_bytes; 2300 iov[iov_count++].iov_len = padding; 2301 rx_size += padding; 2302 } 2303 2304 if (conn->conn_ops->DataDigest) { 2305 iov[iov_count].iov_base = &checksum; 2306 iov[iov_count++].iov_len = ISCSI_CRC_LEN; 2307 rx_size += ISCSI_CRC_LEN; 2308 } 2309 2310 rx_got = rx_data(conn, &cmd->iov_data[0], iov_count, rx_size); 2311 2312 iscsit_unmap_iovec(cmd); 2313 2314 if (rx_got != rx_size) { 2315 iscsit_rx_thread_wait_for_tcp(conn); 2316 return IMMEDIATE_DATA_CANNOT_RECOVER; 2317 } 2318 2319 if (conn->conn_ops->DataDigest) { 2320 u32 data_crc; 2321 2322 data_crc = iscsit_do_crypto_hash_sg(&conn->conn_rx_hash, cmd, 2323 cmd->write_data_done, length, padding, 2324 cmd->pad_bytes); 2325 2326 if (checksum != data_crc) { 2327 pr_err("ImmediateData CRC32C DataDigest 0x%08x" 2328 " does not match computed 0x%08x\n", checksum, 2329 data_crc); 2330 2331 if (!conn->sess->sess_ops->ErrorRecoveryLevel) { 2332 pr_err("Unable to recover from" 2333 " Immediate Data digest failure while" 2334 " in ERL=0.\n"); 2335 iscsit_add_reject_from_cmd( 2336 ISCSI_REASON_DATA_DIGEST_ERROR, 2337 1, 0, buf, cmd); 2338 return IMMEDIATE_DATA_CANNOT_RECOVER; 2339 } else { 2340 iscsit_add_reject_from_cmd( 2341 ISCSI_REASON_DATA_DIGEST_ERROR, 2342 0, 0, buf, cmd); 2343 return IMMEDIATE_DATA_ERL1_CRC_FAILURE; 2344 } 2345 } else { 2346 pr_debug("Got CRC32C DataDigest 0x%08x for" 2347 " %u bytes of Immediate Data\n", checksum, 2348 length); 2349 } 2350 } 2351 2352 cmd->write_data_done += length; 2353 2354 if (cmd->write_data_done == cmd->data_length) { 2355 spin_lock_bh(&cmd->istate_lock); 2356 cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT; 2357 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT; 2358 spin_unlock_bh(&cmd->istate_lock); 2359 } 2360 
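	/*
	 * The immediate data segment arrived in full and, when DataDigest
	 * was negotiated, passed the CRC32C check above; digest failures
	 * return IMMEDIATE_DATA_ERL1_CRC_FAILURE or
	 * IMMEDIATE_DATA_CANNOT_RECOVER instead.
	 */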
2361 return IMMEDIATE_DATA_NORMAL_OPERATION; 2362 } 2363 2364 /* 2365 * Called with sess->conn_lock held. 2366 */ 2367 /* #warning iscsi_build_conn_drop_async_message() only sends out on connections 2368 with active network interface */ 2369 static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn) 2370 { 2371 struct iscsi_cmd *cmd; 2372 struct iscsi_conn *conn_p; 2373 2374 /* 2375 * Only send a Asynchronous Message on connections whos network 2376 * interface is still functional. 2377 */ 2378 list_for_each_entry(conn_p, &conn->sess->sess_conn_list, conn_list) { 2379 if (conn_p->conn_state == TARG_CONN_STATE_LOGGED_IN) { 2380 iscsit_inc_conn_usage_count(conn_p); 2381 break; 2382 } 2383 } 2384 2385 if (!conn_p) 2386 return; 2387 2388 cmd = iscsit_allocate_cmd(conn_p, GFP_KERNEL); 2389 if (!cmd) { 2390 iscsit_dec_conn_usage_count(conn_p); 2391 return; 2392 } 2393 2394 cmd->logout_cid = conn->cid; 2395 cmd->iscsi_opcode = ISCSI_OP_ASYNC_EVENT; 2396 cmd->i_state = ISTATE_SEND_ASYNCMSG; 2397 2398 spin_lock_bh(&conn_p->cmd_lock); 2399 list_add_tail(&cmd->i_list, &conn_p->conn_cmd_list); 2400 spin_unlock_bh(&conn_p->cmd_lock); 2401 2402 iscsit_add_cmd_to_response_queue(cmd, conn_p, cmd->i_state); 2403 iscsit_dec_conn_usage_count(conn_p); 2404 } 2405 2406 static int iscsit_send_conn_drop_async_message( 2407 struct iscsi_cmd *cmd, 2408 struct iscsi_conn *conn) 2409 { 2410 struct iscsi_async *hdr; 2411 2412 cmd->tx_size = ISCSI_HDR_LEN; 2413 cmd->iscsi_opcode = ISCSI_OP_ASYNC_EVENT; 2414 2415 hdr = (struct iscsi_async *) cmd->pdu; 2416 hdr->opcode = ISCSI_OP_ASYNC_EVENT; 2417 hdr->flags = ISCSI_FLAG_CMD_FINAL; 2418 cmd->init_task_tag = 0xFFFFFFFF; 2419 cmd->targ_xfer_tag = 0xFFFFFFFF; 2420 put_unaligned_be64(0xFFFFFFFFFFFFFFFFULL, &hdr->rsvd4[0]); 2421 cmd->stat_sn = conn->stat_sn++; 2422 hdr->statsn = cpu_to_be32(cmd->stat_sn); 2423 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 2424 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); 2425 hdr->async_event = ISCSI_ASYNC_MSG_DROPPING_CONNECTION; 2426 hdr->param1 = cpu_to_be16(cmd->logout_cid); 2427 hdr->param2 = cpu_to_be16(conn->sess->sess_ops->DefaultTime2Wait); 2428 hdr->param3 = cpu_to_be16(conn->sess->sess_ops->DefaultTime2Retain); 2429 2430 if (conn->conn_ops->HeaderDigest) { 2431 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 2432 2433 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 2434 (unsigned char *)hdr, ISCSI_HDR_LEN, 2435 0, NULL, (u8 *)header_digest); 2436 2437 cmd->tx_size += ISCSI_CRC_LEN; 2438 pr_debug("Attaching CRC32C HeaderDigest to" 2439 " Async Message 0x%08x\n", *header_digest); 2440 } 2441 2442 cmd->iov_misc[0].iov_base = cmd->pdu; 2443 cmd->iov_misc[0].iov_len = cmd->tx_size; 2444 cmd->iov_misc_count = 1; 2445 2446 pr_debug("Sending Connection Dropped Async Message StatSN:" 2447 " 0x%08x, for CID: %hu on CID: %hu\n", cmd->stat_sn, 2448 cmd->logout_cid, conn->cid); 2449 return 0; 2450 } 2451 2452 static int iscsit_send_data_in( 2453 struct iscsi_cmd *cmd, 2454 struct iscsi_conn *conn, 2455 int *eodr) 2456 { 2457 int iov_ret = 0, set_statsn = 0; 2458 u32 iov_count = 0, tx_size = 0; 2459 struct iscsi_datain datain; 2460 struct iscsi_datain_req *dr; 2461 struct iscsi_data_rsp *hdr; 2462 struct kvec *iov; 2463 2464 memset(&datain, 0, sizeof(struct iscsi_datain)); 2465 dr = iscsit_get_datain_values(cmd, &datain); 2466 if (!dr) { 2467 pr_err("iscsit_get_datain_values failed for ITT: 0x%08x\n", 2468 cmd->init_task_tag); 2469 return -1; 2470 } 2471 2472 /* 2473 * Be paranoid and double check the logic 
for now. 2474 */ 2475 if ((datain.offset + datain.length) > cmd->data_length) { 2476 pr_err("Command ITT: 0x%08x, datain.offset: %u and" 2477 " datain.length: %u exceeds cmd->data_length: %u\n", 2478 cmd->init_task_tag, datain.offset, datain.length, 2479 cmd->data_length); 2480 return -1; 2481 } 2482 2483 spin_lock_bh(&conn->sess->session_stats_lock); 2484 conn->sess->tx_data_octets += datain.length; 2485 if (conn->sess->se_sess->se_node_acl) { 2486 spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock); 2487 conn->sess->se_sess->se_node_acl->read_bytes += datain.length; 2488 spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock); 2489 } 2490 spin_unlock_bh(&conn->sess->session_stats_lock); 2491 /* 2492 * Special case for successfully execution w/ both DATAIN 2493 * and Sense Data. 2494 */ 2495 if ((datain.flags & ISCSI_FLAG_DATA_STATUS) && 2496 (cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)) 2497 datain.flags &= ~ISCSI_FLAG_DATA_STATUS; 2498 else { 2499 if ((dr->dr_complete == DATAIN_COMPLETE_NORMAL) || 2500 (dr->dr_complete == DATAIN_COMPLETE_CONNECTION_RECOVERY)) { 2501 iscsit_increment_maxcmdsn(cmd, conn->sess); 2502 cmd->stat_sn = conn->stat_sn++; 2503 set_statsn = 1; 2504 } else if (dr->dr_complete == 2505 DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY) 2506 set_statsn = 1; 2507 } 2508 2509 hdr = (struct iscsi_data_rsp *) cmd->pdu; 2510 memset(hdr, 0, ISCSI_HDR_LEN); 2511 hdr->opcode = ISCSI_OP_SCSI_DATA_IN; 2512 hdr->flags = datain.flags; 2513 if (hdr->flags & ISCSI_FLAG_DATA_STATUS) { 2514 if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) { 2515 hdr->flags |= ISCSI_FLAG_DATA_OVERFLOW; 2516 hdr->residual_count = cpu_to_be32(cmd->residual_count); 2517 } else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) { 2518 hdr->flags |= ISCSI_FLAG_DATA_UNDERFLOW; 2519 hdr->residual_count = cpu_to_be32(cmd->residual_count); 2520 } 2521 } 2522 hton24(hdr->dlength, datain.length); 2523 if (hdr->flags & ISCSI_FLAG_DATA_ACK) 2524 int_to_scsilun(cmd->se_cmd.orig_fe_lun, 2525 (struct scsi_lun *)&hdr->lun); 2526 else 2527 put_unaligned_le64(0xFFFFFFFFFFFFFFFFULL, &hdr->lun); 2528 2529 hdr->itt = cpu_to_be32(cmd->init_task_tag); 2530 hdr->ttt = (hdr->flags & ISCSI_FLAG_DATA_ACK) ? 2531 cpu_to_be32(cmd->targ_xfer_tag) : 2532 0xFFFFFFFF; 2533 hdr->statsn = (set_statsn) ? 
cpu_to_be32(cmd->stat_sn) : 2534 0xFFFFFFFF; 2535 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 2536 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); 2537 hdr->datasn = cpu_to_be32(datain.data_sn); 2538 hdr->offset = cpu_to_be32(datain.offset); 2539 2540 iov = &cmd->iov_data[0]; 2541 iov[iov_count].iov_base = cmd->pdu; 2542 iov[iov_count++].iov_len = ISCSI_HDR_LEN; 2543 tx_size += ISCSI_HDR_LEN; 2544 2545 if (conn->conn_ops->HeaderDigest) { 2546 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 2547 2548 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 2549 (unsigned char *)hdr, ISCSI_HDR_LEN, 2550 0, NULL, (u8 *)header_digest); 2551 2552 iov[0].iov_len += ISCSI_CRC_LEN; 2553 tx_size += ISCSI_CRC_LEN; 2554 2555 pr_debug("Attaching CRC32 HeaderDigest" 2556 " for DataIN PDU 0x%08x\n", *header_digest); 2557 } 2558 2559 iov_ret = iscsit_map_iovec(cmd, &cmd->iov_data[1], datain.offset, datain.length); 2560 if (iov_ret < 0) 2561 return -1; 2562 2563 iov_count += iov_ret; 2564 tx_size += datain.length; 2565 2566 cmd->padding = ((-datain.length) & 3); 2567 if (cmd->padding) { 2568 iov[iov_count].iov_base = cmd->pad_bytes; 2569 iov[iov_count++].iov_len = cmd->padding; 2570 tx_size += cmd->padding; 2571 2572 pr_debug("Attaching %u padding bytes\n", 2573 cmd->padding); 2574 } 2575 if (conn->conn_ops->DataDigest) { 2576 cmd->data_crc = iscsit_do_crypto_hash_sg(&conn->conn_tx_hash, cmd, 2577 datain.offset, datain.length, cmd->padding, cmd->pad_bytes); 2578 2579 iov[iov_count].iov_base = &cmd->data_crc; 2580 iov[iov_count++].iov_len = ISCSI_CRC_LEN; 2581 tx_size += ISCSI_CRC_LEN; 2582 2583 pr_debug("Attached CRC32C DataDigest %d bytes, crc" 2584 " 0x%08x\n", datain.length+cmd->padding, cmd->data_crc); 2585 } 2586 2587 cmd->iov_data_count = iov_count; 2588 cmd->tx_size = tx_size; 2589 2590 pr_debug("Built DataIN ITT: 0x%08x, StatSN: 0x%08x," 2591 " DataSN: 0x%08x, Offset: %u, Length: %u, CID: %hu\n", 2592 cmd->init_task_tag, ntohl(hdr->statsn), ntohl(hdr->datasn), 2593 ntohl(hdr->offset), datain.length, conn->cid); 2594 2595 if (dr->dr_complete) { 2596 *eodr = (cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ? 2597 2 : 1; 2598 iscsit_free_datain_req(cmd, dr); 2599 } 2600 2601 return 0; 2602 } 2603 2604 static int iscsit_send_logout_response( 2605 struct iscsi_cmd *cmd, 2606 struct iscsi_conn *conn) 2607 { 2608 int niov = 0, tx_size; 2609 struct iscsi_conn *logout_conn = NULL; 2610 struct iscsi_conn_recovery *cr = NULL; 2611 struct iscsi_session *sess = conn->sess; 2612 struct kvec *iov; 2613 struct iscsi_logout_rsp *hdr; 2614 /* 2615 * The actual shutting down of Sessions and/or Connections 2616 * for CLOSESESSION and CLOSECONNECTION Logout Requests 2617 * is done in scsi_logout_post_handler(). 2618 */ 2619 switch (cmd->logout_reason) { 2620 case ISCSI_LOGOUT_REASON_CLOSE_SESSION: 2621 pr_debug("iSCSI session logout successful, setting" 2622 " logout response to ISCSI_LOGOUT_SUCCESS.\n"); 2623 cmd->logout_response = ISCSI_LOGOUT_SUCCESS; 2624 break; 2625 case ISCSI_LOGOUT_REASON_CLOSE_CONNECTION: 2626 if (cmd->logout_response == ISCSI_LOGOUT_CID_NOT_FOUND) 2627 break; 2628 /* 2629 * For CLOSECONNECTION logout requests carrying 2630 * a matching logout CID -> local CID, the reference 2631 * for the local CID will have been incremented in 2632 * iscsi_logout_closeconnection(). 
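 * The matching usage count decrement is expected to happen only once
 * the Logout Response has actually gone out, via the
 * iscsit_logout_post_handler() path in the TX thread.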
2633 * 2634 * For CLOSECONNECTION logout requests carrying 2635 * a different CID than the connection it arrived 2636 * on, the connection responding to cmd->logout_cid 2637 * is stopped in iscsit_logout_post_handler_diffcid(). 2638 */ 2639 2640 pr_debug("iSCSI CID: %hu logout on CID: %hu" 2641 " successful.\n", cmd->logout_cid, conn->cid); 2642 cmd->logout_response = ISCSI_LOGOUT_SUCCESS; 2643 break; 2644 case ISCSI_LOGOUT_REASON_RECOVERY: 2645 if ((cmd->logout_response == ISCSI_LOGOUT_RECOVERY_UNSUPPORTED) || 2646 (cmd->logout_response == ISCSI_LOGOUT_CLEANUP_FAILED)) 2647 break; 2648 /* 2649 * If the connection is still active from our point of view 2650 * force connection recovery to occur. 2651 */ 2652 logout_conn = iscsit_get_conn_from_cid_rcfr(sess, 2653 cmd->logout_cid); 2654 if ((logout_conn)) { 2655 iscsit_connection_reinstatement_rcfr(logout_conn); 2656 iscsit_dec_conn_usage_count(logout_conn); 2657 } 2658 2659 cr = iscsit_get_inactive_connection_recovery_entry( 2660 conn->sess, cmd->logout_cid); 2661 if (!cr) { 2662 pr_err("Unable to locate CID: %hu for" 2663 " REMOVECONNFORRECOVERY Logout Request.\n", 2664 cmd->logout_cid); 2665 cmd->logout_response = ISCSI_LOGOUT_CID_NOT_FOUND; 2666 break; 2667 } 2668 2669 iscsit_discard_cr_cmds_by_expstatsn(cr, cmd->exp_stat_sn); 2670 2671 pr_debug("iSCSI REMOVECONNFORRECOVERY logout" 2672 " for recovery for CID: %hu on CID: %hu successful.\n", 2673 cmd->logout_cid, conn->cid); 2674 cmd->logout_response = ISCSI_LOGOUT_SUCCESS; 2675 break; 2676 default: 2677 pr_err("Unknown cmd->logout_reason: 0x%02x\n", 2678 cmd->logout_reason); 2679 return -1; 2680 } 2681 2682 tx_size = ISCSI_HDR_LEN; 2683 hdr = (struct iscsi_logout_rsp *)cmd->pdu; 2684 memset(hdr, 0, ISCSI_HDR_LEN); 2685 hdr->opcode = ISCSI_OP_LOGOUT_RSP; 2686 hdr->flags |= ISCSI_FLAG_CMD_FINAL; 2687 hdr->response = cmd->logout_response; 2688 hdr->itt = cpu_to_be32(cmd->init_task_tag); 2689 cmd->stat_sn = conn->stat_sn++; 2690 hdr->statsn = cpu_to_be32(cmd->stat_sn); 2691 2692 iscsit_increment_maxcmdsn(cmd, conn->sess); 2693 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 2694 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); 2695 2696 iov = &cmd->iov_misc[0]; 2697 iov[niov].iov_base = cmd->pdu; 2698 iov[niov++].iov_len = ISCSI_HDR_LEN; 2699 2700 if (conn->conn_ops->HeaderDigest) { 2701 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 2702 2703 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 2704 (unsigned char *)hdr, ISCSI_HDR_LEN, 2705 0, NULL, (u8 *)header_digest); 2706 2707 iov[0].iov_len += ISCSI_CRC_LEN; 2708 tx_size += ISCSI_CRC_LEN; 2709 pr_debug("Attaching CRC32C HeaderDigest to" 2710 " Logout Response 0x%08x\n", *header_digest); 2711 } 2712 cmd->iov_misc_count = niov; 2713 cmd->tx_size = tx_size; 2714 2715 pr_debug("Sending Logout Response ITT: 0x%08x StatSN:" 2716 " 0x%08x Response: 0x%02x CID: %hu on CID: %hu\n", 2717 cmd->init_task_tag, cmd->stat_sn, hdr->response, 2718 cmd->logout_cid, conn->cid); 2719 2720 return 0; 2721 } 2722 2723 /* 2724 * Unsolicited NOPIN, either requesting a response or not. 
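 *
 * When a response is requested the TX thread (re)arms the NopIn
 * response timer before calling in here.  In either case StatSN is
 * reported but not advanced for an unsolicited NopIn.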
2725 */ 2726 static int iscsit_send_unsolicited_nopin( 2727 struct iscsi_cmd *cmd, 2728 struct iscsi_conn *conn, 2729 int want_response) 2730 { 2731 int tx_size = ISCSI_HDR_LEN; 2732 struct iscsi_nopin *hdr; 2733 2734 hdr = (struct iscsi_nopin *) cmd->pdu; 2735 memset(hdr, 0, ISCSI_HDR_LEN); 2736 hdr->opcode = ISCSI_OP_NOOP_IN; 2737 hdr->flags |= ISCSI_FLAG_CMD_FINAL; 2738 hdr->itt = cpu_to_be32(cmd->init_task_tag); 2739 hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag); 2740 cmd->stat_sn = conn->stat_sn; 2741 hdr->statsn = cpu_to_be32(cmd->stat_sn); 2742 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 2743 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); 2744 2745 if (conn->conn_ops->HeaderDigest) { 2746 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 2747 2748 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 2749 (unsigned char *)hdr, ISCSI_HDR_LEN, 2750 0, NULL, (u8 *)header_digest); 2751 2752 tx_size += ISCSI_CRC_LEN; 2753 pr_debug("Attaching CRC32C HeaderDigest to" 2754 " NopIN 0x%08x\n", *header_digest); 2755 } 2756 2757 cmd->iov_misc[0].iov_base = cmd->pdu; 2758 cmd->iov_misc[0].iov_len = tx_size; 2759 cmd->iov_misc_count = 1; 2760 cmd->tx_size = tx_size; 2761 2762 pr_debug("Sending Unsolicited NOPIN TTT: 0x%08x StatSN:" 2763 " 0x%08x CID: %hu\n", hdr->ttt, cmd->stat_sn, conn->cid); 2764 2765 return 0; 2766 } 2767 2768 static int iscsit_send_nopin_response( 2769 struct iscsi_cmd *cmd, 2770 struct iscsi_conn *conn) 2771 { 2772 int niov = 0, tx_size; 2773 u32 padding = 0; 2774 struct kvec *iov; 2775 struct iscsi_nopin *hdr; 2776 2777 tx_size = ISCSI_HDR_LEN; 2778 hdr = (struct iscsi_nopin *) cmd->pdu; 2779 memset(hdr, 0, ISCSI_HDR_LEN); 2780 hdr->opcode = ISCSI_OP_NOOP_IN; 2781 hdr->flags |= ISCSI_FLAG_CMD_FINAL; 2782 hton24(hdr->dlength, cmd->buf_ptr_size); 2783 put_unaligned_le64(0xFFFFFFFFFFFFFFFFULL, &hdr->lun); 2784 hdr->itt = cpu_to_be32(cmd->init_task_tag); 2785 hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag); 2786 cmd->stat_sn = conn->stat_sn++; 2787 hdr->statsn = cpu_to_be32(cmd->stat_sn); 2788 2789 iscsit_increment_maxcmdsn(cmd, conn->sess); 2790 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 2791 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); 2792 2793 iov = &cmd->iov_misc[0]; 2794 iov[niov].iov_base = cmd->pdu; 2795 iov[niov++].iov_len = ISCSI_HDR_LEN; 2796 2797 if (conn->conn_ops->HeaderDigest) { 2798 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 2799 2800 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 2801 (unsigned char *)hdr, ISCSI_HDR_LEN, 2802 0, NULL, (u8 *)header_digest); 2803 2804 iov[0].iov_len += ISCSI_CRC_LEN; 2805 tx_size += ISCSI_CRC_LEN; 2806 pr_debug("Attaching CRC32C HeaderDigest" 2807 " to NopIn 0x%08x\n", *header_digest); 2808 } 2809 2810 /* 2811 * NOPOUT Ping Data is attached to struct iscsi_cmd->buf_ptr. 2812 * NOPOUT DataSegmentLength is at struct iscsi_cmd->buf_ptr_size. 
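 *
 * The ping data is echoed back verbatim, padded to a 4-byte
 * boundary (e.g. a 13 byte payload gets ((-13) & 3) == 3 pad bytes)
 * and covered by a trailing DataDigest when one was negotiated.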
2813 */ 2814 if (cmd->buf_ptr_size) { 2815 iov[niov].iov_base = cmd->buf_ptr; 2816 iov[niov++].iov_len = cmd->buf_ptr_size; 2817 tx_size += cmd->buf_ptr_size; 2818 2819 pr_debug("Echoing back %u bytes of ping" 2820 " data.\n", cmd->buf_ptr_size); 2821 2822 padding = ((-cmd->buf_ptr_size) & 3); 2823 if (padding != 0) { 2824 iov[niov].iov_base = &cmd->pad_bytes; 2825 iov[niov++].iov_len = padding; 2826 tx_size += padding; 2827 pr_debug("Attaching %u additional" 2828 " padding bytes.\n", padding); 2829 } 2830 if (conn->conn_ops->DataDigest) { 2831 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 2832 cmd->buf_ptr, cmd->buf_ptr_size, 2833 padding, (u8 *)&cmd->pad_bytes, 2834 (u8 *)&cmd->data_crc); 2835 2836 iov[niov].iov_base = &cmd->data_crc; 2837 iov[niov++].iov_len = ISCSI_CRC_LEN; 2838 tx_size += ISCSI_CRC_LEN; 2839 pr_debug("Attached DataDigest for %u" 2840 " bytes of ping data, CRC 0x%08x\n", 2841 cmd->buf_ptr_size, cmd->data_crc); 2842 } 2843 } 2844 2845 cmd->iov_misc_count = niov; 2846 cmd->tx_size = tx_size; 2847 2848 pr_debug("Sending NOPIN Response ITT: 0x%08x, TTT:" 2849 " 0x%08x, StatSN: 0x%08x, Length %u\n", cmd->init_task_tag, 2850 cmd->targ_xfer_tag, cmd->stat_sn, cmd->buf_ptr_size); 2851 2852 return 0; 2853 } 2854 2855 int iscsit_send_r2t( 2856 struct iscsi_cmd *cmd, 2857 struct iscsi_conn *conn) 2858 { 2859 int tx_size = 0; 2860 struct iscsi_r2t *r2t; 2861 struct iscsi_r2t_rsp *hdr; 2862 2863 r2t = iscsit_get_r2t_from_list(cmd); 2864 if (!r2t) 2865 return -1; 2866 2867 hdr = (struct iscsi_r2t_rsp *) cmd->pdu; 2868 memset(hdr, 0, ISCSI_HDR_LEN); 2869 hdr->opcode = ISCSI_OP_R2T; 2870 hdr->flags |= ISCSI_FLAG_CMD_FINAL; 2871 int_to_scsilun(cmd->se_cmd.orig_fe_lun, 2872 (struct scsi_lun *)&hdr->lun); 2873 hdr->itt = cpu_to_be32(cmd->init_task_tag); 2874 spin_lock_bh(&conn->sess->ttt_lock); 2875 r2t->targ_xfer_tag = conn->sess->targ_xfer_tag++; 2876 if (r2t->targ_xfer_tag == 0xFFFFFFFF) 2877 r2t->targ_xfer_tag = conn->sess->targ_xfer_tag++; 2878 spin_unlock_bh(&conn->sess->ttt_lock); 2879 hdr->ttt = cpu_to_be32(r2t->targ_xfer_tag); 2880 hdr->statsn = cpu_to_be32(conn->stat_sn); 2881 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 2882 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); 2883 hdr->r2tsn = cpu_to_be32(r2t->r2t_sn); 2884 hdr->data_offset = cpu_to_be32(r2t->offset); 2885 hdr->data_length = cpu_to_be32(r2t->xfer_len); 2886 2887 cmd->iov_misc[0].iov_base = cmd->pdu; 2888 cmd->iov_misc[0].iov_len = ISCSI_HDR_LEN; 2889 tx_size += ISCSI_HDR_LEN; 2890 2891 if (conn->conn_ops->HeaderDigest) { 2892 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 2893 2894 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 2895 (unsigned char *)hdr, ISCSI_HDR_LEN, 2896 0, NULL, (u8 *)header_digest); 2897 2898 cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN; 2899 tx_size += ISCSI_CRC_LEN; 2900 pr_debug("Attaching CRC32 HeaderDigest for R2T" 2901 " PDU 0x%08x\n", *header_digest); 2902 } 2903 2904 pr_debug("Built %sR2T, ITT: 0x%08x, TTT: 0x%08x, StatSN:" 2905 " 0x%08x, R2TSN: 0x%08x, Offset: %u, DDTL: %u, CID: %hu\n", 2906 (!r2t->recovery_r2t) ? "" : "Recovery ", cmd->init_task_tag, 2907 r2t->targ_xfer_tag, ntohl(hdr->statsn), r2t->r2t_sn, 2908 r2t->offset, r2t->xfer_len, conn->cid); 2909 2910 cmd->iov_misc_count = 1; 2911 cmd->tx_size = tx_size; 2912 2913 spin_lock_bh(&cmd->r2t_lock); 2914 r2t->sent_r2t = 1; 2915 spin_unlock_bh(&cmd->r2t_lock); 2916 2917 return 0; 2918 } 2919 2920 /* 2921 * type 0: Normal Operation. 2922 * type 1: Called from Storage Transport. 
2923 * type 2: Called from iscsi_task_reassign_complete_write() for 2924 * connection recovery. 2925 */ 2926 int iscsit_build_r2ts_for_cmd( 2927 struct iscsi_cmd *cmd, 2928 struct iscsi_conn *conn, 2929 int type) 2930 { 2931 int first_r2t = 1; 2932 u32 offset = 0, xfer_len = 0; 2933 2934 spin_lock_bh(&cmd->r2t_lock); 2935 if (cmd->cmd_flags & ICF_SENT_LAST_R2T) { 2936 spin_unlock_bh(&cmd->r2t_lock); 2937 return 0; 2938 } 2939 2940 if (conn->sess->sess_ops->DataSequenceInOrder && (type != 2)) 2941 if (cmd->r2t_offset < cmd->write_data_done) 2942 cmd->r2t_offset = cmd->write_data_done; 2943 2944 while (cmd->outstanding_r2ts < conn->sess->sess_ops->MaxOutstandingR2T) { 2945 if (conn->sess->sess_ops->DataSequenceInOrder) { 2946 offset = cmd->r2t_offset; 2947 2948 if (first_r2t && (type == 2)) { 2949 xfer_len = ((offset + 2950 (conn->sess->sess_ops->MaxBurstLength - 2951 cmd->next_burst_len) > 2952 cmd->data_length) ? 2953 (cmd->data_length - offset) : 2954 (conn->sess->sess_ops->MaxBurstLength - 2955 cmd->next_burst_len)); 2956 } else { 2957 xfer_len = ((offset + 2958 conn->sess->sess_ops->MaxBurstLength) > 2959 cmd->data_length) ? 2960 (cmd->data_length - offset) : 2961 conn->sess->sess_ops->MaxBurstLength; 2962 } 2963 cmd->r2t_offset += xfer_len; 2964 2965 if (cmd->r2t_offset == cmd->data_length) 2966 cmd->cmd_flags |= ICF_SENT_LAST_R2T; 2967 } else { 2968 struct iscsi_seq *seq; 2969 2970 seq = iscsit_get_seq_holder_for_r2t(cmd); 2971 if (!seq) { 2972 spin_unlock_bh(&cmd->r2t_lock); 2973 return -1; 2974 } 2975 2976 offset = seq->offset; 2977 xfer_len = seq->xfer_len; 2978 2979 if (cmd->seq_send_order == cmd->seq_count) 2980 cmd->cmd_flags |= ICF_SENT_LAST_R2T; 2981 } 2982 cmd->outstanding_r2ts++; 2983 first_r2t = 0; 2984 2985 if (iscsit_add_r2t_to_list(cmd, offset, xfer_len, 0, 0) < 0) { 2986 spin_unlock_bh(&cmd->r2t_lock); 2987 return -1; 2988 } 2989 2990 if (cmd->cmd_flags & ICF_SENT_LAST_R2T) 2991 break; 2992 } 2993 spin_unlock_bh(&cmd->r2t_lock); 2994 2995 return 0; 2996 } 2997 2998 static int iscsit_send_status( 2999 struct iscsi_cmd *cmd, 3000 struct iscsi_conn *conn) 3001 { 3002 u8 iov_count = 0, recovery; 3003 u32 padding = 0, tx_size = 0; 3004 struct iscsi_scsi_rsp *hdr; 3005 struct kvec *iov; 3006 3007 recovery = (cmd->i_state != ISTATE_SEND_STATUS); 3008 if (!recovery) 3009 cmd->stat_sn = conn->stat_sn++; 3010 3011 spin_lock_bh(&conn->sess->session_stats_lock); 3012 conn->sess->rsp_pdus++; 3013 spin_unlock_bh(&conn->sess->session_stats_lock); 3014 3015 hdr = (struct iscsi_scsi_rsp *) cmd->pdu; 3016 memset(hdr, 0, ISCSI_HDR_LEN); 3017 hdr->opcode = ISCSI_OP_SCSI_CMD_RSP; 3018 hdr->flags |= ISCSI_FLAG_CMD_FINAL; 3019 if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) { 3020 hdr->flags |= ISCSI_FLAG_CMD_OVERFLOW; 3021 hdr->residual_count = cpu_to_be32(cmd->residual_count); 3022 } else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) { 3023 hdr->flags |= ISCSI_FLAG_CMD_UNDERFLOW; 3024 hdr->residual_count = cpu_to_be32(cmd->residual_count); 3025 } 3026 hdr->response = cmd->iscsi_response; 3027 hdr->cmd_status = cmd->se_cmd.scsi_status; 3028 hdr->itt = cpu_to_be32(cmd->init_task_tag); 3029 hdr->statsn = cpu_to_be32(cmd->stat_sn); 3030 3031 iscsit_increment_maxcmdsn(cmd, conn->sess); 3032 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 3033 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); 3034 3035 iov = &cmd->iov_misc[0]; 3036 iov[iov_count].iov_base = cmd->pdu; 3037 iov[iov_count++].iov_len = ISCSI_HDR_LEN; 3038 tx_size += ISCSI_HDR_LEN; 3039 3040 /* 3041 * Attach SENSE DATA 
payload to iSCSI Response PDU 3042 */ 3043 if (cmd->se_cmd.sense_buffer && 3044 ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) || 3045 (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) { 3046 padding = -(cmd->se_cmd.scsi_sense_length) & 3; 3047 hton24(hdr->dlength, cmd->se_cmd.scsi_sense_length); 3048 iov[iov_count].iov_base = cmd->se_cmd.sense_buffer; 3049 iov[iov_count++].iov_len = 3050 (cmd->se_cmd.scsi_sense_length + padding); 3051 tx_size += cmd->se_cmd.scsi_sense_length; 3052 3053 if (padding) { 3054 memset(cmd->se_cmd.sense_buffer + 3055 cmd->se_cmd.scsi_sense_length, 0, padding); 3056 tx_size += padding; 3057 pr_debug("Adding %u bytes of padding to" 3058 " SENSE.\n", padding); 3059 } 3060 3061 if (conn->conn_ops->DataDigest) { 3062 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 3063 cmd->se_cmd.sense_buffer, 3064 (cmd->se_cmd.scsi_sense_length + padding), 3065 0, NULL, (u8 *)&cmd->data_crc); 3066 3067 iov[iov_count].iov_base = &cmd->data_crc; 3068 iov[iov_count++].iov_len = ISCSI_CRC_LEN; 3069 tx_size += ISCSI_CRC_LEN; 3070 3071 pr_debug("Attaching CRC32 DataDigest for" 3072 " SENSE, %u bytes CRC 0x%08x\n", 3073 (cmd->se_cmd.scsi_sense_length + padding), 3074 cmd->data_crc); 3075 } 3076 3077 pr_debug("Attaching SENSE DATA: %u bytes to iSCSI" 3078 " Response PDU\n", 3079 cmd->se_cmd.scsi_sense_length); 3080 } 3081 3082 if (conn->conn_ops->HeaderDigest) { 3083 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 3084 3085 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 3086 (unsigned char *)hdr, ISCSI_HDR_LEN, 3087 0, NULL, (u8 *)header_digest); 3088 3089 iov[0].iov_len += ISCSI_CRC_LEN; 3090 tx_size += ISCSI_CRC_LEN; 3091 pr_debug("Attaching CRC32 HeaderDigest for Response" 3092 " PDU 0x%08x\n", *header_digest); 3093 } 3094 3095 cmd->iov_misc_count = iov_count; 3096 cmd->tx_size = tx_size; 3097 3098 pr_debug("Built %sSCSI Response, ITT: 0x%08x, StatSN: 0x%08x," 3099 " Response: 0x%02x, SAM Status: 0x%02x, CID: %hu\n", 3100 (!recovery) ? 
"" : "Recovery ", cmd->init_task_tag, 3101 cmd->stat_sn, 0x00, cmd->se_cmd.scsi_status, conn->cid); 3102 3103 return 0; 3104 } 3105 3106 static u8 iscsit_convert_tcm_tmr_rsp(struct se_tmr_req *se_tmr) 3107 { 3108 switch (se_tmr->response) { 3109 case TMR_FUNCTION_COMPLETE: 3110 return ISCSI_TMF_RSP_COMPLETE; 3111 case TMR_TASK_DOES_NOT_EXIST: 3112 return ISCSI_TMF_RSP_NO_TASK; 3113 case TMR_LUN_DOES_NOT_EXIST: 3114 return ISCSI_TMF_RSP_NO_LUN; 3115 case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED: 3116 return ISCSI_TMF_RSP_NOT_SUPPORTED; 3117 case TMR_FUNCTION_AUTHORIZATION_FAILED: 3118 return ISCSI_TMF_RSP_AUTH_FAILED; 3119 case TMR_FUNCTION_REJECTED: 3120 default: 3121 return ISCSI_TMF_RSP_REJECTED; 3122 } 3123 } 3124 3125 static int iscsit_send_task_mgt_rsp( 3126 struct iscsi_cmd *cmd, 3127 struct iscsi_conn *conn) 3128 { 3129 struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req; 3130 struct iscsi_tm_rsp *hdr; 3131 u32 tx_size = 0; 3132 3133 hdr = (struct iscsi_tm_rsp *) cmd->pdu; 3134 memset(hdr, 0, ISCSI_HDR_LEN); 3135 hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP; 3136 hdr->response = iscsit_convert_tcm_tmr_rsp(se_tmr); 3137 hdr->itt = cpu_to_be32(cmd->init_task_tag); 3138 cmd->stat_sn = conn->stat_sn++; 3139 hdr->statsn = cpu_to_be32(cmd->stat_sn); 3140 3141 iscsit_increment_maxcmdsn(cmd, conn->sess); 3142 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 3143 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); 3144 3145 cmd->iov_misc[0].iov_base = cmd->pdu; 3146 cmd->iov_misc[0].iov_len = ISCSI_HDR_LEN; 3147 tx_size += ISCSI_HDR_LEN; 3148 3149 if (conn->conn_ops->HeaderDigest) { 3150 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 3151 3152 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 3153 (unsigned char *)hdr, ISCSI_HDR_LEN, 3154 0, NULL, (u8 *)header_digest); 3155 3156 cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN; 3157 tx_size += ISCSI_CRC_LEN; 3158 pr_debug("Attaching CRC32 HeaderDigest for Task" 3159 " Mgmt Response PDU 0x%08x\n", *header_digest); 3160 } 3161 3162 cmd->iov_misc_count = 1; 3163 cmd->tx_size = tx_size; 3164 3165 pr_debug("Built Task Management Response ITT: 0x%08x," 3166 " StatSN: 0x%08x, Response: 0x%02x, CID: %hu\n", 3167 cmd->init_task_tag, cmd->stat_sn, hdr->response, conn->cid); 3168 3169 return 0; 3170 } 3171 3172 static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd) 3173 { 3174 char *payload = NULL; 3175 struct iscsi_conn *conn = cmd->conn; 3176 struct iscsi_portal_group *tpg; 3177 struct iscsi_tiqn *tiqn; 3178 struct iscsi_tpg_np *tpg_np; 3179 int buffer_len, end_of_buf = 0, len = 0, payload_len = 0; 3180 unsigned char buf[256]; 3181 3182 buffer_len = (conn->conn_ops->MaxRecvDataSegmentLength > 32768) ? 
3183 32768 : conn->conn_ops->MaxRecvDataSegmentLength; 3184 3185 memset(buf, 0, 256); 3186 3187 payload = kzalloc(buffer_len, GFP_KERNEL); 3188 if (!payload) { 3189 pr_err("Unable to allocate memory for sendtargets" 3190 " response.\n"); 3191 return -ENOMEM; 3192 } 3193 3194 spin_lock(&tiqn_lock); 3195 list_for_each_entry(tiqn, &g_tiqn_list, tiqn_list) { 3196 len = sprintf(buf, "TargetName=%s", tiqn->tiqn); 3197 len += 1; 3198 3199 if ((len + payload_len) > buffer_len) { 3200 spin_unlock(&tiqn->tiqn_tpg_lock); 3201 end_of_buf = 1; 3202 goto eob; 3203 } 3204 memcpy((void *)payload + payload_len, buf, len); 3205 payload_len += len; 3206 3207 spin_lock(&tiqn->tiqn_tpg_lock); 3208 list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) { 3209 3210 spin_lock(&tpg->tpg_state_lock); 3211 if ((tpg->tpg_state == TPG_STATE_FREE) || 3212 (tpg->tpg_state == TPG_STATE_INACTIVE)) { 3213 spin_unlock(&tpg->tpg_state_lock); 3214 continue; 3215 } 3216 spin_unlock(&tpg->tpg_state_lock); 3217 3218 spin_lock(&tpg->tpg_np_lock); 3219 list_for_each_entry(tpg_np, &tpg->tpg_gnp_list, 3220 tpg_np_list) { 3221 len = sprintf(buf, "TargetAddress=" 3222 "%s%s%s:%hu,%hu", 3223 (tpg_np->tpg_np->np_sockaddr.ss_family == AF_INET6) ? 3224 "[" : "", tpg_np->tpg_np->np_ip, 3225 (tpg_np->tpg_np->np_sockaddr.ss_family == AF_INET6) ? 3226 "]" : "", tpg_np->tpg_np->np_port, 3227 tpg->tpgt); 3228 len += 1; 3229 3230 if ((len + payload_len) > buffer_len) { 3231 spin_unlock(&tpg->tpg_np_lock); 3232 spin_unlock(&tiqn->tiqn_tpg_lock); 3233 end_of_buf = 1; 3234 goto eob; 3235 } 3236 memcpy((void *)payload + payload_len, buf, len); 3237 payload_len += len; 3238 } 3239 spin_unlock(&tpg->tpg_np_lock); 3240 } 3241 spin_unlock(&tiqn->tiqn_tpg_lock); 3242 eob: 3243 if (end_of_buf) 3244 break; 3245 } 3246 spin_unlock(&tiqn_lock); 3247 3248 cmd->buf_ptr = payload; 3249 3250 return payload_len; 3251 } 3252 3253 /* 3254 * FIXME: Add support for F_BIT and C_BIT when the length is longer than 3255 * MaxRecvDataSegmentLength. 
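 *
 * For now iscsit_build_sendtargets_response() caps the payload at
 * min(MaxRecvDataSegmentLength, 32768) bytes, so a single Text
 * Response PDU always suffices; spanning multiple PDUs would also
 * require carrying a valid Target Transfer Tag across the sequence.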
3256 */ 3257 static int iscsit_send_text_rsp( 3258 struct iscsi_cmd *cmd, 3259 struct iscsi_conn *conn) 3260 { 3261 struct iscsi_text_rsp *hdr; 3262 struct kvec *iov; 3263 u32 padding = 0, tx_size = 0; 3264 int text_length, iov_count = 0; 3265 3266 text_length = iscsit_build_sendtargets_response(cmd); 3267 if (text_length < 0) 3268 return text_length; 3269 3270 padding = ((-text_length) & 3); 3271 if (padding != 0) { 3272 memset(cmd->buf_ptr + text_length, 0, padding); 3273 pr_debug("Attaching %u additional bytes for" 3274 " padding.\n", padding); 3275 } 3276 3277 hdr = (struct iscsi_text_rsp *) cmd->pdu; 3278 memset(hdr, 0, ISCSI_HDR_LEN); 3279 hdr->opcode = ISCSI_OP_TEXT_RSP; 3280 hdr->flags |= ISCSI_FLAG_CMD_FINAL; 3281 hton24(hdr->dlength, text_length); 3282 hdr->itt = cpu_to_be32(cmd->init_task_tag); 3283 hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag); 3284 cmd->stat_sn = conn->stat_sn++; 3285 hdr->statsn = cpu_to_be32(cmd->stat_sn); 3286 3287 iscsit_increment_maxcmdsn(cmd, conn->sess); 3288 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 3289 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); 3290 3291 iov = &cmd->iov_misc[0]; 3292 3293 iov[iov_count].iov_base = cmd->pdu; 3294 iov[iov_count++].iov_len = ISCSI_HDR_LEN; 3295 iov[iov_count].iov_base = cmd->buf_ptr; 3296 iov[iov_count++].iov_len = text_length + padding; 3297 3298 tx_size += (ISCSI_HDR_LEN + text_length + padding); 3299 3300 if (conn->conn_ops->HeaderDigest) { 3301 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 3302 3303 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 3304 (unsigned char *)hdr, ISCSI_HDR_LEN, 3305 0, NULL, (u8 *)header_digest); 3306 3307 iov[0].iov_len += ISCSI_CRC_LEN; 3308 tx_size += ISCSI_CRC_LEN; 3309 pr_debug("Attaching CRC32 HeaderDigest for" 3310 " Text Response PDU 0x%08x\n", *header_digest); 3311 } 3312 3313 if (conn->conn_ops->DataDigest) { 3314 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 3315 cmd->buf_ptr, (text_length + padding), 3316 0, NULL, (u8 *)&cmd->data_crc); 3317 3318 iov[iov_count].iov_base = &cmd->data_crc; 3319 iov[iov_count++].iov_len = ISCSI_CRC_LEN; 3320 tx_size += ISCSI_CRC_LEN; 3321 3322 pr_debug("Attaching DataDigest for %u bytes of text" 3323 " data, CRC 0x%08x\n", (text_length + padding), 3324 cmd->data_crc); 3325 } 3326 3327 cmd->iov_misc_count = iov_count; 3328 cmd->tx_size = tx_size; 3329 3330 pr_debug("Built Text Response: ITT: 0x%08x, StatSN: 0x%08x," 3331 " Length: %u, CID: %hu\n", cmd->init_task_tag, cmd->stat_sn, 3332 text_length, conn->cid); 3333 return 0; 3334 } 3335 3336 static int iscsit_send_reject( 3337 struct iscsi_cmd *cmd, 3338 struct iscsi_conn *conn) 3339 { 3340 u32 iov_count = 0, tx_size = 0; 3341 struct iscsi_reject *hdr; 3342 struct kvec *iov; 3343 3344 hdr = (struct iscsi_reject *) cmd->pdu; 3345 hdr->opcode = ISCSI_OP_REJECT; 3346 hdr->flags |= ISCSI_FLAG_CMD_FINAL; 3347 hton24(hdr->dlength, ISCSI_HDR_LEN); 3348 cmd->stat_sn = conn->stat_sn++; 3349 hdr->statsn = cpu_to_be32(cmd->stat_sn); 3350 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 3351 hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); 3352 3353 iov = &cmd->iov_misc[0]; 3354 3355 iov[iov_count].iov_base = cmd->pdu; 3356 iov[iov_count++].iov_len = ISCSI_HDR_LEN; 3357 iov[iov_count].iov_base = cmd->buf_ptr; 3358 iov[iov_count++].iov_len = ISCSI_HDR_LEN; 3359 3360 tx_size = (ISCSI_HDR_LEN + ISCSI_HDR_LEN); 3361 3362 if (conn->conn_ops->HeaderDigest) { 3363 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 3364 3365 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 3366 
(unsigned char *)hdr, ISCSI_HDR_LEN, 3367 0, NULL, (u8 *)header_digest); 3368 3369 iov[0].iov_len += ISCSI_CRC_LEN; 3370 tx_size += ISCSI_CRC_LEN; 3371 pr_debug("Attaching CRC32 HeaderDigest for" 3372 " REJECT PDU 0x%08x\n", *header_digest); 3373 } 3374 3375 if (conn->conn_ops->DataDigest) { 3376 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 3377 (unsigned char *)cmd->buf_ptr, ISCSI_HDR_LEN, 3378 0, NULL, (u8 *)&cmd->data_crc); 3379 3380 iov[iov_count].iov_base = &cmd->data_crc; 3381 iov[iov_count++].iov_len = ISCSI_CRC_LEN; 3382 tx_size += ISCSI_CRC_LEN; 3383 pr_debug("Attaching CRC32 DataDigest for REJECT" 3384 " PDU 0x%08x\n", cmd->data_crc); 3385 } 3386 3387 cmd->iov_misc_count = iov_count; 3388 cmd->tx_size = tx_size; 3389 3390 pr_debug("Built Reject PDU StatSN: 0x%08x, Reason: 0x%02x," 3391 " CID: %hu\n", ntohl(hdr->statsn), hdr->reason, conn->cid); 3392 3393 return 0; 3394 } 3395 3396 static void iscsit_tx_thread_wait_for_tcp(struct iscsi_conn *conn) 3397 { 3398 if ((conn->sock->sk->sk_shutdown & SEND_SHUTDOWN) || 3399 (conn->sock->sk->sk_shutdown & RCV_SHUTDOWN)) { 3400 wait_for_completion_interruptible_timeout( 3401 &conn->tx_half_close_comp, 3402 ISCSI_TX_THREAD_TCP_TIMEOUT * HZ); 3403 } 3404 } 3405 3406 #ifdef CONFIG_SMP 3407 3408 void iscsit_thread_get_cpumask(struct iscsi_conn *conn) 3409 { 3410 struct iscsi_thread_set *ts = conn->thread_set; 3411 int ord, cpu; 3412 /* 3413 * thread_id is assigned from iscsit_global->ts_bitmap from 3414 * within iscsi_thread_set.c:iscsi_allocate_thread_sets() 3415 * 3416 * Here we use thread_id to determine which CPU that this 3417 * iSCSI connection's iscsi_thread_set will be scheduled to 3418 * execute upon. 3419 */ 3420 ord = ts->thread_id % cpumask_weight(cpu_online_mask); 3421 #if 0 3422 pr_debug(">>>>>>>>>>>>>>>>>>>> Generated ord: %d from" 3423 " thread_id: %d\n", ord, ts->thread_id); 3424 #endif 3425 for_each_online_cpu(cpu) { 3426 if (ord-- == 0) { 3427 cpumask_set_cpu(cpu, conn->conn_cpumask); 3428 return; 3429 } 3430 } 3431 /* 3432 * This should never be reached.. 3433 */ 3434 dump_stack(); 3435 cpumask_setall(conn->conn_cpumask); 3436 } 3437 3438 static inline void iscsit_thread_check_cpumask( 3439 struct iscsi_conn *conn, 3440 struct task_struct *p, 3441 int mode) 3442 { 3443 char buf[128]; 3444 /* 3445 * mode == 1 signals iscsi_target_tx_thread() usage. 3446 * mode == 0 signals iscsi_target_rx_thread() usage. 3447 */ 3448 if (mode == 1) { 3449 if (!conn->conn_tx_reset_cpumask) 3450 return; 3451 conn->conn_tx_reset_cpumask = 0; 3452 } else { 3453 if (!conn->conn_rx_reset_cpumask) 3454 return; 3455 conn->conn_rx_reset_cpumask = 0; 3456 } 3457 /* 3458 * Update the CPU mask for this single kthread so that 3459 * both TX and RX kthreads are scheduled to run on the 3460 * same CPU. 
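 *
 * The mask itself is built in iscsit_thread_get_cpumask();
 * set_cpus_allowed_ptr() below only runs when the corresponding
 * conn_*_reset_cpumask flag was set, rather than on every pass
 * through the thread loop.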
3461 */ 3462 memset(buf, 0, 128); 3463 cpumask_scnprintf(buf, 128, conn->conn_cpumask); 3464 #if 0 3465 pr_debug(">>>>>>>>>>>>>> Calling set_cpus_allowed_ptr():" 3466 " %s for %s\n", buf, p->comm); 3467 #endif 3468 set_cpus_allowed_ptr(p, conn->conn_cpumask); 3469 } 3470 3471 #else 3472 3473 void iscsit_thread_get_cpumask(struct iscsi_conn *conn) 3474 { 3475 return; 3476 } 3477 3478 #define iscsit_thread_check_cpumask(X, Y, Z) ({}) 3479 #endif /* CONFIG_SMP */ 3480 3481 int iscsi_target_tx_thread(void *arg) 3482 { 3483 u8 state; 3484 int eodr = 0; 3485 int ret = 0; 3486 int sent_status = 0; 3487 int use_misc = 0; 3488 int map_sg = 0; 3489 struct iscsi_cmd *cmd = NULL; 3490 struct iscsi_conn *conn; 3491 struct iscsi_queue_req *qr = NULL; 3492 struct se_cmd *se_cmd; 3493 struct iscsi_thread_set *ts = (struct iscsi_thread_set *)arg; 3494 /* 3495 * Allow ourselves to be interrupted by SIGINT so that a 3496 * connection recovery / failure event can be triggered externally. 3497 */ 3498 allow_signal(SIGINT); 3499 3500 restart: 3501 conn = iscsi_tx_thread_pre_handler(ts); 3502 if (!conn) 3503 goto out; 3504 3505 eodr = map_sg = ret = sent_status = use_misc = 0; 3506 3507 while (!kthread_should_stop()) { 3508 /* 3509 * Ensure that both TX and RX per connection kthreads 3510 * are scheduled to run on the same CPU. 3511 */ 3512 iscsit_thread_check_cpumask(conn, current, 1); 3513 3514 schedule_timeout_interruptible(MAX_SCHEDULE_TIMEOUT); 3515 3516 if ((ts->status == ISCSI_THREAD_SET_RESET) || 3517 signal_pending(current)) 3518 goto transport_err; 3519 3520 get_immediate: 3521 qr = iscsit_get_cmd_from_immediate_queue(conn); 3522 if (qr) { 3523 atomic_set(&conn->check_immediate_queue, 0); 3524 cmd = qr->cmd; 3525 state = qr->state; 3526 kmem_cache_free(lio_qr_cache, qr); 3527 3528 spin_lock_bh(&cmd->istate_lock); 3529 switch (state) { 3530 case ISTATE_SEND_R2T: 3531 spin_unlock_bh(&cmd->istate_lock); 3532 ret = iscsit_send_r2t(cmd, conn); 3533 break; 3534 case ISTATE_REMOVE: 3535 spin_unlock_bh(&cmd->istate_lock); 3536 3537 if (cmd->data_direction == DMA_TO_DEVICE) 3538 iscsit_stop_dataout_timer(cmd); 3539 3540 spin_lock_bh(&conn->cmd_lock); 3541 list_del(&cmd->i_list); 3542 spin_unlock_bh(&conn->cmd_lock); 3543 3544 iscsit_free_cmd(cmd); 3545 goto get_immediate; 3546 case ISTATE_SEND_NOPIN_WANT_RESPONSE: 3547 spin_unlock_bh(&cmd->istate_lock); 3548 iscsit_mod_nopin_response_timer(conn); 3549 ret = iscsit_send_unsolicited_nopin(cmd, 3550 conn, 1); 3551 break; 3552 case ISTATE_SEND_NOPIN_NO_RESPONSE: 3553 spin_unlock_bh(&cmd->istate_lock); 3554 ret = iscsit_send_unsolicited_nopin(cmd, 3555 conn, 0); 3556 break; 3557 default: 3558 pr_err("Unknown Opcode: 0x%02x ITT:" 3559 " 0x%08x, i_state: %d on CID: %hu\n", 3560 cmd->iscsi_opcode, cmd->init_task_tag, state, 3561 conn->cid); 3562 spin_unlock_bh(&cmd->istate_lock); 3563 goto transport_err; 3564 } 3565 if (ret < 0) { 3566 conn->tx_immediate_queue = 0; 3567 goto transport_err; 3568 } 3569 3570 if (iscsit_send_tx_data(cmd, conn, 1) < 0) { 3571 conn->tx_immediate_queue = 0; 3572 iscsit_tx_thread_wait_for_tcp(conn); 3573 goto transport_err; 3574 } 3575 3576 spin_lock_bh(&cmd->istate_lock); 3577 switch (state) { 3578 case ISTATE_SEND_R2T: 3579 spin_unlock_bh(&cmd->istate_lock); 3580 spin_lock_bh(&cmd->dataout_timeout_lock); 3581 iscsit_start_dataout_timer(cmd, conn); 3582 spin_unlock_bh(&cmd->dataout_timeout_lock); 3583 break; 3584 case ISTATE_SEND_NOPIN_WANT_RESPONSE: 3585 cmd->i_state = ISTATE_SENT_NOPIN_WANT_RESPONSE; 3586 spin_unlock_bh(&cmd->istate_lock); 
3587 break; 3588 case ISTATE_SEND_NOPIN_NO_RESPONSE: 3589 cmd->i_state = ISTATE_SENT_STATUS; 3590 spin_unlock_bh(&cmd->istate_lock); 3591 break; 3592 default: 3593 pr_err("Unknown Opcode: 0x%02x ITT:" 3594 " 0x%08x, i_state: %d on CID: %hu\n", 3595 cmd->iscsi_opcode, cmd->init_task_tag, 3596 state, conn->cid); 3597 spin_unlock_bh(&cmd->istate_lock); 3598 goto transport_err; 3599 } 3600 goto get_immediate; 3601 } else 3602 conn->tx_immediate_queue = 0; 3603 3604 get_response: 3605 qr = iscsit_get_cmd_from_response_queue(conn); 3606 if (qr) { 3607 cmd = qr->cmd; 3608 state = qr->state; 3609 kmem_cache_free(lio_qr_cache, qr); 3610 3611 spin_lock_bh(&cmd->istate_lock); 3612 check_rsp_state: 3613 switch (state) { 3614 case ISTATE_SEND_DATAIN: 3615 spin_unlock_bh(&cmd->istate_lock); 3616 ret = iscsit_send_data_in(cmd, conn, 3617 &eodr); 3618 map_sg = 1; 3619 break; 3620 case ISTATE_SEND_STATUS: 3621 case ISTATE_SEND_STATUS_RECOVERY: 3622 spin_unlock_bh(&cmd->istate_lock); 3623 use_misc = 1; 3624 ret = iscsit_send_status(cmd, conn); 3625 break; 3626 case ISTATE_SEND_LOGOUTRSP: 3627 spin_unlock_bh(&cmd->istate_lock); 3628 use_misc = 1; 3629 ret = iscsit_send_logout_response(cmd, conn); 3630 break; 3631 case ISTATE_SEND_ASYNCMSG: 3632 spin_unlock_bh(&cmd->istate_lock); 3633 use_misc = 1; 3634 ret = iscsit_send_conn_drop_async_message( 3635 cmd, conn); 3636 break; 3637 case ISTATE_SEND_NOPIN: 3638 spin_unlock_bh(&cmd->istate_lock); 3639 use_misc = 1; 3640 ret = iscsit_send_nopin_response(cmd, conn); 3641 break; 3642 case ISTATE_SEND_REJECT: 3643 spin_unlock_bh(&cmd->istate_lock); 3644 use_misc = 1; 3645 ret = iscsit_send_reject(cmd, conn); 3646 break; 3647 case ISTATE_SEND_TASKMGTRSP: 3648 spin_unlock_bh(&cmd->istate_lock); 3649 use_misc = 1; 3650 ret = iscsit_send_task_mgt_rsp(cmd, conn); 3651 if (ret != 0) 3652 break; 3653 ret = iscsit_tmr_post_handler(cmd, conn); 3654 if (ret != 0) 3655 iscsit_fall_back_to_erl0(conn->sess); 3656 break; 3657 case ISTATE_SEND_TEXTRSP: 3658 spin_unlock_bh(&cmd->istate_lock); 3659 use_misc = 1; 3660 ret = iscsit_send_text_rsp(cmd, conn); 3661 break; 3662 default: 3663 pr_err("Unknown Opcode: 0x%02x ITT:" 3664 " 0x%08x, i_state: %d on CID: %hu\n", 3665 cmd->iscsi_opcode, cmd->init_task_tag, 3666 state, conn->cid); 3667 spin_unlock_bh(&cmd->istate_lock); 3668 goto transport_err; 3669 } 3670 if (ret < 0) { 3671 conn->tx_response_queue = 0; 3672 goto transport_err; 3673 } 3674 3675 se_cmd = &cmd->se_cmd; 3676 3677 if (map_sg && !conn->conn_ops->IFMarker) { 3678 if (iscsit_fe_sendpage_sg(cmd, conn) < 0) { 3679 conn->tx_response_queue = 0; 3680 iscsit_tx_thread_wait_for_tcp(conn); 3681 iscsit_unmap_iovec(cmd); 3682 goto transport_err; 3683 } 3684 } else { 3685 if (iscsit_send_tx_data(cmd, conn, use_misc) < 0) { 3686 conn->tx_response_queue = 0; 3687 iscsit_tx_thread_wait_for_tcp(conn); 3688 iscsit_unmap_iovec(cmd); 3689 goto transport_err; 3690 } 3691 } 3692 map_sg = 0; 3693 iscsit_unmap_iovec(cmd); 3694 3695 spin_lock_bh(&cmd->istate_lock); 3696 switch (state) { 3697 case ISTATE_SEND_DATAIN: 3698 if (!eodr) 3699 goto check_rsp_state; 3700 3701 if (eodr == 1) { 3702 cmd->i_state = ISTATE_SENT_LAST_DATAIN; 3703 sent_status = 1; 3704 eodr = use_misc = 0; 3705 } else if (eodr == 2) { 3706 cmd->i_state = state = 3707 ISTATE_SEND_STATUS; 3708 sent_status = 0; 3709 eodr = use_misc = 0; 3710 goto check_rsp_state; 3711 } 3712 break; 3713 case ISTATE_SEND_STATUS: 3714 use_misc = 0; 3715 sent_status = 1; 3716 break; 3717 case ISTATE_SEND_ASYNCMSG: 3718 case ISTATE_SEND_NOPIN: 3719 
case ISTATE_SEND_STATUS_RECOVERY: 3720 case ISTATE_SEND_TEXTRSP: 3721 use_misc = 0; 3722 sent_status = 1; 3723 break; 3724 case ISTATE_SEND_REJECT: 3725 use_misc = 0; 3726 if (cmd->cmd_flags & ICF_REJECT_FAIL_CONN) { 3727 cmd->cmd_flags &= ~ICF_REJECT_FAIL_CONN; 3728 spin_unlock_bh(&cmd->istate_lock); 3729 complete(&cmd->reject_comp); 3730 goto transport_err; 3731 } 3732 complete(&cmd->reject_comp); 3733 break; 3734 case ISTATE_SEND_TASKMGTRSP: 3735 use_misc = 0; 3736 sent_status = 1; 3737 break; 3738 case ISTATE_SEND_LOGOUTRSP: 3739 spin_unlock_bh(&cmd->istate_lock); 3740 if (!iscsit_logout_post_handler(cmd, conn)) 3741 goto restart; 3742 spin_lock_bh(&cmd->istate_lock); 3743 use_misc = 0; 3744 sent_status = 1; 3745 break; 3746 default: 3747 pr_err("Unknown Opcode: 0x%02x ITT:" 3748 " 0x%08x, i_state: %d on CID: %hu\n", 3749 cmd->iscsi_opcode, cmd->init_task_tag, 3750 cmd->i_state, conn->cid); 3751 spin_unlock_bh(&cmd->istate_lock); 3752 goto transport_err; 3753 } 3754 3755 if (sent_status) { 3756 cmd->i_state = ISTATE_SENT_STATUS; 3757 sent_status = 0; 3758 } 3759 spin_unlock_bh(&cmd->istate_lock); 3760 3761 if (atomic_read(&conn->check_immediate_queue)) 3762 goto get_immediate; 3763 3764 goto get_response; 3765 } else 3766 conn->tx_response_queue = 0; 3767 } 3768 3769 transport_err: 3770 iscsit_take_action_for_connection_exit(conn); 3771 goto restart; 3772 out: 3773 return 0; 3774 } 3775 3776 int iscsi_target_rx_thread(void *arg) 3777 { 3778 int ret; 3779 u8 buffer[ISCSI_HDR_LEN], opcode; 3780 u32 checksum = 0, digest = 0; 3781 struct iscsi_conn *conn = NULL; 3782 struct iscsi_thread_set *ts = (struct iscsi_thread_set *)arg; 3783 struct kvec iov; 3784 /* 3785 * Allow ourselves to be interrupted by SIGINT so that a 3786 * connection recovery / failure event can be triggered externally. 3787 */ 3788 allow_signal(SIGINT); 3789 3790 restart: 3791 conn = iscsi_rx_thread_pre_handler(ts); 3792 if (!conn) 3793 goto out; 3794 3795 while (!kthread_should_stop()) { 3796 /* 3797 * Ensure that both TX and RX per connection kthreads 3798 * are scheduled to run on the same CPU. 3799 */ 3800 iscsit_thread_check_cpumask(conn, current, 0); 3801 3802 memset(buffer, 0, ISCSI_HDR_LEN); 3803 memset(&iov, 0, sizeof(struct kvec)); 3804 3805 iov.iov_base = buffer; 3806 iov.iov_len = ISCSI_HDR_LEN; 3807 3808 ret = rx_data(conn, &iov, 1, ISCSI_HDR_LEN); 3809 if (ret != ISCSI_HDR_LEN) { 3810 iscsit_rx_thread_wait_for_tcp(conn); 3811 goto transport_err; 3812 } 3813 3814 /* 3815 * Set conn->bad_hdr for use with REJECT PDUs. 3816 */ 3817 memcpy(&conn->bad_hdr, &buffer, ISCSI_HDR_LEN); 3818 3819 if (conn->conn_ops->HeaderDigest) { 3820 iov.iov_base = &digest; 3821 iov.iov_len = ISCSI_CRC_LEN; 3822 3823 ret = rx_data(conn, &iov, 1, ISCSI_CRC_LEN); 3824 if (ret != ISCSI_CRC_LEN) { 3825 iscsit_rx_thread_wait_for_tcp(conn); 3826 goto transport_err; 3827 } 3828 3829 iscsit_do_crypto_hash_buf(&conn->conn_rx_hash, 3830 buffer, ISCSI_HDR_LEN, 3831 0, NULL, (u8 *)&checksum); 3832 3833 if (digest != checksum) { 3834 pr_err("HeaderDigest CRC32C failed," 3835 " received 0x%08x, computed 0x%08x\n", 3836 digest, checksum); 3837 /* 3838 * Set the PDU to 0xff so it will intentionally 3839 * hit default in the switch below. 
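 *
 * An all-ones header never decodes to a valid iSCSI opcode, so the
 * switch below falls into its default arm and either recovers via
 * iscsit_recover_from_unknown_opcode() or closes the connection,
 * depending on ErrorRecoveryLevel and OFMarker.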
				 */
				memset(buffer, 0xff, ISCSI_HDR_LEN);
				spin_lock_bh(&conn->sess->session_stats_lock);
				conn->sess->conn_digest_errors++;
				spin_unlock_bh(&conn->sess->session_stats_lock);
			} else {
				pr_debug("Got HeaderDigest CRC32C"
					" 0x%08x\n", checksum);
			}
		}

		if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)
			goto transport_err;

		opcode = buffer[0] & ISCSI_OPCODE_MASK;

		/*
		 * Only Text and Logout requests are legal while in a
		 * Discovery session.
		 */
		if (conn->sess->sess_ops->SessionType &&
		    !((opcode == ISCSI_OP_TEXT) ||
		      (opcode == ISCSI_OP_LOGOUT))) {
			pr_err("Received illegal iSCSI Opcode: 0x%02x"
				" while in Discovery Session, rejecting.\n", opcode);
			iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
					buffer, conn);
			goto transport_err;
		}

		switch (opcode) {
		case ISCSI_OP_SCSI_CMD:
			if (iscsit_handle_scsi_cmd(conn, buffer) < 0)
				goto transport_err;
			break;
		case ISCSI_OP_SCSI_DATA_OUT:
			if (iscsit_handle_data_out(conn, buffer) < 0)
				goto transport_err;
			break;
		case ISCSI_OP_NOOP_OUT:
			if (iscsit_handle_nop_out(conn, buffer) < 0)
				goto transport_err;
			break;
		case ISCSI_OP_SCSI_TMFUNC:
			if (iscsit_handle_task_mgt_cmd(conn, buffer) < 0)
				goto transport_err;
			break;
		case ISCSI_OP_TEXT:
			if (iscsit_handle_text_cmd(conn, buffer) < 0)
				goto transport_err;
			break;
		case ISCSI_OP_LOGOUT:
			ret = iscsit_handle_logout_cmd(conn, buffer);
			if (ret > 0) {
				wait_for_completion_timeout(&conn->conn_logout_comp,
						SECONDS_FOR_LOGOUT_COMP * HZ);
				goto transport_err;
			} else if (ret < 0)
				goto transport_err;
			break;
		case ISCSI_OP_SNACK:
			if (iscsit_handle_snack(conn, buffer) < 0)
				goto transport_err;
			break;
		default:
			pr_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
			if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
				pr_err("Cannot recover from unknown"
					" opcode while ERL=0, closing iSCSI"
					" connection.\n");
				goto transport_err;
			}
			if (!conn->conn_ops->OFMarker) {
				pr_err("Unable to recover from unknown"
					" opcode while OFMarker=No, closing iSCSI"
					" connection.\n");
				goto transport_err;
			}
			if (iscsit_recover_from_unknown_opcode(conn) < 0) {
				pr_err("Unable to recover from unknown"
					" opcode, closing iSCSI connection.\n");
				goto transport_err;
			}
			break;
		}
	}

transport_err:
	if (!signal_pending(current))
		atomic_set(&conn->transport_failed, 1);
	iscsit_take_action_for_connection_exit(conn);
	goto restart;
out:
	return 0;
}

static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
{
	struct iscsi_cmd *cmd = NULL, *cmd_tmp = NULL;
	struct iscsi_session *sess = conn->sess;
	/*
	 * We expect this function to only ever be called from either RX or
	 * TX thread context via iscsit_close_connection(), once the other
	 * context has been reset and has returned to its sleeping
	 * pre-handler state.
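	 *
	 * conn->cmd_lock cannot be held across the command release below,
	 * which may block, so it is dropped and re-acquired around each
	 * list entry.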
3941 */ 3942 spin_lock_bh(&conn->cmd_lock); 3943 list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_list) { 3944 3945 list_del(&cmd->i_list); 3946 spin_unlock_bh(&conn->cmd_lock); 3947 3948 iscsit_increment_maxcmdsn(cmd, sess); 3949 3950 iscsit_free_cmd(cmd); 3951 3952 spin_lock_bh(&conn->cmd_lock); 3953 } 3954 spin_unlock_bh(&conn->cmd_lock); 3955 } 3956 3957 static void iscsit_stop_timers_for_cmds( 3958 struct iscsi_conn *conn) 3959 { 3960 struct iscsi_cmd *cmd; 3961 3962 spin_lock_bh(&conn->cmd_lock); 3963 list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) { 3964 if (cmd->data_direction == DMA_TO_DEVICE) 3965 iscsit_stop_dataout_timer(cmd); 3966 } 3967 spin_unlock_bh(&conn->cmd_lock); 3968 } 3969 3970 int iscsit_close_connection( 3971 struct iscsi_conn *conn) 3972 { 3973 int conn_logout = (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT); 3974 struct iscsi_session *sess = conn->sess; 3975 3976 pr_debug("Closing iSCSI connection CID %hu on SID:" 3977 " %u\n", conn->cid, sess->sid); 3978 /* 3979 * Always up conn_logout_comp just in case the RX Thread is sleeping 3980 * and the logout response never got sent because the connection 3981 * failed. 3982 */ 3983 complete(&conn->conn_logout_comp); 3984 3985 iscsi_release_thread_set(conn); 3986 3987 iscsit_stop_timers_for_cmds(conn); 3988 iscsit_stop_nopin_response_timer(conn); 3989 iscsit_stop_nopin_timer(conn); 3990 iscsit_free_queue_reqs_for_conn(conn); 3991 3992 /* 3993 * During Connection recovery drop unacknowledged out of order 3994 * commands for this connection, and prepare the other commands 3995 * for realligence. 3996 * 3997 * During normal operation clear the out of order commands (but 3998 * do not free the struct iscsi_ooo_cmdsn's) and release all 3999 * struct iscsi_cmds. 4000 */ 4001 if (atomic_read(&conn->connection_recovery)) { 4002 iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(conn); 4003 iscsit_prepare_cmds_for_realligance(conn); 4004 } else { 4005 iscsit_clear_ooo_cmdsns_for_conn(conn); 4006 iscsit_release_commands_from_conn(conn); 4007 } 4008 4009 /* 4010 * Handle decrementing session or connection usage count if 4011 * a logout response was not able to be sent because the 4012 * connection failed. Fall back to Session Recovery here. 4013 */ 4014 if (atomic_read(&conn->conn_logout_remove)) { 4015 if (conn->conn_logout_reason == ISCSI_LOGOUT_REASON_CLOSE_SESSION) { 4016 iscsit_dec_conn_usage_count(conn); 4017 iscsit_dec_session_usage_count(sess); 4018 } 4019 if (conn->conn_logout_reason == ISCSI_LOGOUT_REASON_CLOSE_CONNECTION) 4020 iscsit_dec_conn_usage_count(conn); 4021 4022 atomic_set(&conn->conn_logout_remove, 0); 4023 atomic_set(&sess->session_reinstatement, 0); 4024 atomic_set(&sess->session_fall_back_to_erl0, 1); 4025 } 4026 4027 spin_lock_bh(&sess->conn_lock); 4028 list_del(&conn->conn_list); 4029 4030 /* 4031 * Attempt to let the Initiator know this connection failed by 4032 * sending an Connection Dropped Async Message on another 4033 * active connection. 4034 */ 4035 if (atomic_read(&conn->connection_recovery)) 4036 iscsit_build_conn_drop_async_message(conn); 4037 4038 spin_unlock_bh(&sess->conn_lock); 4039 4040 /* 4041 * If connection reinstatement is being performed on this connection, 4042 * up the connection reinstatement semaphore that is being blocked on 4043 * in iscsit_cause_connection_reinstatement(). 
4044 */ 4045 spin_lock_bh(&conn->state_lock); 4046 if (atomic_read(&conn->sleep_on_conn_wait_comp)) { 4047 spin_unlock_bh(&conn->state_lock); 4048 complete(&conn->conn_wait_comp); 4049 wait_for_completion(&conn->conn_post_wait_comp); 4050 spin_lock_bh(&conn->state_lock); 4051 } 4052 4053 /* 4054 * If connection reinstatement is being performed on this connection 4055 * by receiving a REMOVECONNFORRECOVERY logout request, up the 4056 * connection wait rcfr semaphore that is being blocked on 4057 * an iscsit_connection_reinstatement_rcfr(). 4058 */ 4059 if (atomic_read(&conn->connection_wait_rcfr)) { 4060 spin_unlock_bh(&conn->state_lock); 4061 complete(&conn->conn_wait_rcfr_comp); 4062 wait_for_completion(&conn->conn_post_wait_comp); 4063 spin_lock_bh(&conn->state_lock); 4064 } 4065 atomic_set(&conn->connection_reinstatement, 1); 4066 spin_unlock_bh(&conn->state_lock); 4067 4068 /* 4069 * If any other processes are accessing this connection pointer we 4070 * must wait until they have completed. 4071 */ 4072 iscsit_check_conn_usage_count(conn); 4073 4074 if (conn->conn_rx_hash.tfm) 4075 crypto_free_hash(conn->conn_rx_hash.tfm); 4076 if (conn->conn_tx_hash.tfm) 4077 crypto_free_hash(conn->conn_tx_hash.tfm); 4078 4079 if (conn->conn_cpumask) 4080 free_cpumask_var(conn->conn_cpumask); 4081 4082 kfree(conn->conn_ops); 4083 conn->conn_ops = NULL; 4084 4085 if (conn->sock) { 4086 if (conn->conn_flags & CONNFLAG_SCTP_STRUCT_FILE) { 4087 kfree(conn->sock->file); 4088 conn->sock->file = NULL; 4089 } 4090 sock_release(conn->sock); 4091 } 4092 conn->thread_set = NULL; 4093 4094 pr_debug("Moving to TARG_CONN_STATE_FREE.\n"); 4095 conn->conn_state = TARG_CONN_STATE_FREE; 4096 kfree(conn); 4097 4098 spin_lock_bh(&sess->conn_lock); 4099 atomic_dec(&sess->nconn); 4100 pr_debug("Decremented iSCSI connection count to %hu from node:" 4101 " %s\n", atomic_read(&sess->nconn), 4102 sess->sess_ops->InitiatorName); 4103 /* 4104 * Make sure that if one connection fails in an non ERL=2 iSCSI 4105 * Session that they all fail. 4106 */ 4107 if ((sess->sess_ops->ErrorRecoveryLevel != 2) && !conn_logout && 4108 !atomic_read(&sess->session_logout)) 4109 atomic_set(&sess->session_fall_back_to_erl0, 1); 4110 4111 /* 4112 * If this was not the last connection in the session, and we are 4113 * performing session reinstatement or falling back to ERL=0, call 4114 * iscsit_stop_session() without sleeping to shutdown the other 4115 * active connections. 4116 */ 4117 if (atomic_read(&sess->nconn)) { 4118 if (!atomic_read(&sess->session_reinstatement) && 4119 !atomic_read(&sess->session_fall_back_to_erl0)) { 4120 spin_unlock_bh(&sess->conn_lock); 4121 return 0; 4122 } 4123 if (!atomic_read(&sess->session_stop_active)) { 4124 atomic_set(&sess->session_stop_active, 1); 4125 spin_unlock_bh(&sess->conn_lock); 4126 iscsit_stop_session(sess, 0, 0); 4127 return 0; 4128 } 4129 spin_unlock_bh(&sess->conn_lock); 4130 return 0; 4131 } 4132 4133 /* 4134 * If this was the last connection in the session and one of the 4135 * following is occurring: 4136 * 4137 * Session Reinstatement is not being performed, and are falling back 4138 * to ERL=0 call iscsit_close_session(). 4139 * 4140 * Session Logout was requested. iscsit_close_session() will be called 4141 * elsewhere. 4142 * 4143 * Session Continuation is not being performed, start the Time2Retain 4144 * handler and check if sleep_on_sess_wait_sem is active. 
4145 */ 4146 if (!atomic_read(&sess->session_reinstatement) && 4147 atomic_read(&sess->session_fall_back_to_erl0)) { 4148 spin_unlock_bh(&sess->conn_lock); 4149 iscsit_close_session(sess); 4150 4151 return 0; 4152 } else if (atomic_read(&sess->session_logout)) { 4153 pr_debug("Moving to TARG_SESS_STATE_FREE.\n"); 4154 sess->session_state = TARG_SESS_STATE_FREE; 4155 spin_unlock_bh(&sess->conn_lock); 4156 4157 if (atomic_read(&sess->sleep_on_sess_wait_comp)) 4158 complete(&sess->session_wait_comp); 4159 4160 return 0; 4161 } else { 4162 pr_debug("Moving to TARG_SESS_STATE_FAILED.\n"); 4163 sess->session_state = TARG_SESS_STATE_FAILED; 4164 4165 if (!atomic_read(&sess->session_continuation)) { 4166 spin_unlock_bh(&sess->conn_lock); 4167 iscsit_start_time2retain_handler(sess); 4168 } else 4169 spin_unlock_bh(&sess->conn_lock); 4170 4171 if (atomic_read(&sess->sleep_on_sess_wait_comp)) 4172 complete(&sess->session_wait_comp); 4173 4174 return 0; 4175 } 4176 spin_unlock_bh(&sess->conn_lock); 4177 4178 return 0; 4179 } 4180 4181 int iscsit_close_session(struct iscsi_session *sess) 4182 { 4183 struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess); 4184 struct se_portal_group *se_tpg = &tpg->tpg_se_tpg; 4185 4186 if (atomic_read(&sess->nconn)) { 4187 pr_err("%d connection(s) still exist for iSCSI session" 4188 " to %s\n", atomic_read(&sess->nconn), 4189 sess->sess_ops->InitiatorName); 4190 BUG(); 4191 } 4192 4193 spin_lock_bh(&se_tpg->session_lock); 4194 atomic_set(&sess->session_logout, 1); 4195 atomic_set(&sess->session_reinstatement, 1); 4196 iscsit_stop_time2retain_timer(sess); 4197 spin_unlock_bh(&se_tpg->session_lock); 4198 4199 /* 4200 * transport_deregister_session_configfs() will clear the 4201 * struct se_node_acl->nacl_sess pointer now as a iscsi_np process context 4202 * can be setting it again with __transport_register_session() in 4203 * iscsi_post_login_handler() again after the iscsit_stop_session() 4204 * completes in iscsi_np context. 4205 */ 4206 transport_deregister_session_configfs(sess->se_sess); 4207 4208 /* 4209 * If any other processes are accessing this session pointer we must 4210 * wait until they have completed. If we are in an interrupt (the 4211 * time2retain handler) and contain and active session usage count we 4212 * restart the timer and exit. 
4213 */ 4214 if (!in_interrupt()) { 4215 if (iscsit_check_session_usage_count(sess) == 1) 4216 iscsit_stop_session(sess, 1, 1); 4217 } else { 4218 if (iscsit_check_session_usage_count(sess) == 2) { 4219 atomic_set(&sess->session_logout, 0); 4220 iscsit_start_time2retain_handler(sess); 4221 return 0; 4222 } 4223 } 4224 4225 transport_deregister_session(sess->se_sess); 4226 4227 if (sess->sess_ops->ErrorRecoveryLevel == 2) 4228 iscsit_free_connection_recovery_entires(sess); 4229 4230 iscsit_free_all_ooo_cmdsns(sess); 4231 4232 spin_lock_bh(&se_tpg->session_lock); 4233 pr_debug("Moving to TARG_SESS_STATE_FREE.\n"); 4234 sess->session_state = TARG_SESS_STATE_FREE; 4235 pr_debug("Released iSCSI session from node: %s\n", 4236 sess->sess_ops->InitiatorName); 4237 tpg->nsessions--; 4238 if (tpg->tpg_tiqn) 4239 tpg->tpg_tiqn->tiqn_nsessions--; 4240 4241 pr_debug("Decremented number of active iSCSI Sessions on" 4242 " iSCSI TPG: %hu to %u\n", tpg->tpgt, tpg->nsessions); 4243 4244 spin_lock(&sess_idr_lock); 4245 idr_remove(&sess_idr, sess->session_index); 4246 spin_unlock(&sess_idr_lock); 4247 4248 kfree(sess->sess_ops); 4249 sess->sess_ops = NULL; 4250 spin_unlock_bh(&se_tpg->session_lock); 4251 4252 kfree(sess); 4253 return 0; 4254 } 4255 4256 static void iscsit_logout_post_handler_closesession( 4257 struct iscsi_conn *conn) 4258 { 4259 struct iscsi_session *sess = conn->sess; 4260 4261 iscsi_set_thread_clear(conn, ISCSI_CLEAR_TX_THREAD); 4262 iscsi_set_thread_set_signal(conn, ISCSI_SIGNAL_TX_THREAD); 4263 4264 atomic_set(&conn->conn_logout_remove, 0); 4265 complete(&conn->conn_logout_comp); 4266 4267 iscsit_dec_conn_usage_count(conn); 4268 iscsit_stop_session(sess, 1, 1); 4269 iscsit_dec_session_usage_count(sess); 4270 iscsit_close_session(sess); 4271 } 4272 4273 static void iscsit_logout_post_handler_samecid( 4274 struct iscsi_conn *conn) 4275 { 4276 iscsi_set_thread_clear(conn, ISCSI_CLEAR_TX_THREAD); 4277 iscsi_set_thread_set_signal(conn, ISCSI_SIGNAL_TX_THREAD); 4278 4279 atomic_set(&conn->conn_logout_remove, 0); 4280 complete(&conn->conn_logout_comp); 4281 4282 iscsit_cause_connection_reinstatement(conn, 1); 4283 iscsit_dec_conn_usage_count(conn); 4284 } 4285 4286 static void iscsit_logout_post_handler_diffcid( 4287 struct iscsi_conn *conn, 4288 u16 cid) 4289 { 4290 struct iscsi_conn *l_conn; 4291 struct iscsi_session *sess = conn->sess; 4292 4293 if (!sess) 4294 return; 4295 4296 spin_lock_bh(&sess->conn_lock); 4297 list_for_each_entry(l_conn, &sess->sess_conn_list, conn_list) { 4298 if (l_conn->cid == cid) { 4299 iscsit_inc_conn_usage_count(l_conn); 4300 break; 4301 } 4302 } 4303 spin_unlock_bh(&sess->conn_lock); 4304 4305 if (!l_conn) 4306 return; 4307 4308 if (l_conn->sock) 4309 l_conn->sock->ops->shutdown(l_conn->sock, RCV_SHUTDOWN); 4310 4311 spin_lock_bh(&l_conn->state_lock); 4312 pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n"); 4313 l_conn->conn_state = TARG_CONN_STATE_IN_LOGOUT; 4314 spin_unlock_bh(&l_conn->state_lock); 4315 4316 iscsit_cause_connection_reinstatement(l_conn, 1); 4317 iscsit_dec_conn_usage_count(l_conn); 4318 } 4319 4320 /* 4321 * Return of 0 causes the TX thread to restart. 
4322 */ 4323 static int iscsit_logout_post_handler( 4324 struct iscsi_cmd *cmd, 4325 struct iscsi_conn *conn) 4326 { 4327 int ret = 0; 4328 4329 switch (cmd->logout_reason) { 4330 case ISCSI_LOGOUT_REASON_CLOSE_SESSION: 4331 switch (cmd->logout_response) { 4332 case ISCSI_LOGOUT_SUCCESS: 4333 case ISCSI_LOGOUT_CLEANUP_FAILED: 4334 default: 4335 iscsit_logout_post_handler_closesession(conn); 4336 break; 4337 } 4338 ret = 0; 4339 break; 4340 case ISCSI_LOGOUT_REASON_CLOSE_CONNECTION: 4341 if (conn->cid == cmd->logout_cid) { 4342 switch (cmd->logout_response) { 4343 case ISCSI_LOGOUT_SUCCESS: 4344 case ISCSI_LOGOUT_CLEANUP_FAILED: 4345 default: 4346 iscsit_logout_post_handler_samecid(conn); 4347 break; 4348 } 4349 ret = 0; 4350 } else { 4351 switch (cmd->logout_response) { 4352 case ISCSI_LOGOUT_SUCCESS: 4353 iscsit_logout_post_handler_diffcid(conn, 4354 cmd->logout_cid); 4355 break; 4356 case ISCSI_LOGOUT_CID_NOT_FOUND: 4357 case ISCSI_LOGOUT_CLEANUP_FAILED: 4358 default: 4359 break; 4360 } 4361 ret = 1; 4362 } 4363 break; 4364 case ISCSI_LOGOUT_REASON_RECOVERY: 4365 switch (cmd->logout_response) { 4366 case ISCSI_LOGOUT_SUCCESS: 4367 case ISCSI_LOGOUT_CID_NOT_FOUND: 4368 case ISCSI_LOGOUT_RECOVERY_UNSUPPORTED: 4369 case ISCSI_LOGOUT_CLEANUP_FAILED: 4370 default: 4371 break; 4372 } 4373 ret = 1; 4374 break; 4375 default: 4376 break; 4377 4378 } 4379 return ret; 4380 } 4381 4382 void iscsit_fail_session(struct iscsi_session *sess) 4383 { 4384 struct iscsi_conn *conn; 4385 4386 spin_lock_bh(&sess->conn_lock); 4387 list_for_each_entry(conn, &sess->sess_conn_list, conn_list) { 4388 pr_debug("Moving to TARG_CONN_STATE_CLEANUP_WAIT.\n"); 4389 conn->conn_state = TARG_CONN_STATE_CLEANUP_WAIT; 4390 } 4391 spin_unlock_bh(&sess->conn_lock); 4392 4393 pr_debug("Moving to TARG_SESS_STATE_FAILED.\n"); 4394 sess->session_state = TARG_SESS_STATE_FAILED; 4395 } 4396 4397 int iscsit_free_session(struct iscsi_session *sess) 4398 { 4399 u16 conn_count = atomic_read(&sess->nconn); 4400 struct iscsi_conn *conn, *conn_tmp = NULL; 4401 int is_last; 4402 4403 spin_lock_bh(&sess->conn_lock); 4404 atomic_set(&sess->sleep_on_sess_wait_comp, 1); 4405 4406 list_for_each_entry_safe(conn, conn_tmp, &sess->sess_conn_list, 4407 conn_list) { 4408 if (conn_count == 0) 4409 break; 4410 4411 if (list_is_last(&conn->conn_list, &sess->sess_conn_list)) { 4412 is_last = 1; 4413 } else { 4414 iscsit_inc_conn_usage_count(conn_tmp); 4415 is_last = 0; 4416 } 4417 iscsit_inc_conn_usage_count(conn); 4418 4419 spin_unlock_bh(&sess->conn_lock); 4420 iscsit_cause_connection_reinstatement(conn, 1); 4421 spin_lock_bh(&sess->conn_lock); 4422 4423 iscsit_dec_conn_usage_count(conn); 4424 if (is_last == 0) 4425 iscsit_dec_conn_usage_count(conn_tmp); 4426 4427 conn_count--; 4428 } 4429 4430 if (atomic_read(&sess->nconn)) { 4431 spin_unlock_bh(&sess->conn_lock); 4432 wait_for_completion(&sess->session_wait_comp); 4433 } else 4434 spin_unlock_bh(&sess->conn_lock); 4435 4436 iscsit_close_session(sess); 4437 return 0; 4438 } 4439 4440 void iscsit_stop_session( 4441 struct iscsi_session *sess, 4442 int session_sleep, 4443 int connection_sleep) 4444 { 4445 u16 conn_count = atomic_read(&sess->nconn); 4446 struct iscsi_conn *conn, *conn_tmp = NULL; 4447 int is_last; 4448 4449 spin_lock_bh(&sess->conn_lock); 4450 if (session_sleep) 4451 atomic_set(&sess->sleep_on_sess_wait_comp, 1); 4452 4453 if (connection_sleep) { 4454 list_for_each_entry_safe(conn, conn_tmp, &sess->sess_conn_list, 4455 conn_list) { 4456 if (conn_count == 0) 4457 break; 4458 4459 if 
			(list_is_last(&conn->conn_list, &sess->sess_conn_list)) {
				is_last = 1;
			} else {
				iscsit_inc_conn_usage_count(conn_tmp);
				is_last = 0;
			}
			iscsit_inc_conn_usage_count(conn);

			spin_unlock_bh(&sess->conn_lock);
			iscsit_cause_connection_reinstatement(conn, 1);
			spin_lock_bh(&sess->conn_lock);

			iscsit_dec_conn_usage_count(conn);
			if (is_last == 0)
				iscsit_dec_conn_usage_count(conn_tmp);
			conn_count--;
		}
	} else {
		list_for_each_entry(conn, &sess->sess_conn_list, conn_list)
			iscsit_cause_connection_reinstatement(conn, 0);
	}

	if (session_sleep && atomic_read(&sess->nconn)) {
		spin_unlock_bh(&sess->conn_lock);
		wait_for_completion(&sess->session_wait_comp);
	} else
		spin_unlock_bh(&sess->conn_lock);
}

int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
{
	struct iscsi_session *sess;
	struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
	struct se_session *se_sess, *se_sess_tmp;
	int session_count = 0;

	spin_lock_bh(&se_tpg->session_lock);
	if (tpg->nsessions && !force) {
		spin_unlock_bh(&se_tpg->session_lock);
		return -1;
	}

	list_for_each_entry_safe(se_sess, se_sess_tmp, &se_tpg->tpg_sess_list,
			sess_list) {
		sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;

		spin_lock(&sess->conn_lock);
		if (atomic_read(&sess->session_fall_back_to_erl0) ||
		    atomic_read(&sess->session_logout) ||
		    (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
			spin_unlock(&sess->conn_lock);
			continue;
		}
		atomic_set(&sess->session_reinstatement, 1);
		spin_unlock(&sess->conn_lock);
		spin_unlock_bh(&se_tpg->session_lock);

		iscsit_free_session(sess);
		spin_lock_bh(&se_tpg->session_lock);

		session_count++;
	}
	spin_unlock_bh(&se_tpg->session_lock);

	pr_debug("Released %d iSCSI Session(s) from Target Portal"
		" Group: %hu\n", session_count, tpg->tpgt);
	return 0;
}

MODULE_DESCRIPTION("iSCSI-Target Driver for mainline target infrastructure");
MODULE_VERSION("4.1.x");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(iscsi_target_init_module);
module_exit(iscsi_target_cleanup_module);