// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * This file contains main functions related to the iSCSI Target Core Driver.
 *
 * (c) Copyright 2007-2013 Datera, Inc.
 *
 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
 *
 ******************************************************************************/

#include <linux/crc32c.h>
#include <linux/string.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/idr.h>
#include <linux/delay.h>
#include <linux/sched/signal.h>
#include <linux/unaligned.h>
#include <linux/inet.h>
#include <net/ipv6.h>
#include <scsi/scsi_proto.h>
#include <scsi/iscsi_proto.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

#include <target/target_core_backend.h>
#include <target/iscsi/iscsi_target_core.h>
#include "iscsi_target_parameters.h"
#include "iscsi_target_seq_pdu_list.h"
#include "iscsi_target_datain_values.h"
#include "iscsi_target_erl0.h"
#include "iscsi_target_erl1.h"
#include "iscsi_target_erl2.h"
#include "iscsi_target_login.h"
#include "iscsi_target_tmr.h"
#include "iscsi_target_tpg.h"
#include "iscsi_target_util.h"
#include "iscsi_target.h"
#include "iscsi_target_device.h"
#include <target/iscsi/iscsi_target_stat.h>

#include <target/iscsi/iscsi_transport.h>

static LIST_HEAD(g_tiqn_list);
static LIST_HEAD(g_np_list);
static DEFINE_SPINLOCK(tiqn_lock);
static DEFINE_MUTEX(np_lock);

static struct idr tiqn_idr;
DEFINE_IDA(sess_ida);
struct mutex auth_id_lock;

struct iscsit_global *iscsit_global;

struct kmem_cache *lio_qr_cache;
struct kmem_cache *lio_dr_cache;
struct kmem_cache *lio_ooo_cache;
struct kmem_cache *lio_r2t_cache;

static int iscsit_handle_immediate_data(struct iscsit_cmd *,
			struct iscsi_scsi_req *, u32);

struct iscsi_tiqn *iscsit_get_tiqn_for_login(unsigned char *buf)
{
	struct iscsi_tiqn *tiqn = NULL;

	spin_lock(&tiqn_lock);
	list_for_each_entry(tiqn, &g_tiqn_list, tiqn_list) {
		if (!strcmp(tiqn->tiqn, buf)) {

			spin_lock(&tiqn->tiqn_state_lock);
			if (tiqn->tiqn_state == TIQN_STATE_ACTIVE) {
				tiqn->tiqn_access_count++;
				spin_unlock(&tiqn->tiqn_state_lock);
				spin_unlock(&tiqn_lock);
				return tiqn;
			}
			spin_unlock(&tiqn->tiqn_state_lock);
		}
	}
	spin_unlock(&tiqn_lock);

	return NULL;
}

static int iscsit_set_tiqn_shutdown(struct iscsi_tiqn *tiqn)
{
	spin_lock(&tiqn->tiqn_state_lock);
	if (tiqn->tiqn_state == TIQN_STATE_ACTIVE) {
		tiqn->tiqn_state = TIQN_STATE_SHUTDOWN;
		spin_unlock(&tiqn->tiqn_state_lock);
		return 0;
	}
	spin_unlock(&tiqn->tiqn_state_lock);

	return -1;
}

void iscsit_put_tiqn_for_login(struct iscsi_tiqn *tiqn)
{
	spin_lock(&tiqn->tiqn_state_lock);
	tiqn->tiqn_access_count--;
	spin_unlock(&tiqn->tiqn_state_lock);
}

/*
 * Note that IQN formatting is expected to be done in userspace, and
 * no explicit IQN format checks are done here.
 */
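/*
 * iscsit_add_tiqn() allocates the struct iscsi_tiqn and registers it in
 * tiqn_idr while holding tiqn_lock.  The idr_preload()/idr_alloc(GFP_NOWAIT)
 * pairing below lets the index allocation happen under the spinlock without
 * sleeping, since any backing memory was preallocated before the lock was
 * taken.
 */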
struct iscsi_tiqn *iscsit_add_tiqn(unsigned char *buf)
{
	struct iscsi_tiqn *tiqn = NULL;
	int ret;

	if (strlen(buf) >= ISCSI_IQN_LEN) {
		pr_err("Target IQN exceeds %d bytes\n",
				ISCSI_IQN_LEN);
		return ERR_PTR(-EINVAL);
	}

	tiqn = kzalloc(sizeof(*tiqn), GFP_KERNEL);
	if (!tiqn)
		return ERR_PTR(-ENOMEM);

	sprintf(tiqn->tiqn, "%s", buf);
	INIT_LIST_HEAD(&tiqn->tiqn_list);
	INIT_LIST_HEAD(&tiqn->tiqn_tpg_list);
	spin_lock_init(&tiqn->tiqn_state_lock);
	spin_lock_init(&tiqn->tiqn_tpg_lock);
	spin_lock_init(&tiqn->sess_err_stats.lock);
	spin_lock_init(&tiqn->login_stats.lock);
	spin_lock_init(&tiqn->logout_stats.lock);

	tiqn->tiqn_state = TIQN_STATE_ACTIVE;

	idr_preload(GFP_KERNEL);
	spin_lock(&tiqn_lock);

	ret = idr_alloc(&tiqn_idr, NULL, 0, 0, GFP_NOWAIT);
	if (ret < 0) {
		pr_err("idr_alloc() failed for tiqn->tiqn_index\n");
		spin_unlock(&tiqn_lock);
		idr_preload_end();
		kfree(tiqn);
		return ERR_PTR(ret);
	}
	tiqn->tiqn_index = ret;
	list_add_tail(&tiqn->tiqn_list, &g_tiqn_list);

	spin_unlock(&tiqn_lock);
	idr_preload_end();

	pr_debug("CORE[0] - Added iSCSI Target IQN: %s\n", tiqn->tiqn);

	return tiqn;

}

static void iscsit_wait_for_tiqn(struct iscsi_tiqn *tiqn)
{
	/*
	 * Wait for accesses to said struct iscsi_tiqn to end.
	 */
	spin_lock(&tiqn->tiqn_state_lock);
	while (tiqn->tiqn_access_count != 0) {
		spin_unlock(&tiqn->tiqn_state_lock);
		msleep(10);
		spin_lock(&tiqn->tiqn_state_lock);
	}
	spin_unlock(&tiqn->tiqn_state_lock);
}

void iscsit_del_tiqn(struct iscsi_tiqn *tiqn)
{
	/*
	 * iscsit_set_tiqn_shutdown sets tiqn->tiqn_state = TIQN_STATE_SHUTDOWN
	 * while holding tiqn->tiqn_state_lock. This means that all subsequent
	 * attempts to access this struct iscsi_tiqn will fail from both transport
	 * fabric and control code paths.
	 */
	if (iscsit_set_tiqn_shutdown(tiqn) < 0) {
		pr_err("iscsit_set_tiqn_shutdown() failed\n");
		return;
	}

	iscsit_wait_for_tiqn(tiqn);

	spin_lock(&tiqn_lock);
	list_del(&tiqn->tiqn_list);
	idr_remove(&tiqn_idr, tiqn->tiqn_index);
	spin_unlock(&tiqn_lock);

	pr_debug("CORE[0] - Deleted iSCSI Target IQN: %s\n",
			tiqn->tiqn);
	kfree(tiqn);
}

int iscsit_access_np(struct iscsi_np *np, struct iscsi_portal_group *tpg)
{
	int ret;
	/*
	 * Determine if the network portal is accepting storage traffic.
	 */
	spin_lock_bh(&np->np_thread_lock);
	if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
		spin_unlock_bh(&np->np_thread_lock);
		return -1;
	}
	spin_unlock_bh(&np->np_thread_lock);
	/*
	 * Determine if the portal group is accepting storage traffic.
	 */
	spin_lock_bh(&tpg->tpg_state_lock);
	if (tpg->tpg_state != TPG_STATE_ACTIVE) {
		spin_unlock_bh(&tpg->tpg_state_lock);
		return -1;
	}
	spin_unlock_bh(&tpg->tpg_state_lock);

	/*
	 * Here we serialize access across the TIQN+TPG Tuple.
	 */
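	/*
	 * tpg->np_login_sem is the per-TPG login semaphore: only one login
	 * may be negotiating against this TIQN+TPG tuple at a time.  It is
	 * released again by iscsit_deaccess_np() once the login attempt
	 * completes or fails.  down_interruptible() returns non-zero if the
	 * waiter is interrupted by a signal, in which case we bail out.
	 */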
	ret = down_interruptible(&tpg->np_login_sem);
	if (ret != 0)
		return -1;

	spin_lock_bh(&tpg->tpg_state_lock);
	if (tpg->tpg_state != TPG_STATE_ACTIVE) {
		spin_unlock_bh(&tpg->tpg_state_lock);
		up(&tpg->np_login_sem);
		return -1;
	}
	spin_unlock_bh(&tpg->tpg_state_lock);

	return 0;
}

void iscsit_login_kref_put(struct kref *kref)
{
	struct iscsi_tpg_np *tpg_np = container_of(kref,
				struct iscsi_tpg_np, tpg_np_kref);

	complete(&tpg_np->tpg_np_comp);
}

int iscsit_deaccess_np(struct iscsi_np *np, struct iscsi_portal_group *tpg,
		       struct iscsi_tpg_np *tpg_np)
{
	struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;

	up(&tpg->np_login_sem);

	if (tpg_np)
		kref_put(&tpg_np->tpg_np_kref, iscsit_login_kref_put);

	if (tiqn)
		iscsit_put_tiqn_for_login(tiqn);

	return 0;
}

bool iscsit_check_np_match(
	struct sockaddr_storage *sockaddr,
	struct iscsi_np *np,
	int network_transport)
{
	struct sockaddr_in *sock_in, *sock_in_e;
	struct sockaddr_in6 *sock_in6, *sock_in6_e;
	bool ip_match = false;
	u16 port, port_e;

	if (sockaddr->ss_family == AF_INET6) {
		sock_in6 = (struct sockaddr_in6 *)sockaddr;
		sock_in6_e = (struct sockaddr_in6 *)&np->np_sockaddr;

		if (!memcmp(&sock_in6->sin6_addr.in6_u,
			    &sock_in6_e->sin6_addr.in6_u,
			    sizeof(struct in6_addr)))
			ip_match = true;

		port = ntohs(sock_in6->sin6_port);
		port_e = ntohs(sock_in6_e->sin6_port);
	} else {
		sock_in = (struct sockaddr_in *)sockaddr;
		sock_in_e = (struct sockaddr_in *)&np->np_sockaddr;

		if (sock_in->sin_addr.s_addr == sock_in_e->sin_addr.s_addr)
			ip_match = true;

		port = ntohs(sock_in->sin_port);
		port_e = ntohs(sock_in_e->sin_port);
	}

	if (ip_match && (port_e == port) &&
	    (np->np_network_transport == network_transport))
		return true;

	return false;
}

static struct iscsi_np *iscsit_get_np(
	struct sockaddr_storage *sockaddr,
	int network_transport)
{
	struct iscsi_np *np;
	bool match;

	lockdep_assert_held(&np_lock);

	list_for_each_entry(np, &g_np_list, np_list) {
		spin_lock_bh(&np->np_thread_lock);
		if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
			spin_unlock_bh(&np->np_thread_lock);
			continue;
		}

		match = iscsit_check_np_match(sockaddr, np, network_transport);
		if (match) {
			/*
			 * Increment the np_exports reference count now to
			 * prevent iscsit_del_np() below from being called
			 * while iscsi_tpg_add_network_portal() is called.
			 */
			np->np_exports++;
			spin_unlock_bh(&np->np_thread_lock);
			return np;
		}
		spin_unlock_bh(&np->np_thread_lock);
	}

	return NULL;
}

struct iscsi_np *iscsit_add_np(
	struct sockaddr_storage *sockaddr,
	int network_transport)
{
	struct iscsi_np *np;
	int ret;

	mutex_lock(&np_lock);

	/*
	 * Locate the existing struct iscsi_np if already active..
	 */
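	/*
	 * np_lock is held across both this lookup and the later
	 * list_add_tail() onto g_np_list, so two concurrent callers cannot
	 * each create a struct iscsi_np for the same sockaddr/transport
	 * pair.  When a match is found, iscsit_get_np() has already bumped
	 * np->np_exports under np_thread_lock, keeping iscsit_del_np() away
	 * while the portal is being reused.
	 */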
	np = iscsit_get_np(sockaddr, network_transport);
	if (np) {
		mutex_unlock(&np_lock);
		return np;
	}

	np = kzalloc(sizeof(*np), GFP_KERNEL);
	if (!np) {
		mutex_unlock(&np_lock);
		return ERR_PTR(-ENOMEM);
	}

	np->np_flags |= NPF_IP_NETWORK;
	np->np_network_transport = network_transport;
	spin_lock_init(&np->np_thread_lock);
	init_completion(&np->np_restart_comp);
	INIT_LIST_HEAD(&np->np_list);

	ret = iscsi_target_setup_login_socket(np, sockaddr);
	if (ret != 0) {
		kfree(np);
		mutex_unlock(&np_lock);
		return ERR_PTR(ret);
	}

	np->np_thread = kthread_run(iscsi_target_login_thread, np, "iscsi_np");
	if (IS_ERR(np->np_thread)) {
		pr_err("Unable to create kthread: iscsi_np\n");
		ret = PTR_ERR(np->np_thread);
		kfree(np);
		mutex_unlock(&np_lock);
		return ERR_PTR(ret);
	}
	/*
	 * Increment the np_exports reference count now to prevent
	 * iscsit_del_np() below from being run while a new call to
	 * iscsi_tpg_add_network_portal() for a matching iscsi_np is
	 * active. We don't need to hold np->np_thread_lock at this
	 * point because iscsi_np has not been added to g_np_list yet.
	 */
	np->np_exports = 1;
	np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;

	list_add_tail(&np->np_list, &g_np_list);
	mutex_unlock(&np_lock);

	pr_debug("CORE[0] - Added Network Portal: %pISpc on %s\n",
		&np->np_sockaddr, np->np_transport->name);

	return np;
}

int iscsit_reset_np_thread(
	struct iscsi_np *np,
	struct iscsi_tpg_np *tpg_np,
	struct iscsi_portal_group *tpg,
	bool shutdown)
{
	spin_lock_bh(&np->np_thread_lock);
	if (np->np_thread_state == ISCSI_NP_THREAD_INACTIVE) {
		spin_unlock_bh(&np->np_thread_lock);
		return 0;
	}
	np->np_thread_state = ISCSI_NP_THREAD_RESET;
	atomic_inc(&np->np_reset_count);

	if (np->np_thread) {
		spin_unlock_bh(&np->np_thread_lock);
		send_sig(SIGINT, np->np_thread, 1);
		wait_for_completion(&np->np_restart_comp);
		spin_lock_bh(&np->np_thread_lock);
	}
	spin_unlock_bh(&np->np_thread_lock);

	if (tpg_np && shutdown) {
		kref_put(&tpg_np->tpg_np_kref, iscsit_login_kref_put);

		wait_for_completion(&tpg_np->tpg_np_comp);
	}

	return 0;
}

static void iscsit_free_np(struct iscsi_np *np)
{
	if (np->np_socket)
		sock_release(np->np_socket);
}

int iscsit_del_np(struct iscsi_np *np)
{
	spin_lock_bh(&np->np_thread_lock);
	np->np_exports--;
	if (np->np_exports) {
		np->enabled = true;
		spin_unlock_bh(&np->np_thread_lock);
		return 0;
	}
	np->np_thread_state = ISCSI_NP_THREAD_SHUTDOWN;
	spin_unlock_bh(&np->np_thread_lock);

	if (np->np_thread) {
		/*
		 * We need to send the signal to wakeup Linux/Net
		 * which may be sleeping in sock_accept()..
		 */
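		/*
		 * SIGINT only knocks the np thread out of its blocking
		 * accept; kthread_stop() below then waits for the login
		 * thread to actually exit, so the struct iscsi_np is not
		 * torn down underneath it.
		 */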
		send_sig(SIGINT, np->np_thread, 1);
		kthread_stop(np->np_thread);
		np->np_thread = NULL;
	}

	np->np_transport->iscsit_free_np(np);

	mutex_lock(&np_lock);
	list_del(&np->np_list);
	mutex_unlock(&np_lock);

	pr_debug("CORE[0] - Removed Network Portal: %pISpc on %s\n",
		&np->np_sockaddr, np->np_transport->name);

	iscsit_put_transport(np->np_transport);
	kfree(np);
	return 0;
}

static void iscsit_get_rx_pdu(struct iscsit_conn *);

int iscsit_queue_rsp(struct iscsit_conn *conn, struct iscsit_cmd *cmd)
{
	return iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
}
EXPORT_SYMBOL(iscsit_queue_rsp);

void iscsit_aborted_task(struct iscsit_conn *conn, struct iscsit_cmd *cmd)
{
	spin_lock_bh(&conn->cmd_lock);
	if (!list_empty(&cmd->i_conn_node))
		list_del_init(&cmd->i_conn_node);
	spin_unlock_bh(&conn->cmd_lock);

	__iscsit_free_cmd(cmd, true);
}
EXPORT_SYMBOL(iscsit_aborted_task);

static u32 iscsit_crc_buf(const void *buf, u32 payload_length,
			  u32 padding, const void *pad_bytes);
static void iscsit_tx_thread_wait_for_tcp(struct iscsit_conn *);

static int
iscsit_xmit_nondatain_pdu(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
			  const void *data_buf, u32 data_buf_len)
{
	struct iscsi_hdr *hdr = (struct iscsi_hdr *)cmd->pdu;
	struct kvec *iov;
	u32 niov = 0, tx_size = ISCSI_HDR_LEN;
	int ret;

	iov = &cmd->iov_misc[0];
	iov[niov].iov_base = cmd->pdu;
	iov[niov++].iov_len = ISCSI_HDR_LEN;

	if (conn->conn_ops->HeaderDigest) {
		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];

		*header_digest = iscsit_crc_buf(hdr, ISCSI_HDR_LEN, 0, NULL);

		iov[0].iov_len += ISCSI_CRC_LEN;
		tx_size += ISCSI_CRC_LEN;
		pr_debug("Attaching CRC32C HeaderDigest"
			" to opcode 0x%x 0x%08x\n",
			hdr->opcode, *header_digest);
	}

	if (data_buf_len) {
		u32 padding = ((-data_buf_len) & 3);

		iov[niov].iov_base = (void *)data_buf;
		iov[niov++].iov_len = data_buf_len;
		tx_size += data_buf_len;

		if (padding != 0) {
			iov[niov].iov_base = &cmd->pad_bytes;
			iov[niov++].iov_len = padding;
			tx_size += padding;
			pr_debug("Attaching %u additional"
				" padding bytes.\n", padding);
		}

		if (conn->conn_ops->DataDigest) {
			cmd->data_crc = iscsit_crc_buf(data_buf, data_buf_len,
						       padding,
						       &cmd->pad_bytes);
			iov[niov].iov_base = &cmd->data_crc;
			iov[niov++].iov_len = ISCSI_CRC_LEN;
			tx_size += ISCSI_CRC_LEN;
			pr_debug("Attached DataDigest for %u"
				" bytes opcode 0x%x, CRC 0x%08x\n",
				data_buf_len, hdr->opcode, cmd->data_crc);
		}
	}

	cmd->iov_misc_count = niov;
	cmd->tx_size = tx_size;

	ret = iscsit_send_tx_data(cmd, conn, 1);
	if (ret < 0) {
		iscsit_tx_thread_wait_for_tcp(conn);
		return ret;
	}

	return 0;
}

static int iscsit_map_iovec(struct iscsit_cmd *cmd, struct kvec *iov, int nvec,
			    u32 data_offset, u32 data_length);
static void iscsit_unmap_iovec(struct iscsit_cmd *);
static u32 iscsit_crc_sglist(const struct iscsit_cmd *cmd, u32 data_length,
			     u32 padding, const u8 *pad_bytes);
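/*
 * iscsit_xmit_datain_pdu() builds a Data-In PDU as a kvec array in the same
 * on-wire order the TCP transport sends it: the 48-byte BHS, an optional
 * CRC32C HeaderDigest, the data segment mapped straight from the command's
 * scatterlist, pad bytes up to the next 4-byte boundary, and an optional
 * CRC32C DataDigest covering data plus padding.  The ((-len) & 3) idiom
 * computes that padding, e.g. a 5-byte segment gives (-5) & 3 == 3 pad bytes.
 */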
static int
iscsit_xmit_datain_pdu(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
		       const struct iscsi_datain *datain)
{
	struct kvec *iov;
	u32 iov_count = 0, tx_size = 0;
	int ret, iov_ret;

	iov = &cmd->iov_data[0];
	iov[iov_count].iov_base = cmd->pdu;
	iov[iov_count++].iov_len = ISCSI_HDR_LEN;
	tx_size += ISCSI_HDR_LEN;

	if (conn->conn_ops->HeaderDigest) {
		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];

		*header_digest = iscsit_crc_buf(cmd->pdu, ISCSI_HDR_LEN, 0,
						NULL);
		iov[0].iov_len += ISCSI_CRC_LEN;
		tx_size += ISCSI_CRC_LEN;

		pr_debug("Attaching CRC32 HeaderDigest for DataIN PDU 0x%08x\n",
			 *header_digest);
	}

	iov_ret = iscsit_map_iovec(cmd, &cmd->iov_data[iov_count],
				   cmd->orig_iov_data_count - (iov_count + 2),
				   datain->offset, datain->length);
	if (iov_ret < 0)
		return -1;

	iov_count += iov_ret;
	tx_size += datain->length;

	cmd->padding = ((-datain->length) & 3);
	if (cmd->padding) {
		iov[iov_count].iov_base = cmd->pad_bytes;
		iov[iov_count++].iov_len = cmd->padding;
		tx_size += cmd->padding;

		pr_debug("Attaching %u padding bytes\n", cmd->padding);
	}

	if (conn->conn_ops->DataDigest) {
		cmd->data_crc = iscsit_crc_sglist(cmd, datain->length,
						  cmd->padding, cmd->pad_bytes);
		iov[iov_count].iov_base = &cmd->data_crc;
		iov[iov_count++].iov_len = ISCSI_CRC_LEN;
		tx_size += ISCSI_CRC_LEN;

		pr_debug("Attached CRC32C DataDigest %d bytes, crc 0x%08x\n",
			 datain->length + cmd->padding, cmd->data_crc);
	}

	cmd->iov_data_count = iov_count;
	cmd->tx_size = tx_size;

	ret = iscsit_fe_sendpage_sg(cmd, conn);

	iscsit_unmap_iovec(cmd);

	if (ret < 0) {
		iscsit_tx_thread_wait_for_tcp(conn);
		return ret;
	}

	return 0;
}

static int iscsit_xmit_pdu(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
			   struct iscsi_datain_req *dr, const void *buf,
			   u32 buf_len)
{
	if (dr)
		return iscsit_xmit_datain_pdu(conn, cmd, buf);
	else
		return iscsit_xmit_nondatain_pdu(conn, cmd, buf, buf_len);
}

static enum target_prot_op iscsit_get_sup_prot_ops(struct iscsit_conn *conn)
{
	return TARGET_PROT_NORMAL;
}

static struct iscsit_transport iscsi_target_transport = {
	.name = "iSCSI/TCP",
	.transport_type = ISCSI_TCP,
	.rdma_shutdown = false,
	.owner = NULL,
	.iscsit_setup_np = iscsit_setup_np,
	.iscsit_accept_np = iscsit_accept_np,
	.iscsit_free_np = iscsit_free_np,
	.iscsit_get_login_rx = iscsit_get_login_rx,
	.iscsit_put_login_tx = iscsit_put_login_tx,
	.iscsit_get_dataout = iscsit_build_r2ts_for_cmd,
	.iscsit_immediate_queue = iscsit_immediate_queue,
	.iscsit_response_queue = iscsit_response_queue,
	.iscsit_queue_data_in = iscsit_queue_rsp,
	.iscsit_queue_status = iscsit_queue_rsp,
	.iscsit_aborted_task = iscsit_aborted_task,
	.iscsit_xmit_pdu = iscsit_xmit_pdu,
	.iscsit_get_rx_pdu = iscsit_get_rx_pdu,
	.iscsit_get_sup_prot_ops = iscsit_get_sup_prot_ops,
};

static int __init iscsi_target_init_module(void)
{
	int ret = 0, size;

	pr_debug("iSCSI-Target "ISCSIT_VERSION"\n");
	iscsit_global = kzalloc(sizeof(*iscsit_global), GFP_KERNEL);
	if (!iscsit_global)
		return -1;

	spin_lock_init(&iscsit_global->ts_bitmap_lock);
	mutex_init(&auth_id_lock);
	idr_init(&tiqn_idr);

	ret = target_register_template(&iscsi_ops);
	if (ret)
		goto out;

	size = BITS_TO_LONGS(ISCSIT_BITMAP_BITS) * sizeof(long);
	iscsit_global->ts_bitmap = vzalloc(size);
	if (!iscsit_global->ts_bitmap)
		goto configfs_out;

	if (!zalloc_cpumask_var(&iscsit_global->allowed_cpumask, GFP_KERNEL)) {
		pr_err("Unable to allocate iscsit_global->allowed_cpumask\n");
		goto bitmap_out;
	}
	cpumask_setall(iscsit_global->allowed_cpumask);

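	/*
	 * Four slab caches back the per-command bookkeeping objects used by
	 * the core: queue requests (lio_qr_cache), DataIN requests
	 * (lio_dr_cache), out-of-order CmdSN tracking (lio_ooo_cache) and
	 * outgoing R2Ts (lio_r2t_cache).
	 */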
	lio_qr_cache = kmem_cache_create("lio_qr_cache",
			sizeof(struct iscsi_queue_req),
			__alignof__(struct iscsi_queue_req), 0, NULL);
	if (!lio_qr_cache) {
		pr_err("Unable to kmem_cache_create() for"
				" lio_qr_cache\n");
		goto cpumask_out;
	}

	lio_dr_cache = kmem_cache_create("lio_dr_cache",
			sizeof(struct iscsi_datain_req),
			__alignof__(struct iscsi_datain_req), 0, NULL);
	if (!lio_dr_cache) {
		pr_err("Unable to kmem_cache_create() for"
				" lio_dr_cache\n");
		goto qr_out;
	}

	lio_ooo_cache = kmem_cache_create("lio_ooo_cache",
			sizeof(struct iscsi_ooo_cmdsn),
			__alignof__(struct iscsi_ooo_cmdsn), 0, NULL);
	if (!lio_ooo_cache) {
		pr_err("Unable to kmem_cache_create() for"
				" lio_ooo_cache\n");
		goto dr_out;
	}

	lio_r2t_cache = kmem_cache_create("lio_r2t_cache",
			sizeof(struct iscsi_r2t), __alignof__(struct iscsi_r2t),
			0, NULL);
	if (!lio_r2t_cache) {
		pr_err("Unable to kmem_cache_create() for"
				" lio_r2t_cache\n");
		goto ooo_out;
	}

	iscsit_register_transport(&iscsi_target_transport);

	if (iscsit_load_discovery_tpg() < 0)
		goto r2t_out;

	return ret;
r2t_out:
	iscsit_unregister_transport(&iscsi_target_transport);
	kmem_cache_destroy(lio_r2t_cache);
ooo_out:
	kmem_cache_destroy(lio_ooo_cache);
dr_out:
	kmem_cache_destroy(lio_dr_cache);
qr_out:
	kmem_cache_destroy(lio_qr_cache);
cpumask_out:
	free_cpumask_var(iscsit_global->allowed_cpumask);
bitmap_out:
	vfree(iscsit_global->ts_bitmap);
configfs_out:
	/* XXX: this probably wants it to be its own unwind step.. */
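	/*
	 * Mirror of the shutdown path in iscsi_target_cleanup_module(): if a
	 * discovery TPG was brought up before a later step failed, disable
	 * it before dropping the fabric template.
	 */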
	if (iscsit_global->discovery_tpg)
		iscsit_tpg_disable_portal_group(iscsit_global->discovery_tpg, 1);
	target_unregister_template(&iscsi_ops);
out:
	kfree(iscsit_global);
	return -ENOMEM;
}

static void __exit iscsi_target_cleanup_module(void)
{
	iscsit_release_discovery_tpg();
	iscsit_unregister_transport(&iscsi_target_transport);
	kmem_cache_destroy(lio_qr_cache);
	kmem_cache_destroy(lio_dr_cache);
	kmem_cache_destroy(lio_ooo_cache);
	kmem_cache_destroy(lio_r2t_cache);

	/*
	 * Shutdown discovery sessions and disable discovery TPG
	 */
	if (iscsit_global->discovery_tpg)
		iscsit_tpg_disable_portal_group(iscsit_global->discovery_tpg, 1);

	target_unregister_template(&iscsi_ops);

	free_cpumask_var(iscsit_global->allowed_cpumask);
	vfree(iscsit_global->ts_bitmap);
	kfree(iscsit_global);
}

int iscsit_add_reject(
	struct iscsit_conn *conn,
	u8 reason,
	unsigned char *buf)
{
	struct iscsit_cmd *cmd;

	cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
	if (!cmd)
		return -1;

	cmd->iscsi_opcode = ISCSI_OP_REJECT;
	cmd->reject_reason = reason;

	cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
	if (!cmd->buf_ptr) {
		pr_err("Unable to allocate memory for cmd->buf_ptr\n");
		iscsit_free_cmd(cmd, false);
		return -1;
	}

	spin_lock_bh(&conn->cmd_lock);
	list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
	spin_unlock_bh(&conn->cmd_lock);

	cmd->i_state = ISTATE_SEND_REJECT;
	iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);

	return -1;
}
EXPORT_SYMBOL(iscsit_add_reject);

static int iscsit_add_reject_from_cmd(
	struct iscsit_cmd *cmd,
	u8 reason,
	bool add_to_conn,
	unsigned char *buf)
{
	struct iscsit_conn *conn;
	const bool do_put = cmd->se_cmd.se_tfo != NULL;

	if (!cmd->conn) {
		pr_err("cmd->conn is NULL for ITT: 0x%08x\n",
				cmd->init_task_tag);
		return -1;
	}
	conn = cmd->conn;

	cmd->iscsi_opcode = ISCSI_OP_REJECT;
	cmd->reject_reason = reason;

	cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
	if (!cmd->buf_ptr) {
		pr_err("Unable to allocate memory for cmd->buf_ptr\n");
		iscsit_free_cmd(cmd, false);
		return -1;
	}

	if (add_to_conn) {
		spin_lock_bh(&conn->cmd_lock);
		list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
		spin_unlock_bh(&conn->cmd_lock);
	}

	cmd->i_state = ISTATE_SEND_REJECT;
	iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
	/*
	 * Perform the kref_put now if se_cmd has already been setup by
	 * iscsit_setup_scsi_cmd()
	 */
	if (do_put) {
		pr_debug("iscsi reject: calling target_put_sess_cmd >>>>>>\n");
		target_put_sess_cmd(&cmd->se_cmd);
	}
	return -1;
}

static int iscsit_add_reject_cmd(struct iscsit_cmd *cmd, u8 reason,
				 unsigned char *buf)
{
	return iscsit_add_reject_from_cmd(cmd, reason, true, buf);
}

int iscsit_reject_cmd(struct iscsit_cmd *cmd, u8 reason, unsigned char *buf)
{
	return iscsit_add_reject_from_cmd(cmd, reason, false, buf);
}
EXPORT_SYMBOL(iscsit_reject_cmd);

/*
 * Map some portion of the allocated scatterlist to an iovec, suitable for
 * kernel sockets to copy data in/out.
 */
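/*
 * Each kvec entry produced here covers at most one scatterlist entry; the
 * backing pages are kmap()'d and cmd->kmapped_nents records how many, so
 * iscsit_unmap_iovec() can kunmap() exactly that many afterwards.  If the
 * requested offset/length cannot be satisfied, the sg list is dumped and -1
 * is returned.
 */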
static int iscsit_map_iovec(struct iscsit_cmd *cmd, struct kvec *iov, int nvec,
			    u32 data_offset, u32 data_length)
{
	u32 i = 0, orig_data_length = data_length;
	struct scatterlist *sg;
	unsigned int page_off;

	/*
	 * We know each entry in t_data_sg contains a page.
	 */
	u32 ent = data_offset / PAGE_SIZE;

	if (!data_length)
		return 0;

	if (ent >= cmd->se_cmd.t_data_nents) {
		pr_err("Initial page entry out-of-bounds\n");
		goto overflow;
	}

	sg = &cmd->se_cmd.t_data_sg[ent];
	page_off = (data_offset % PAGE_SIZE);

	cmd->first_data_sg = sg;
	cmd->first_data_sg_off = page_off;

	while (data_length) {
		u32 cur_len;

		if (WARN_ON_ONCE(!sg || i >= nvec))
			goto overflow;

		cur_len = min_t(u32, data_length, sg->length - page_off);

		iov[i].iov_base = kmap(sg_page(sg)) + sg->offset + page_off;
		iov[i].iov_len = cur_len;

		data_length -= cur_len;
		page_off = 0;
		sg = sg_next(sg);
		i++;
	}

	cmd->kmapped_nents = i;

	return i;

overflow:
	pr_err("offset %d + length %d overflow; %d/%d; sg-list:\n",
	       data_offset, orig_data_length, i, nvec);
	for_each_sg(cmd->se_cmd.t_data_sg, sg,
		    cmd->se_cmd.t_data_nents, i) {
		pr_err("[%d] off %d len %d\n",
		       i, sg->offset, sg->length);
	}
	return -1;
}

static void iscsit_unmap_iovec(struct iscsit_cmd *cmd)
{
	u32 i;
	struct scatterlist *sg;

	sg = cmd->first_data_sg;

	for (i = 0; i < cmd->kmapped_nents; i++)
		kunmap(sg_page(&sg[i]));
}

static void iscsit_ack_from_expstatsn(struct iscsit_conn *conn, u32 exp_statsn)
{
	LIST_HEAD(ack_list);
	struct iscsit_cmd *cmd, *cmd_p;

	conn->exp_statsn = exp_statsn;

	if (conn->sess->sess_ops->RDMAExtensions)
		return;

	spin_lock_bh(&conn->cmd_lock);
	list_for_each_entry_safe(cmd, cmd_p, &conn->conn_cmd_list, i_conn_node) {
		spin_lock(&cmd->istate_lock);
		if ((cmd->i_state == ISTATE_SENT_STATUS) &&
		    iscsi_sna_lt(cmd->stat_sn, exp_statsn)) {
			cmd->i_state = ISTATE_REMOVE;
			spin_unlock(&cmd->istate_lock);
			list_move_tail(&cmd->i_conn_node, &ack_list);
			continue;
		}
		spin_unlock(&cmd->istate_lock);
	}
	spin_unlock_bh(&conn->cmd_lock);

	list_for_each_entry_safe(cmd, cmd_p, &ack_list, i_conn_node) {
		list_del_init(&cmd->i_conn_node);
		iscsit_free_cmd(cmd, false);
	}
}

static int iscsit_allocate_iovecs(struct iscsit_cmd *cmd)
{
	u32 iov_count = max(1UL, DIV_ROUND_UP(cmd->se_cmd.data_length, PAGE_SIZE));

	iov_count += ISCSI_IOV_DATA_BUFFER;
	cmd->iov_data = kcalloc(iov_count, sizeof(*cmd->iov_data), GFP_KERNEL);
	if (!cmd->iov_data)
		return -ENOMEM;

	cmd->orig_iov_data_count = iov_count;
	return 0;
}

int iscsit_setup_scsi_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
			  unsigned char *buf)
{
	int data_direction, payload_length;
	struct iscsi_ecdb_ahdr *ecdb_ahdr;
	struct iscsi_scsi_req *hdr;
	int iscsi_task_attr;
	unsigned char *cdb;
	int sam_task_attr;

	atomic_long_inc(&conn->sess->cmd_pdus);

	hdr = (struct iscsi_scsi_req *) buf;
	payload_length = ntoh24(hdr->dlength);

	/* FIXME: Add checks for AdditionalHeaderSegment */

	if (!(hdr->flags & ISCSI_FLAG_CMD_WRITE) &&
	    !(hdr->flags & ISCSI_FLAG_CMD_FINAL)) {
		pr_err("ISCSI_FLAG_CMD_WRITE & ISCSI_FLAG_CMD_FINAL"
			" not set. Bad iSCSI Initiator.\n");
		return iscsit_add_reject_cmd(cmd,
				ISCSI_REASON_BOOKMARK_INVALID, buf);
	}

	if (((hdr->flags & ISCSI_FLAG_CMD_READ) ||
	     (hdr->flags & ISCSI_FLAG_CMD_WRITE)) && !hdr->data_length) {
		/*
		 * From RFC-3720 Section 10.3.1:
		 *
		 * "Either or both of R and W MAY be 1 when either the
		 * Expected Data Transfer Length and/or Bidirectional Read
		 * Expected Data Transfer Length are 0"
		 *
		 * For this case, go ahead and clear the unnecessary bits
		 * to avoid any confusion with ->data_direction.
		 */
		hdr->flags &= ~ISCSI_FLAG_CMD_READ;
		hdr->flags &= ~ISCSI_FLAG_CMD_WRITE;

		pr_warn("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE"
			" set when Expected Data Transfer Length is 0 for"
			" CDB: 0x%02x, Fixing up flags\n", hdr->cdb[0]);
	}

	if (!(hdr->flags & ISCSI_FLAG_CMD_READ) &&
	    !(hdr->flags & ISCSI_FLAG_CMD_WRITE) && (hdr->data_length != 0)) {
		pr_err("ISCSI_FLAG_CMD_READ and/or ISCSI_FLAG_CMD_WRITE"
			" MUST be set if Expected Data Transfer Length is not 0."
			" Bad iSCSI Initiator\n");
		return iscsit_add_reject_cmd(cmd,
				ISCSI_REASON_BOOKMARK_INVALID, buf);
	}

	if ((hdr->flags & ISCSI_FLAG_CMD_READ) &&
	    (hdr->flags & ISCSI_FLAG_CMD_WRITE)) {
		pr_err("Bidirectional operations not supported!\n");
		return iscsit_add_reject_cmd(cmd,
				ISCSI_REASON_BOOKMARK_INVALID, buf);
	}

	if (hdr->opcode & ISCSI_OP_IMMEDIATE) {
		pr_err("Illegally set Immediate Bit in iSCSI Initiator"
				" Scsi Command PDU.\n");
		return iscsit_add_reject_cmd(cmd,
				ISCSI_REASON_BOOKMARK_INVALID, buf);
	}

	if (payload_length && !conn->sess->sess_ops->ImmediateData) {
		pr_err("ImmediateData=No but DataSegmentLength=%u,"
			" protocol error.\n", payload_length);
		return iscsit_add_reject_cmd(cmd,
				ISCSI_REASON_PROTOCOL_ERROR, buf);
	}

	if ((be32_to_cpu(hdr->data_length) == payload_length) &&
	    (!(hdr->flags & ISCSI_FLAG_CMD_FINAL))) {
		pr_err("Expected Data Transfer Length and Length of"
			" Immediate Data are the same, but ISCSI_FLAG_CMD_FINAL"
			" bit is not set protocol error\n");
		return iscsit_add_reject_cmd(cmd,
				ISCSI_REASON_PROTOCOL_ERROR, buf);
	}

	if (payload_length > be32_to_cpu(hdr->data_length)) {
		pr_err("DataSegmentLength: %u is greater than"
			" EDTL: %u, protocol error.\n", payload_length,
				hdr->data_length);
		return iscsit_add_reject_cmd(cmd,
				ISCSI_REASON_PROTOCOL_ERROR, buf);
	}

	if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
		pr_err("DataSegmentLength: %u is greater than"
			" MaxXmitDataSegmentLength: %u, protocol error.\n",
			payload_length, conn->conn_ops->MaxXmitDataSegmentLength);
		return iscsit_add_reject_cmd(cmd,
				ISCSI_REASON_PROTOCOL_ERROR, buf);
	}

	if (payload_length > conn->sess->sess_ops->FirstBurstLength) {
		pr_err("DataSegmentLength: %u is greater than"
			" FirstBurstLength: %u, protocol error.\n",
			payload_length, conn->sess->sess_ops->FirstBurstLength);
		return iscsit_add_reject_cmd(cmd,
				ISCSI_REASON_BOOKMARK_INVALID, buf);
	}

	cdb = hdr->cdb;

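	/*
	 * A non-zero hlength means the initiator attached an Extended CDB
	 * AHS: the first 16 CDB bytes live in the BHS and the remainder in
	 * the AHS.  ahslength counts the one reserved byte plus the extra
	 * CDB bytes, so the contiguous buffer allocated below is
	 * 16 + (ahslength - 1) = ahslength + 15 bytes.
	 */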
	if (hdr->hlength) {
		ecdb_ahdr = (struct iscsi_ecdb_ahdr *) (hdr + 1);
		if (ecdb_ahdr->ahstype != ISCSI_AHSTYPE_CDB) {
			pr_err("Additional Header Segment type %d not supported!\n",
			       ecdb_ahdr->ahstype);
			return iscsit_add_reject_cmd(cmd,
				ISCSI_REASON_CMD_NOT_SUPPORTED, buf);
		}

		cdb = kmalloc(be16_to_cpu(ecdb_ahdr->ahslength) + 15,
			      GFP_KERNEL);
		if (cdb == NULL)
			return iscsit_add_reject_cmd(cmd,
				ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
		memcpy(cdb, hdr->cdb, ISCSI_CDB_SIZE);
		memcpy(cdb + ISCSI_CDB_SIZE, ecdb_ahdr->ecdb,
		       be16_to_cpu(ecdb_ahdr->ahslength) - 1);
	}

	data_direction = (hdr->flags & ISCSI_FLAG_CMD_WRITE) ? DMA_TO_DEVICE :
			 (hdr->flags & ISCSI_FLAG_CMD_READ) ? DMA_FROM_DEVICE :
			  DMA_NONE;

	cmd->data_direction = data_direction;
	iscsi_task_attr = hdr->flags & ISCSI_FLAG_CMD_ATTR_MASK;
	/*
	 * Figure out the SAM Task Attribute for the incoming SCSI CDB
	 */
	if ((iscsi_task_attr == ISCSI_ATTR_UNTAGGED) ||
	    (iscsi_task_attr == ISCSI_ATTR_SIMPLE))
		sam_task_attr = TCM_SIMPLE_TAG;
	else if (iscsi_task_attr == ISCSI_ATTR_ORDERED)
		sam_task_attr = TCM_ORDERED_TAG;
	else if (iscsi_task_attr == ISCSI_ATTR_HEAD_OF_QUEUE)
		sam_task_attr = TCM_HEAD_TAG;
	else if (iscsi_task_attr == ISCSI_ATTR_ACA)
		sam_task_attr = TCM_ACA_TAG;
	else {
		pr_debug("Unknown iSCSI Task Attribute: 0x%02x, using"
			" TCM_SIMPLE_TAG\n", iscsi_task_attr);
		sam_task_attr = TCM_SIMPLE_TAG;
	}

	cmd->iscsi_opcode = ISCSI_OP_SCSI_CMD;
	cmd->i_state = ISTATE_NEW_CMD;
	cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
	cmd->immediate_data = (payload_length) ? 1 : 0;
	cmd->unsolicited_data = ((!(hdr->flags & ISCSI_FLAG_CMD_FINAL) &&
				 (hdr->flags & ISCSI_FLAG_CMD_WRITE)) ? 1 : 0);
	if (cmd->unsolicited_data)
		cmd->cmd_flags |= ICF_NON_IMMEDIATE_UNSOLICITED_DATA;

	conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
	if (hdr->flags & ISCSI_FLAG_CMD_READ)
		cmd->targ_xfer_tag = session_get_next_ttt(conn->sess);
	else
		cmd->targ_xfer_tag = 0xFFFFFFFF;
	cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
	cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn);
	cmd->first_burst_len = payload_length;

	if (!conn->sess->sess_ops->RDMAExtensions &&
	    cmd->data_direction == DMA_FROM_DEVICE) {
		struct iscsi_datain_req *dr;

		dr = iscsit_allocate_datain_req();
		if (!dr) {
			if (cdb != hdr->cdb)
				kfree(cdb);
			return iscsit_add_reject_cmd(cmd,
				ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
		}

		iscsit_attach_datain_req(cmd, dr);
	}

	/*
	 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
	 */
	__target_init_cmd(&cmd->se_cmd, &iscsi_ops,
			  conn->sess->se_sess, be32_to_cpu(hdr->data_length),
			  cmd->data_direction, sam_task_attr,
			  cmd->sense_buffer + 2, scsilun_to_int(&hdr->lun),
			  conn->cmd_cnt);

	pr_debug("Got SCSI Command, ITT: 0x%08x, CmdSN: 0x%08x,"
		" ExpXferLen: %u, Length: %u, CID: %hu\n", hdr->itt,
		hdr->cmdsn, be32_to_cpu(hdr->data_length), payload_length,
		conn->cid);

	target_get_sess_cmd(&cmd->se_cmd, true);

	cmd->se_cmd.tag = (__force u32)cmd->init_task_tag;
	cmd->sense_reason = target_cmd_init_cdb(&cmd->se_cmd, cdb,
						GFP_KERNEL);

	if (cdb != hdr->cdb)
		kfree(cdb);

	if (cmd->sense_reason) {
		if (cmd->sense_reason == TCM_OUT_OF_RESOURCES) {
			return iscsit_add_reject_cmd(cmd,
				ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
		}

		goto attach_cmd;
	}

	cmd->sense_reason = transport_lookup_cmd_lun(&cmd->se_cmd);
	if (cmd->sense_reason)
		goto attach_cmd;

	cmd->sense_reason = target_cmd_parse_cdb(&cmd->se_cmd);
	if (cmd->sense_reason)
		goto attach_cmd;

	if (iscsit_build_pdu_and_seq_lists(cmd, payload_length) < 0) {
		return iscsit_add_reject_cmd(cmd,
				ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
	}

attach_cmd:
	spin_lock_bh(&conn->cmd_lock);
	list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
	spin_unlock_bh(&conn->cmd_lock);
	return 0;
}
EXPORT_SYMBOL(iscsit_setup_scsi_cmd);

void iscsit_set_unsolicited_dataout(struct iscsit_cmd *cmd)
{
	iscsit_set_dataout_sequence_values(cmd);

	spin_lock_bh(&cmd->dataout_timeout_lock);
	iscsit_start_dataout_timer(cmd, cmd->conn);
	spin_unlock_bh(&cmd->dataout_timeout_lock);
}
EXPORT_SYMBOL(iscsit_set_unsolicited_dataout);

int iscsit_process_scsi_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
			    struct iscsi_scsi_req *hdr)
{
	int cmdsn_ret = 0;
	/*
	 * Check the CmdSN against ExpCmdSN/MaxCmdSN here if
	 * the Immediate Bit is not set, and no Immediate
	 * Data is attached.
	 *
	 * A PDU/CmdSN carrying Immediate Data can only
	 * be processed after the DataCRC has passed.
	 * If the DataCRC fails, the CmdSN MUST NOT
	 * be acknowledged. (See below)
	 */
	if (!cmd->immediate_data) {
		cmdsn_ret = iscsit_sequence_cmd(conn, cmd,
					(unsigned char *)hdr, hdr->cmdsn);
		if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
			return -1;
		else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
			target_put_sess_cmd(&cmd->se_cmd);
			return 0;
		}
	}

	iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));

	/*
	 * If no Immediate Data is attached, it's OK to return now.
	 */
	if (!cmd->immediate_data) {
		if (!cmd->sense_reason && cmd->unsolicited_data)
			iscsit_set_unsolicited_dataout(cmd);
		if (!cmd->sense_reason)
			return 0;

		target_put_sess_cmd(&cmd->se_cmd);
		return 0;
	}

	/*
	 * Early CHECK_CONDITIONs with ImmediateData never make it to command
	 * execution. These exceptions are processed in CmdSN order using
	 * iscsit_check_received_cmdsn() in iscsit_get_immediate_data() below.
	 */
	if (cmd->sense_reason)
		return 1;
	/*
	 * Call directly into transport_generic_new_cmd() to perform
	 * the backend memory allocation.
	 */
	cmd->sense_reason = transport_generic_new_cmd(&cmd->se_cmd);
	if (cmd->sense_reason)
		return 1;

	return 0;
}
EXPORT_SYMBOL(iscsit_process_scsi_cmd);

static int
iscsit_get_immediate_data(struct iscsit_cmd *cmd, struct iscsi_scsi_req *hdr,
			  bool dump_payload)
{
	int cmdsn_ret = 0, immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
	int rc;

	/*
	 * Special case for Unsupported SAM WRITE Opcodes and ImmediateData=Yes.
	 */
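	/*
	 * Even when the command itself has already failed (dump_payload is
	 * set by a positive return from iscsit_process_scsi_cmd()), the
	 * immediate data the initiator attached is still on the wire and
	 * must be read and discarded via iscsit_dump_data_payload() to keep
	 * the connection stream in sync.
	 */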
	if (dump_payload) {
		u32 length = min(cmd->se_cmd.data_length - cmd->write_data_done,
				 cmd->first_burst_len);

		pr_debug("Dumping min(%d - %d, %d) = %d bytes of immediate data\n",
			 cmd->se_cmd.data_length, cmd->write_data_done,
			 cmd->first_burst_len, length);
		rc = iscsit_dump_data_payload(cmd->conn, length, 1);
		pr_debug("Finished dumping immediate data\n");
		if (rc < 0)
			immed_ret = IMMEDIATE_DATA_CANNOT_RECOVER;
	} else {
		immed_ret = iscsit_handle_immediate_data(cmd, hdr,
							 cmd->first_burst_len);
	}

	if (immed_ret == IMMEDIATE_DATA_NORMAL_OPERATION) {
		/*
		 * A PDU/CmdSN carrying Immediate Data passed
		 * DataCRC, check against ExpCmdSN/MaxCmdSN if
		 * Immediate Bit is not set.
		 */
		cmdsn_ret = iscsit_sequence_cmd(cmd->conn, cmd,
					(unsigned char *)hdr, hdr->cmdsn);
		if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
			return -1;

		if (cmd->sense_reason || cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
			target_put_sess_cmd(&cmd->se_cmd);

			return 0;
		} else if (cmd->unsolicited_data)
			iscsit_set_unsolicited_dataout(cmd);

	} else if (immed_ret == IMMEDIATE_DATA_ERL1_CRC_FAILURE) {
		/*
		 * Immediate Data failed DataCRC and ERL>=1,
		 * silently drop this PDU and let the initiator
		 * plug the CmdSN gap.
		 *
		 * FIXME: Send Unsolicited NOPIN with reserved
		 * TTT here to help the initiator figure out
		 * the missing CmdSN, although they should be
		 * intelligent enough to determine the missing
		 * CmdSN and issue a retry to plug the sequence.
		 */
		cmd->i_state = ISTATE_REMOVE;
		iscsit_add_cmd_to_immediate_queue(cmd, cmd->conn, cmd->i_state);
	} else /* immed_ret == IMMEDIATE_DATA_CANNOT_RECOVER */
		return -1;

	return 0;
}

static int
iscsit_handle_scsi_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
		       unsigned char *buf)
{
	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
	int rc, immed_data;
	bool dump_payload = false;

	rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
	if (rc < 0)
		return 0;
	/*
	 * Allocate iovecs needed for struct socket operations for
	 * traditional iSCSI block I/O.
	 */
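	/*
	 * iscsit_allocate_iovecs() sizes cmd->iov_data at one kvec per page
	 * of the expected transfer plus ISCSI_IOV_DATA_BUFFER spare slots,
	 * which leaves room for the header, pad bytes and digest entries
	 * added around the payload later on.
	 */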
	if (iscsit_allocate_iovecs(cmd) < 0) {
		return iscsit_reject_cmd(cmd,
				ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
	}
	immed_data = cmd->immediate_data;

	rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
	if (rc < 0)
		return rc;
	else if (rc > 0)
		dump_payload = true;

	if (!immed_data)
		return 0;

	return iscsit_get_immediate_data(cmd, hdr, dump_payload);
}

static u32 iscsit_crc_sglist(const struct iscsit_cmd *cmd, u32 data_length,
			     u32 padding, const u8 *pad_bytes)
{
	struct scatterlist *sg = cmd->first_data_sg;
	unsigned int page_off = cmd->first_data_sg_off;
	u32 crc = ~0;

	while (data_length) {
		u32 cur_len = min_t(u32, data_length, sg->length - page_off);
		const void *virt;

		virt = kmap_local_page(sg_page(sg)) + sg->offset + page_off;
		crc = crc32c(crc, virt, cur_len);
		kunmap_local(virt);

		/* iscsit_map_iovec has already checked for invalid sg pointers */
		sg = sg_next(sg);

		page_off = 0;
		data_length -= cur_len;
	}

	if (padding)
		crc = crc32c(crc, pad_bytes, padding);

	return ~crc;
}

static u32 iscsit_crc_buf(const void *buf, u32 payload_length,
			  u32 padding, const void *pad_bytes)
{
	u32 crc = ~0;

	crc = crc32c(crc, buf, payload_length);

	if (padding)
		crc = crc32c(crc, pad_bytes, padding);

	return ~crc;
}

int
__iscsit_check_dataout_hdr(struct iscsit_conn *conn, void *buf,
			   struct iscsit_cmd *cmd, u32 payload_length,
			   bool *success)
{
	struct iscsi_data *hdr = buf;
	struct se_cmd *se_cmd;
	int rc;

	/* iSCSI write */
	atomic_long_add(payload_length, &conn->sess->rx_data_octets);

	pr_debug("Got DataOut ITT: 0x%08x, TTT: 0x%08x,"
		" DataSN: 0x%08x, Offset: %u, Length: %u, CID: %hu\n",
		hdr->itt, hdr->ttt, hdr->datasn, ntohl(hdr->offset),
		payload_length, conn->cid);

	if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT) {
		pr_err("Command ITT: 0x%08x received DataOUT after"
			" last DataOUT received, dumping payload\n",
			cmd->init_task_tag);
		return iscsit_dump_data_payload(conn, payload_length, 1);
	}

	if (cmd->data_direction != DMA_TO_DEVICE) {
		pr_err("Command ITT: 0x%08x received DataOUT for a"
			" NON-WRITE command.\n", cmd->init_task_tag);
		return iscsit_dump_data_payload(conn, payload_length, 1);
	}
	se_cmd = &cmd->se_cmd;
	iscsit_mod_dataout_timer(cmd);

	if ((be32_to_cpu(hdr->offset) + payload_length) > cmd->se_cmd.data_length) {
		pr_err("DataOut Offset: %u, Length %u greater than iSCSI Command EDTL %u, protocol error.\n",
		       be32_to_cpu(hdr->offset), payload_length,
		       cmd->se_cmd.data_length);
		return iscsit_reject_cmd(cmd, ISCSI_REASON_BOOKMARK_INVALID, buf);
	}

	if (cmd->unsolicited_data) {
		int dump_unsolicited_data = 0;

		if (conn->sess->sess_ops->InitialR2T) {
			pr_err("Received unexpected unsolicited data"
				" while InitialR2T=Yes, protocol error.\n");
			transport_send_check_condition_and_sense(&cmd->se_cmd,
					TCM_UNEXPECTED_UNSOLICITED_DATA, 0);
			return -1;
		}
		/*
		 * Special case for dealing with Unsolicited DataOUT
		 * and Unsupported SAM WRITE Opcodes and SE resource allocation
		 * failures;
		 */

		/* Something's amiss if we're not in WRITE_PENDING state... */
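		/*
		 * If the CDB was never accepted (SCF_SUPPORTED_SAM_OPCODE is
		 * not set) there is no backend buffer to receive into, so
		 * the unsolicited payload below is read off the wire and
		 * dropped rather than mapped into the command.
		 */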
		WARN_ON(se_cmd->t_state != TRANSPORT_WRITE_PENDING);
		if (!(se_cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE))
			dump_unsolicited_data = 1;

		if (dump_unsolicited_data) {
			/*
			 * Check if a delayed TASK_ABORTED status needs to
			 * be sent now if the ISCSI_FLAG_CMD_FINAL has been
			 * received with the unsolicited data out.
			 */
			if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
				iscsit_stop_dataout_timer(cmd);

			return iscsit_dump_data_payload(conn, payload_length, 1);
		}
	} else {
		/*
		 * For the normal solicited data path:
		 *
		 * Check for a delayed TASK_ABORTED status and dump any
		 * incoming data out payload if one exists. Also, when the
		 * ISCSI_FLAG_CMD_FINAL is set to denote the end of the current
		 * data out sequence, we decrement outstanding_r2ts. Once
		 * outstanding_r2ts reaches zero, go ahead and send the delayed
		 * TASK_ABORTED status.
		 */
		if (se_cmd->transport_state & CMD_T_ABORTED) {
			if (hdr->flags & ISCSI_FLAG_CMD_FINAL &&
			    --cmd->outstanding_r2ts < 1)
				iscsit_stop_dataout_timer(cmd);

			return iscsit_dump_data_payload(conn, payload_length, 1);
		}
	}
	/*
	 * Perform DataSN, DataSequenceInOrder, DataPDUInOrder, and
	 * within-command recovery checks before receiving the payload.
	 */
	rc = iscsit_check_pre_dataout(cmd, buf);
	if (rc == DATAOUT_WITHIN_COMMAND_RECOVERY)
		return 0;
	else if (rc == DATAOUT_CANNOT_RECOVER)
		return -1;
	*success = true;
	return 0;
}
EXPORT_SYMBOL(__iscsit_check_dataout_hdr);

int
iscsit_check_dataout_hdr(struct iscsit_conn *conn, void *buf,
			 struct iscsit_cmd **out_cmd)
{
	struct iscsi_data *hdr = buf;
	struct iscsit_cmd *cmd;
	u32 payload_length = ntoh24(hdr->dlength);
	int rc;
	bool success = false;

	if (!payload_length) {
		pr_warn_ratelimited("DataOUT payload is ZERO, ignoring.\n");
		return 0;
	}

	if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
		pr_err_ratelimited("DataSegmentLength: %u is greater than"
			" MaxXmitDataSegmentLength: %u\n", payload_length,
			conn->conn_ops->MaxXmitDataSegmentLength);
		return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, buf);
	}

	cmd = iscsit_find_cmd_from_itt_or_dump(conn, hdr->itt, payload_length);
	if (!cmd)
		return 0;

	rc = __iscsit_check_dataout_hdr(conn, buf, cmd, payload_length, &success);

	if (success)
		*out_cmd = cmd;

	return rc;
}
EXPORT_SYMBOL(iscsit_check_dataout_hdr);

static int
iscsit_get_dataout(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
		   struct iscsi_data *hdr)
{
	struct kvec *iov;
	u32 checksum, iov_count = 0, padding = 0, rx_got = 0, rx_size = 0;
	u32 payload_length;
	int iov_ret, data_crc_failed = 0;

	payload_length = min_t(u32, cmd->se_cmd.data_length,
			       ntoh24(hdr->dlength));
	rx_size += payload_length;
	iov = &cmd->iov_data[0];

	iov_ret = iscsit_map_iovec(cmd, iov, cmd->orig_iov_data_count - 2,
				   be32_to_cpu(hdr->offset), payload_length);
	if (iov_ret < 0)
		return -1;

	iov_count += iov_ret;

	padding = ((-payload_length) & 3);
	if (padding != 0) {
		iov[iov_count].iov_base = cmd->pad_bytes;
		iov[iov_count++].iov_len = padding;
		rx_size += padding;
		pr_debug("Receiving %u padding bytes.\n", padding);
	}

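	/*
	 * When DataDigest was negotiated, the 4-byte CRC32C trailer is
	 * pulled in as one more kvec in the same rx_data() call below, and
	 * then compared against iscsit_crc_sglist() computed over the
	 * payload just received plus any pad bytes.
	 */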
	if (conn->conn_ops->DataDigest) {
		iov[iov_count].iov_base = &checksum;
		iov[iov_count++].iov_len = ISCSI_CRC_LEN;
		rx_size += ISCSI_CRC_LEN;
	}

	WARN_ON_ONCE(iov_count > cmd->orig_iov_data_count);
	rx_got = rx_data(conn, &cmd->iov_data[0], iov_count, rx_size);

	iscsit_unmap_iovec(cmd);

	if (rx_got != rx_size)
		return -1;

	if (conn->conn_ops->DataDigest) {
		u32 data_crc;

		data_crc = iscsit_crc_sglist(cmd, payload_length, padding,
					     cmd->pad_bytes);
		if (checksum != data_crc) {
			pr_err("ITT: 0x%08x, Offset: %u, Length: %u,"
				" DataSN: 0x%08x, CRC32C DataDigest 0x%08x"
				" does not match computed 0x%08x\n",
				hdr->itt, hdr->offset, payload_length,
				hdr->datasn, checksum, data_crc);
			data_crc_failed = 1;
		} else {
			pr_debug("Got CRC32C DataDigest 0x%08x for"
				" %u bytes of Data Out\n", checksum,
				payload_length);
		}
	}

	return data_crc_failed;
}

int
iscsit_check_dataout_payload(struct iscsit_cmd *cmd, struct iscsi_data *hdr,
			     bool data_crc_failed)
{
	struct iscsit_conn *conn = cmd->conn;
	int rc, ooo_cmdsn;
	/*
	 * Increment post receive data and CRC values or perform
	 * within-command recovery.
	 */
	rc = iscsit_check_post_dataout(cmd, (unsigned char *)hdr, data_crc_failed);
	if ((rc == DATAOUT_NORMAL) || (rc == DATAOUT_WITHIN_COMMAND_RECOVERY))
		return 0;
	else if (rc == DATAOUT_SEND_R2T) {
		iscsit_set_dataout_sequence_values(cmd);
		conn->conn_transport->iscsit_get_dataout(conn, cmd, false);
	} else if (rc == DATAOUT_SEND_TO_TRANSPORT) {
		/*
		 * Handle extra special case for out of order
		 * Unsolicited Data Out.
		 */
		spin_lock_bh(&cmd->istate_lock);
		ooo_cmdsn = (cmd->cmd_flags & ICF_OOO_CMDSN);
		cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
		cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
		spin_unlock_bh(&cmd->istate_lock);

		iscsit_stop_dataout_timer(cmd);
		if (ooo_cmdsn)
			return 0;
		target_execute_cmd(&cmd->se_cmd);
		return 0;
	} else /* DATAOUT_CANNOT_RECOVER */
		return -1;

	return 0;
}
EXPORT_SYMBOL(iscsit_check_dataout_payload);

static int iscsit_handle_data_out(struct iscsit_conn *conn, unsigned char *buf)
{
	struct iscsit_cmd *cmd = NULL;
	struct iscsi_data *hdr = (struct iscsi_data *)buf;
	int rc;
	bool data_crc_failed = false;

	rc = iscsit_check_dataout_hdr(conn, buf, &cmd);
	if (rc < 0)
		return 0;
	else if (!cmd)
		return 0;

	rc = iscsit_get_dataout(conn, cmd, hdr);
	if (rc < 0)
		return rc;
	else if (rc > 0)
		data_crc_failed = true;

	return iscsit_check_dataout_payload(cmd, hdr, data_crc_failed);
}

int iscsit_setup_nop_out(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
			 struct iscsi_nopout *hdr)
{
	u32 payload_length = ntoh24(hdr->dlength);

	if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL)) {
		pr_err("NopOUT Flag's, Left Most Bit not set, protocol error.\n");
		if (!cmd)
			return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
						 (unsigned char *)hdr);

		return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
					 (unsigned char *)hdr);
	}

	if (hdr->itt == RESERVED_ITT && !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
		pr_err("NOPOUT ITT is reserved, but Immediate Bit is"
			" not set, protocol error.\n");
		if (!cmd)
			return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
						 (unsigned char *)hdr);

		return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
					 (unsigned char *)hdr);
	}

	if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
		pr_err("NOPOUT Ping Data DataSegmentLength: %u is"
			" greater than MaxXmitDataSegmentLength: %u, protocol"
			" error.\n", payload_length,
			conn->conn_ops->MaxXmitDataSegmentLength);
		if (!cmd)
			return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
						 (unsigned char *)hdr);

		return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
					 (unsigned char *)hdr);
	}

	pr_debug("Got NOPOUT Ping %s ITT: 0x%08x, TTT: 0x%08x,"
		" CmdSN: 0x%08x, ExpStatSN: 0x%08x, Length: %u\n",
		hdr->itt == RESERVED_ITT ? "Response" : "Request",
		hdr->itt, hdr->ttt, hdr->cmdsn, hdr->exp_statsn,
		payload_length);
	/*
	 * This is not a response to an Unsolicited NopIN, which means
	 * it can either be a NOPOUT ping request (with a valid ITT),
	 * or a NOPOUT not requesting a NOPIN (with a reserved ITT).
	 * Either way, make sure we allocate a struct iscsit_cmd, as both
	 * can contain ping data.
	 */
	if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
		cmd->iscsi_opcode = ISCSI_OP_NOOP_OUT;
		cmd->i_state = ISTATE_SEND_NOPIN;
		cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ?
				      1 : 0);
		conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
		cmd->targ_xfer_tag = 0xFFFFFFFF;
		cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
		cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn);
		cmd->data_direction = DMA_NONE;
	}

	return 0;
}
EXPORT_SYMBOL(iscsit_setup_nop_out);

int iscsit_process_nop_out(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
			   struct iscsi_nopout *hdr)
{
	struct iscsit_cmd *cmd_p = NULL;
	int cmdsn_ret = 0;
	/*
	 * Initiator is expecting a NopIN ping reply..
	 */
	if (hdr->itt != RESERVED_ITT) {
		if (!cmd)
			return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
						 (unsigned char *)hdr);

		spin_lock_bh(&conn->cmd_lock);
		list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
		spin_unlock_bh(&conn->cmd_lock);

		iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));

		if (hdr->opcode & ISCSI_OP_IMMEDIATE) {
			iscsit_add_cmd_to_response_queue(cmd, conn,
							 cmd->i_state);
			return 0;
		}

		cmdsn_ret = iscsit_sequence_cmd(conn, cmd,
				(unsigned char *)hdr, hdr->cmdsn);
		if (cmdsn_ret == CMDSN_LOWER_THAN_EXP)
			return 0;
		if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
			return -1;

		return 0;
	}
	/*
	 * This was a response to an unsolicited NOPIN ping.
	 */
	if (hdr->ttt != cpu_to_be32(0xFFFFFFFF)) {
		cmd_p = iscsit_find_cmd_from_ttt(conn, be32_to_cpu(hdr->ttt));
		if (!cmd_p)
			return -EINVAL;

		iscsit_stop_nopin_response_timer(conn);

		cmd_p->i_state = ISTATE_REMOVE;
		iscsit_add_cmd_to_immediate_queue(cmd_p, conn, cmd_p->i_state);

		iscsit_start_nopin_timer(conn);
		return 0;
	}
	/*
	 * Otherwise, initiator is not expecting a NOPIN in response.
	 * Just ignore for now.
	 */

	if (cmd)
		iscsit_free_cmd(cmd, false);

	return 0;
}
EXPORT_SYMBOL(iscsit_process_nop_out);

static int iscsit_handle_nop_out(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
				 unsigned char *buf)
{
	unsigned char *ping_data = NULL;
	struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf;
	struct kvec *iov = NULL;
	u32 payload_length = ntoh24(hdr->dlength);
	int ret;

	ret = iscsit_setup_nop_out(conn, cmd, hdr);
	if (ret < 0)
		return 0;
	/*
	 * Handle NOP-OUT payload for traditional iSCSI sockets
	 */
	if (payload_length && hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
		u32 checksum, data_crc, padding = 0;
		int niov = 0, rx_got, rx_size = payload_length;

		ping_data = kzalloc(payload_length + 1, GFP_KERNEL);
		if (!ping_data) {
			ret = -1;
			goto out;
		}

		iov = &cmd->iov_misc[0];
		iov[niov].iov_base = ping_data;
		iov[niov++].iov_len = payload_length;

		padding = ((-payload_length) & 3);
		if (padding != 0) {
			pr_debug("Receiving %u additional bytes"
				" for padding.\n", padding);
			iov[niov].iov_base = &cmd->pad_bytes;
			iov[niov++].iov_len = padding;
			rx_size += padding;
		}
		if (conn->conn_ops->DataDigest) {
			iov[niov].iov_base = &checksum;
			iov[niov++].iov_len = ISCSI_CRC_LEN;
			rx_size += ISCSI_CRC_LEN;
		}

		WARN_ON_ONCE(niov > ARRAY_SIZE(cmd->iov_misc));
		rx_got = rx_data(conn, &cmd->iov_misc[0], niov, rx_size);
		if (rx_got != rx_size) {
			ret = -1;
			goto out;
		}

		if (conn->conn_ops->DataDigest) {
			data_crc = iscsit_crc_buf(ping_data, payload_length,
						  padding, cmd->pad_bytes);
			if (checksum != data_crc) {
				pr_err("Ping data CRC32C DataDigest"
					" 0x%08x does not match computed 0x%08x\n",
					checksum, data_crc);
				if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
					pr_err("Unable to recover from"
						" NOPOUT Ping DataCRC failure while in"
						" ERL=0.\n");
					ret = -1;
					goto out;
				} else {
					/*
					 * Silently drop this PDU and let the
					 * initiator plug the CmdSN gap.
					 */
					pr_debug("Dropping NOPOUT"
						" Command CmdSN: 0x%08x due to"
						" DataCRC error.\n", hdr->cmdsn);
					ret = 0;
					goto out;
				}
			} else {
				pr_debug("Got CRC32C DataDigest"
					" 0x%08x for %u bytes of ping data.\n",
					checksum, payload_length);
			}
		}

		ping_data[payload_length] = '\0';
		/*
		 * Attach ping data to struct iscsit_cmd->buf_ptr.
		 */
1916 */ 1917 cmd->buf_ptr = ping_data; 1918 cmd->buf_ptr_size = payload_length; 1919 1920 pr_debug("Got %u bytes of NOPOUT ping" 1921 " data.\n", payload_length); 1922 pr_debug("Ping Data: \"%s\"\n", ping_data); 1923 } 1924 1925 return iscsit_process_nop_out(conn, cmd, hdr); 1926 out: 1927 if (cmd) 1928 iscsit_free_cmd(cmd, false); 1929 1930 kfree(ping_data); 1931 return ret; 1932 } 1933 1934 static enum tcm_tmreq_table iscsit_convert_tmf(u8 iscsi_tmf) 1935 { 1936 switch (iscsi_tmf) { 1937 case ISCSI_TM_FUNC_ABORT_TASK: 1938 return TMR_ABORT_TASK; 1939 case ISCSI_TM_FUNC_ABORT_TASK_SET: 1940 return TMR_ABORT_TASK_SET; 1941 case ISCSI_TM_FUNC_CLEAR_ACA: 1942 return TMR_CLEAR_ACA; 1943 case ISCSI_TM_FUNC_CLEAR_TASK_SET: 1944 return TMR_CLEAR_TASK_SET; 1945 case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET: 1946 return TMR_LUN_RESET; 1947 case ISCSI_TM_FUNC_TARGET_WARM_RESET: 1948 return TMR_TARGET_WARM_RESET; 1949 case ISCSI_TM_FUNC_TARGET_COLD_RESET: 1950 return TMR_TARGET_COLD_RESET; 1951 default: 1952 return TMR_UNKNOWN; 1953 } 1954 } 1955 1956 int 1957 iscsit_handle_task_mgt_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd, 1958 unsigned char *buf) 1959 { 1960 struct se_tmr_req *se_tmr; 1961 struct iscsi_tmr_req *tmr_req; 1962 struct iscsi_tm *hdr; 1963 int out_of_order_cmdsn = 0, ret; 1964 u8 function, tcm_function = TMR_UNKNOWN; 1965 1966 hdr = (struct iscsi_tm *) buf; 1967 hdr->flags &= ~ISCSI_FLAG_CMD_FINAL; 1968 function = hdr->flags; 1969 1970 pr_debug("Got Task Management Request ITT: 0x%08x, CmdSN:" 1971 " 0x%08x, Function: 0x%02x, RefTaskTag: 0x%08x, RefCmdSN:" 1972 " 0x%08x, CID: %hu\n", hdr->itt, hdr->cmdsn, function, 1973 hdr->rtt, hdr->refcmdsn, conn->cid); 1974 1975 if ((function != ISCSI_TM_FUNC_ABORT_TASK) && 1976 ((function != ISCSI_TM_FUNC_TASK_REASSIGN) && 1977 hdr->rtt != RESERVED_ITT)) { 1978 pr_err("RefTaskTag should be set to 0xFFFFFFFF.\n"); 1979 hdr->rtt = RESERVED_ITT; 1980 } 1981 1982 if ((function == ISCSI_TM_FUNC_TASK_REASSIGN) && 1983 !(hdr->opcode & ISCSI_OP_IMMEDIATE)) { 1984 pr_err("Task Management Request TASK_REASSIGN not" 1985 " issued as immediate command, bad iSCSI Initiator" 1986 "implementation\n"); 1987 return iscsit_add_reject_cmd(cmd, 1988 ISCSI_REASON_PROTOCOL_ERROR, buf); 1989 } 1990 if ((function != ISCSI_TM_FUNC_ABORT_TASK) && 1991 be32_to_cpu(hdr->refcmdsn) != ISCSI_RESERVED_TAG) 1992 hdr->refcmdsn = cpu_to_be32(ISCSI_RESERVED_TAG); 1993 1994 cmd->data_direction = DMA_NONE; 1995 cmd->tmr_req = kzalloc(sizeof(*cmd->tmr_req), GFP_KERNEL); 1996 if (!cmd->tmr_req) { 1997 return iscsit_add_reject_cmd(cmd, 1998 ISCSI_REASON_BOOKMARK_NO_RESOURCES, 1999 buf); 2000 } 2001 2002 __target_init_cmd(&cmd->se_cmd, &iscsi_ops, 2003 conn->sess->se_sess, 0, DMA_NONE, 2004 TCM_SIMPLE_TAG, cmd->sense_buffer + 2, 2005 scsilun_to_int(&hdr->lun), 2006 conn->cmd_cnt); 2007 2008 target_get_sess_cmd(&cmd->se_cmd, true); 2009 2010 /* 2011 * TASK_REASSIGN for ERL=2 / connection stays inside of 2012 * LIO-Target $FABRIC_MOD 2013 */ 2014 if (function != ISCSI_TM_FUNC_TASK_REASSIGN) { 2015 tcm_function = iscsit_convert_tmf(function); 2016 if (tcm_function == TMR_UNKNOWN) { 2017 pr_err("Unknown iSCSI TMR Function:" 2018 " 0x%02x\n", function); 2019 return iscsit_add_reject_cmd(cmd, 2020 ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf); 2021 } 2022 } 2023 ret = core_tmr_alloc_req(&cmd->se_cmd, cmd->tmr_req, tcm_function, 2024 GFP_KERNEL); 2025 if (ret < 0) 2026 return iscsit_add_reject_cmd(cmd, 2027 ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf); 2028 2029 cmd->tmr_req->se_tmr_req = 
cmd->se_cmd.se_tmr_req; 2030 2031 cmd->iscsi_opcode = ISCSI_OP_SCSI_TMFUNC; 2032 cmd->i_state = ISTATE_SEND_TASKMGTRSP; 2033 cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0); 2034 cmd->init_task_tag = hdr->itt; 2035 cmd->targ_xfer_tag = 0xFFFFFFFF; 2036 cmd->cmd_sn = be32_to_cpu(hdr->cmdsn); 2037 cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn); 2038 se_tmr = cmd->se_cmd.se_tmr_req; 2039 tmr_req = cmd->tmr_req; 2040 /* 2041 * Locate the struct se_lun for all TMRs not related to ERL=2 TASK_REASSIGN 2042 */ 2043 if (function != ISCSI_TM_FUNC_TASK_REASSIGN) { 2044 ret = transport_lookup_tmr_lun(&cmd->se_cmd); 2045 if (ret < 0) { 2046 se_tmr->response = ISCSI_TMF_RSP_NO_LUN; 2047 goto attach; 2048 } 2049 } 2050 2051 switch (function) { 2052 case ISCSI_TM_FUNC_ABORT_TASK: 2053 se_tmr->response = iscsit_tmr_abort_task(cmd, buf); 2054 if (se_tmr->response) 2055 goto attach; 2056 break; 2057 case ISCSI_TM_FUNC_ABORT_TASK_SET: 2058 case ISCSI_TM_FUNC_CLEAR_ACA: 2059 case ISCSI_TM_FUNC_CLEAR_TASK_SET: 2060 case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET: 2061 break; 2062 case ISCSI_TM_FUNC_TARGET_WARM_RESET: 2063 if (iscsit_tmr_task_warm_reset(conn, tmr_req, buf) < 0) { 2064 se_tmr->response = ISCSI_TMF_RSP_AUTH_FAILED; 2065 goto attach; 2066 } 2067 break; 2068 case ISCSI_TM_FUNC_TARGET_COLD_RESET: 2069 if (iscsit_tmr_task_cold_reset(conn, tmr_req, buf) < 0) { 2070 se_tmr->response = ISCSI_TMF_RSP_AUTH_FAILED; 2071 goto attach; 2072 } 2073 break; 2074 case ISCSI_TM_FUNC_TASK_REASSIGN: 2075 se_tmr->response = iscsit_tmr_task_reassign(cmd, buf); 2076 /* 2077 * Perform sanity checks on the ExpDataSN only if the 2078 * TASK_REASSIGN was successful. 2079 */ 2080 if (se_tmr->response) 2081 break; 2082 2083 if (iscsit_check_task_reassign_expdatasn(tmr_req, conn) < 0) 2084 return iscsit_add_reject_cmd(cmd, 2085 ISCSI_REASON_BOOKMARK_INVALID, buf); 2086 break; 2087 default: 2088 pr_err("Unknown TMR function: 0x%02x, protocol" 2089 " error.\n", function); 2090 se_tmr->response = ISCSI_TMF_RSP_NOT_SUPPORTED; 2091 goto attach; 2092 } 2093 2094 if ((function != ISCSI_TM_FUNC_TASK_REASSIGN) && 2095 (se_tmr->response == ISCSI_TMF_RSP_COMPLETE)) 2096 se_tmr->call_transport = 1; 2097 attach: 2098 spin_lock_bh(&conn->cmd_lock); 2099 list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list); 2100 spin_unlock_bh(&conn->cmd_lock); 2101 2102 if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) { 2103 int cmdsn_ret = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn); 2104 if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP) { 2105 out_of_order_cmdsn = 1; 2106 } else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) { 2107 target_put_sess_cmd(&cmd->se_cmd); 2108 return 0; 2109 } else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) { 2110 return -1; 2111 } 2112 } 2113 iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn)); 2114 2115 if (out_of_order_cmdsn || !(hdr->opcode & ISCSI_OP_IMMEDIATE)) 2116 return 0; 2117 /* 2118 * Found the referenced task, send to transport for processing. 2119 */ 2120 if (se_tmr->call_transport) 2121 return transport_generic_handle_tmr(&cmd->se_cmd); 2122 2123 /* 2124 * Could not find the referenced LUN, task, or Task Management 2125 * command not authorized or supported. Change state and 2126 * let the tx_thread send the response. 2127 * 2128 * For connection recovery, this is also the default action for 2129 * TMR TASK_REASSIGN. 
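 *
 * In these cases the command is queued for the TX thread with
 * i_state still set to ISTATE_SEND_TASKMGTRSP, and the se_cmd
 * reference taken via target_get_sess_cmd() above is dropped since
 * this TMR is not handed to transport_generic_handle_tmr().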
2130 */ 2131 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); 2132 target_put_sess_cmd(&cmd->se_cmd); 2133 return 0; 2134 } 2135 EXPORT_SYMBOL(iscsit_handle_task_mgt_cmd); 2136 2137 /* #warning FIXME: Support Text Command parameters besides SendTargets */ 2138 int 2139 iscsit_setup_text_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd, 2140 struct iscsi_text *hdr) 2141 { 2142 u32 payload_length = ntoh24(hdr->dlength); 2143 2144 if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) { 2145 pr_err("Unable to accept text parameter length: %u" 2146 "greater than MaxXmitDataSegmentLength %u.\n", 2147 payload_length, conn->conn_ops->MaxXmitDataSegmentLength); 2148 return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, 2149 (unsigned char *)hdr); 2150 } 2151 2152 if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL) || 2153 (hdr->flags & ISCSI_FLAG_TEXT_CONTINUE)) { 2154 pr_err("Multi sequence text commands currently not supported\n"); 2155 return iscsit_reject_cmd(cmd, ISCSI_REASON_CMD_NOT_SUPPORTED, 2156 (unsigned char *)hdr); 2157 } 2158 2159 pr_debug("Got Text Request: ITT: 0x%08x, CmdSN: 0x%08x," 2160 " ExpStatSN: 0x%08x, Length: %u\n", hdr->itt, hdr->cmdsn, 2161 hdr->exp_statsn, payload_length); 2162 2163 cmd->iscsi_opcode = ISCSI_OP_TEXT; 2164 cmd->i_state = ISTATE_SEND_TEXTRSP; 2165 cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0); 2166 conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt; 2167 cmd->targ_xfer_tag = 0xFFFFFFFF; 2168 cmd->cmd_sn = be32_to_cpu(hdr->cmdsn); 2169 cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn); 2170 cmd->data_direction = DMA_NONE; 2171 kfree(cmd->text_in_ptr); 2172 cmd->text_in_ptr = NULL; 2173 2174 return 0; 2175 } 2176 EXPORT_SYMBOL(iscsit_setup_text_cmd); 2177 2178 int 2179 iscsit_process_text_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd, 2180 struct iscsi_text *hdr) 2181 { 2182 unsigned char *text_in = cmd->text_in_ptr, *text_ptr; 2183 int cmdsn_ret; 2184 2185 if (!text_in) { 2186 cmd->targ_xfer_tag = be32_to_cpu(hdr->ttt); 2187 if (cmd->targ_xfer_tag == 0xFFFFFFFF) { 2188 pr_err("Unable to locate text_in buffer for sendtargets" 2189 " discovery\n"); 2190 goto reject; 2191 } 2192 goto empty_sendtargets; 2193 } 2194 if (strncmp("SendTargets=", text_in, 12) != 0) { 2195 pr_err("Received Text Data that is not" 2196 " SendTargets, cannot continue.\n"); 2197 goto reject; 2198 } 2199 /* '=' confirmed in strncmp */ 2200 text_ptr = strchr(text_in, '='); 2201 BUG_ON(!text_ptr); 2202 if (!strncmp("=All", text_ptr, 5)) { 2203 cmd->cmd_flags |= ICF_SENDTARGETS_ALL; 2204 } else if (!strncmp("=iqn.", text_ptr, 5) || 2205 !strncmp("=eui.", text_ptr, 5)) { 2206 cmd->cmd_flags |= ICF_SENDTARGETS_SINGLE; 2207 } else { 2208 pr_err("Unable to locate valid SendTargets%s value\n", 2209 text_ptr); 2210 goto reject; 2211 } 2212 2213 spin_lock_bh(&conn->cmd_lock); 2214 list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list); 2215 spin_unlock_bh(&conn->cmd_lock); 2216 2217 empty_sendtargets: 2218 iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn)); 2219 2220 if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) { 2221 cmdsn_ret = iscsit_sequence_cmd(conn, cmd, 2222 (unsigned char *)hdr, hdr->cmdsn); 2223 if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) 2224 return -1; 2225 2226 return 0; 2227 } 2228 2229 return iscsit_execute_cmd(cmd, 0); 2230 2231 reject: 2232 return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, 2233 (unsigned char *)hdr); 2234 } 2235 EXPORT_SYMBOL(iscsit_process_text_cmd); 2236 2237 static int 2238 
iscsit_handle_text_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd, 2239 unsigned char *buf) 2240 { 2241 struct iscsi_text *hdr = (struct iscsi_text *)buf; 2242 char *text_in = NULL; 2243 u32 payload_length = ntoh24(hdr->dlength); 2244 int rx_size, rc; 2245 2246 rc = iscsit_setup_text_cmd(conn, cmd, hdr); 2247 if (rc < 0) 2248 return 0; 2249 2250 rx_size = payload_length; 2251 if (payload_length) { 2252 u32 checksum = 0, data_crc = 0; 2253 u32 padding = 0; 2254 int niov = 0, rx_got; 2255 struct kvec iov[2]; 2256 2257 rx_size = ALIGN(payload_length, 4); 2258 text_in = kzalloc(rx_size, GFP_KERNEL); 2259 if (!text_in) 2260 goto reject; 2261 2262 cmd->text_in_ptr = text_in; 2263 2264 memset(iov, 0, sizeof(iov)); 2265 iov[niov].iov_base = text_in; 2266 iov[niov++].iov_len = rx_size; 2267 2268 padding = rx_size - payload_length; 2269 if (padding) 2270 pr_debug("Receiving %u additional bytes" 2271 " for padding.\n", padding); 2272 if (conn->conn_ops->DataDigest) { 2273 iov[niov].iov_base = &checksum; 2274 iov[niov++].iov_len = ISCSI_CRC_LEN; 2275 rx_size += ISCSI_CRC_LEN; 2276 } 2277 2278 WARN_ON_ONCE(niov > ARRAY_SIZE(iov)); 2279 rx_got = rx_data(conn, &iov[0], niov, rx_size); 2280 if (rx_got != rx_size) 2281 goto reject; 2282 2283 if (conn->conn_ops->DataDigest) { 2284 data_crc = iscsit_crc_buf(text_in, rx_size, 0, NULL); 2285 if (checksum != data_crc) { 2286 pr_err("Text data CRC32C DataDigest" 2287 " 0x%08x does not match computed" 2288 " 0x%08x\n", checksum, data_crc); 2289 if (!conn->sess->sess_ops->ErrorRecoveryLevel) { 2290 pr_err("Unable to recover from" 2291 " Text Data digest failure while in" 2292 " ERL=0.\n"); 2293 goto reject; 2294 } else { 2295 /* 2296 * Silently drop this PDU and let the 2297 * initiator plug the CmdSN gap. 2298 */ 2299 pr_debug("Dropping Text" 2300 " Command CmdSN: 0x%08x due to" 2301 " DataCRC error.\n", hdr->cmdsn); 2302 kfree(text_in); 2303 return 0; 2304 } 2305 } else { 2306 pr_debug("Got CRC32C DataDigest" 2307 " 0x%08x for %u bytes of text data.\n", 2308 checksum, payload_length); 2309 } 2310 } 2311 text_in[payload_length - 1] = '\0'; 2312 pr_debug("Successfully read %d bytes of text" 2313 " data.\n", payload_length); 2314 } 2315 2316 return iscsit_process_text_cmd(conn, cmd, hdr); 2317 2318 reject: 2319 kfree(cmd->text_in_ptr); 2320 cmd->text_in_ptr = NULL; 2321 return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, buf); 2322 } 2323 2324 int iscsit_logout_closesession(struct iscsit_cmd *cmd, struct iscsit_conn *conn) 2325 { 2326 struct iscsit_conn *conn_p; 2327 struct iscsit_session *sess = conn->sess; 2328 2329 pr_debug("Received logout request CLOSESESSION on CID: %hu" 2330 " for SID: %u.\n", conn->cid, conn->sess->sid); 2331 2332 atomic_set(&sess->session_logout, 1); 2333 atomic_set(&conn->conn_logout_remove, 1); 2334 conn->conn_logout_reason = ISCSI_LOGOUT_REASON_CLOSE_SESSION; 2335 2336 iscsit_inc_conn_usage_count(conn); 2337 iscsit_inc_session_usage_count(sess); 2338 2339 spin_lock_bh(&sess->conn_lock); 2340 list_for_each_entry(conn_p, &sess->sess_conn_list, conn_list) { 2341 if (conn_p->conn_state != TARG_CONN_STATE_LOGGED_IN) 2342 continue; 2343 2344 pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n"); 2345 conn_p->conn_state = TARG_CONN_STATE_IN_LOGOUT; 2346 } 2347 spin_unlock_bh(&sess->conn_lock); 2348 2349 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); 2350 2351 return 0; 2352 } 2353 2354 int iscsit_logout_closeconnection(struct iscsit_cmd *cmd, struct iscsit_conn *conn) 2355 { 2356 struct iscsit_conn *l_conn; 2357 struct 
iscsit_session *sess = conn->sess; 2358 2359 pr_debug("Received logout request CLOSECONNECTION for CID:" 2360 " %hu on CID: %hu.\n", cmd->logout_cid, conn->cid); 2361 2362 /* 2363 * A Logout Request with a CLOSECONNECTION reason code for a CID 2364 * can arrive on a connection with a differing CID. 2365 */ 2366 if (conn->cid == cmd->logout_cid) { 2367 spin_lock_bh(&conn->state_lock); 2368 pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n"); 2369 conn->conn_state = TARG_CONN_STATE_IN_LOGOUT; 2370 2371 atomic_set(&conn->conn_logout_remove, 1); 2372 conn->conn_logout_reason = ISCSI_LOGOUT_REASON_CLOSE_CONNECTION; 2373 iscsit_inc_conn_usage_count(conn); 2374 2375 spin_unlock_bh(&conn->state_lock); 2376 } else { 2377 /* 2378 * Handle all different cid CLOSECONNECTION requests in 2379 * iscsit_logout_post_handler_diffcid() as to give enough 2380 * time for any non immediate command's CmdSN to be 2381 * acknowledged on the connection in question. 2382 * 2383 * Here we simply make sure the CID is still around. 2384 */ 2385 l_conn = iscsit_get_conn_from_cid(sess, 2386 cmd->logout_cid); 2387 if (!l_conn) { 2388 cmd->logout_response = ISCSI_LOGOUT_CID_NOT_FOUND; 2389 iscsit_add_cmd_to_response_queue(cmd, conn, 2390 cmd->i_state); 2391 return 0; 2392 } 2393 2394 iscsit_dec_conn_usage_count(l_conn); 2395 } 2396 2397 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); 2398 2399 return 0; 2400 } 2401 2402 int iscsit_logout_removeconnforrecovery(struct iscsit_cmd *cmd, struct iscsit_conn *conn) 2403 { 2404 struct iscsit_session *sess = conn->sess; 2405 2406 pr_debug("Received explicit REMOVECONNFORRECOVERY logout for" 2407 " CID: %hu on CID: %hu.\n", cmd->logout_cid, conn->cid); 2408 2409 if (sess->sess_ops->ErrorRecoveryLevel != 2) { 2410 pr_err("Received Logout Request REMOVECONNFORRECOVERY" 2411 " while ERL!=2.\n"); 2412 cmd->logout_response = ISCSI_LOGOUT_RECOVERY_UNSUPPORTED; 2413 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); 2414 return 0; 2415 } 2416 2417 if (conn->cid == cmd->logout_cid) { 2418 pr_err("Received Logout Request REMOVECONNFORRECOVERY" 2419 " with CID: %hu on CID: %hu, implementation error.\n", 2420 cmd->logout_cid, conn->cid); 2421 cmd->logout_response = ISCSI_LOGOUT_CLEANUP_FAILED; 2422 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); 2423 return 0; 2424 } 2425 2426 iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); 2427 2428 return 0; 2429 } 2430 2431 int 2432 iscsit_handle_logout_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd, 2433 unsigned char *buf) 2434 { 2435 int cmdsn_ret, logout_remove = 0; 2436 u8 reason_code = 0; 2437 struct iscsi_logout *hdr; 2438 struct iscsi_tiqn *tiqn = iscsit_snmp_get_tiqn(conn); 2439 2440 hdr = (struct iscsi_logout *) buf; 2441 reason_code = (hdr->flags & 0x7f); 2442 2443 if (tiqn) { 2444 spin_lock(&tiqn->logout_stats.lock); 2445 if (reason_code == ISCSI_LOGOUT_REASON_CLOSE_SESSION) 2446 tiqn->logout_stats.normal_logouts++; 2447 else 2448 tiqn->logout_stats.abnormal_logouts++; 2449 spin_unlock(&tiqn->logout_stats.lock); 2450 } 2451 2452 pr_debug("Got Logout Request ITT: 0x%08x CmdSN: 0x%08x" 2453 " ExpStatSN: 0x%08x Reason: 0x%02x CID: %hu on CID: %hu\n", 2454 hdr->itt, hdr->cmdsn, hdr->exp_statsn, reason_code, 2455 hdr->cid, conn->cid); 2456 2457 if (conn->conn_state != TARG_CONN_STATE_LOGGED_IN) { 2458 pr_err("Received logout request on connection that" 2459 " is not in logged in state, ignoring request.\n"); 2460 iscsit_free_cmd(cmd, false); 2461 return 0; 2462 } 2463 2464 cmd->iscsi_opcode = 
ISCSI_OP_LOGOUT; 2465 cmd->i_state = ISTATE_SEND_LOGOUTRSP; 2466 cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0); 2467 conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt; 2468 cmd->targ_xfer_tag = 0xFFFFFFFF; 2469 cmd->cmd_sn = be32_to_cpu(hdr->cmdsn); 2470 cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn); 2471 cmd->logout_cid = be16_to_cpu(hdr->cid); 2472 cmd->logout_reason = reason_code; 2473 cmd->data_direction = DMA_NONE; 2474 2475 /* 2476 * We need to sleep in these cases (by returning 1) until the Logout 2477 * Response gets sent in the tx thread. 2478 */ 2479 if ((reason_code == ISCSI_LOGOUT_REASON_CLOSE_SESSION) || 2480 ((reason_code == ISCSI_LOGOUT_REASON_CLOSE_CONNECTION) && 2481 be16_to_cpu(hdr->cid) == conn->cid)) 2482 logout_remove = 1; 2483 2484 spin_lock_bh(&conn->cmd_lock); 2485 list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list); 2486 spin_unlock_bh(&conn->cmd_lock); 2487 2488 if (reason_code != ISCSI_LOGOUT_REASON_RECOVERY) 2489 iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn)); 2490 2491 /* 2492 * Immediate commands are executed, well, immediately. 2493 * Non-Immediate Logout Commands are executed in CmdSN order. 2494 */ 2495 if (cmd->immediate_cmd) { 2496 int ret = iscsit_execute_cmd(cmd, 0); 2497 2498 if (ret < 0) 2499 return ret; 2500 } else { 2501 cmdsn_ret = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn); 2502 if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) 2503 logout_remove = 0; 2504 else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) 2505 return -1; 2506 } 2507 2508 return logout_remove; 2509 } 2510 EXPORT_SYMBOL(iscsit_handle_logout_cmd); 2511 2512 int iscsit_handle_snack( 2513 struct iscsit_conn *conn, 2514 unsigned char *buf) 2515 { 2516 struct iscsi_snack *hdr; 2517 2518 hdr = (struct iscsi_snack *) buf; 2519 hdr->flags &= ~ISCSI_FLAG_CMD_FINAL; 2520 2521 pr_debug("Got ISCSI_INIT_SNACK, ITT: 0x%08x, ExpStatSN:" 2522 " 0x%08x, Type: 0x%02x, BegRun: 0x%08x, RunLength: 0x%08x," 2523 " CID: %hu\n", hdr->itt, hdr->exp_statsn, hdr->flags, 2524 hdr->begrun, hdr->runlength, conn->cid); 2525 2526 if (!conn->sess->sess_ops->ErrorRecoveryLevel) { 2527 pr_err("Initiator sent SNACK request while in" 2528 " ErrorRecoveryLevel=0.\n"); 2529 return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, 2530 buf); 2531 } 2532 /* 2533 * SNACK_DATA and SNACK_R2T are both 0, so check which function to 2534 * call from inside iscsi_send_recovery_datain_or_r2t(). 
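 *
 * The dispatch below, keyed on hdr->flags & ISCSI_FLAG_SNACK_TYPE_MASK:
 *
 *   0 (Data/R2T SNACK)             -> iscsit_handle_recovery_datain_or_r2t()
 *   ISCSI_FLAG_SNACK_TYPE_STATUS   -> iscsit_handle_status_snack()
 *   ISCSI_FLAG_SNACK_TYPE_DATA_ACK -> iscsit_handle_data_ack()
 *   ISCSI_FLAG_SNACK_TYPE_RDATA    -> rejected, R-Data SNACK not supported
 *   anything else                  -> rejected as a protocol error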
2535 */ 2536 switch (hdr->flags & ISCSI_FLAG_SNACK_TYPE_MASK) { 2537 case 0: 2538 return iscsit_handle_recovery_datain_or_r2t(conn, buf, 2539 hdr->itt, 2540 be32_to_cpu(hdr->ttt), 2541 be32_to_cpu(hdr->begrun), 2542 be32_to_cpu(hdr->runlength)); 2543 case ISCSI_FLAG_SNACK_TYPE_STATUS: 2544 return iscsit_handle_status_snack(conn, hdr->itt, 2545 be32_to_cpu(hdr->ttt), 2546 be32_to_cpu(hdr->begrun), be32_to_cpu(hdr->runlength)); 2547 case ISCSI_FLAG_SNACK_TYPE_DATA_ACK: 2548 return iscsit_handle_data_ack(conn, be32_to_cpu(hdr->ttt), 2549 be32_to_cpu(hdr->begrun), 2550 be32_to_cpu(hdr->runlength)); 2551 case ISCSI_FLAG_SNACK_TYPE_RDATA: 2552 /* FIXME: Support R-Data SNACK */ 2553 pr_err("R-Data SNACK Not Supported.\n"); 2554 return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, 2555 buf); 2556 default: 2557 pr_err("Unknown SNACK type 0x%02x, protocol" 2558 " error.\n", hdr->flags & 0x0f); 2559 return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, 2560 buf); 2561 } 2562 2563 return 0; 2564 } 2565 EXPORT_SYMBOL(iscsit_handle_snack); 2566 2567 static void iscsit_rx_thread_wait_for_tcp(struct iscsit_conn *conn) 2568 { 2569 if ((conn->sock->sk->sk_shutdown & SEND_SHUTDOWN) || 2570 (conn->sock->sk->sk_shutdown & RCV_SHUTDOWN)) { 2571 wait_for_completion_interruptible_timeout( 2572 &conn->rx_half_close_comp, 2573 ISCSI_RX_THREAD_TCP_TIMEOUT * HZ); 2574 } 2575 } 2576 2577 static int iscsit_handle_immediate_data( 2578 struct iscsit_cmd *cmd, 2579 struct iscsi_scsi_req *hdr, 2580 u32 length) 2581 { 2582 int iov_ret, rx_got = 0, rx_size = 0; 2583 u32 checksum, iov_count = 0, padding = 0; 2584 struct iscsit_conn *conn = cmd->conn; 2585 struct kvec *iov; 2586 void *overflow_buf = NULL; 2587 2588 BUG_ON(cmd->write_data_done > cmd->se_cmd.data_length); 2589 rx_size = min(cmd->se_cmd.data_length - cmd->write_data_done, length); 2590 iov_ret = iscsit_map_iovec(cmd, cmd->iov_data, 2591 cmd->orig_iov_data_count - 2, 2592 cmd->write_data_done, rx_size); 2593 if (iov_ret < 0) 2594 return IMMEDIATE_DATA_CANNOT_RECOVER; 2595 2596 iov_count = iov_ret; 2597 iov = &cmd->iov_data[0]; 2598 if (rx_size < length) { 2599 /* 2600 * Special case: length of immediate data exceeds the data 2601 * buffer size derived from the CDB. 
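 *
 * Rather than failing the connection, the bytes that do not fit in
 * the remaining se_cmd data buffer are received into a temporary
 * overflow buffer hung off cmd->overflow_buf, so that the full
 * DataSegmentLength is drained from the socket and the PDU stream
 * stays in sync.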
2602 */ 2603 overflow_buf = kmalloc(length - rx_size, GFP_KERNEL); 2604 if (!overflow_buf) { 2605 iscsit_unmap_iovec(cmd); 2606 return IMMEDIATE_DATA_CANNOT_RECOVER; 2607 } 2608 cmd->overflow_buf = overflow_buf; 2609 iov[iov_count].iov_base = overflow_buf; 2610 iov[iov_count].iov_len = length - rx_size; 2611 iov_count++; 2612 rx_size = length; 2613 } 2614 2615 padding = ((-length) & 3); 2616 if (padding != 0) { 2617 iov[iov_count].iov_base = cmd->pad_bytes; 2618 iov[iov_count++].iov_len = padding; 2619 rx_size += padding; 2620 } 2621 2622 if (conn->conn_ops->DataDigest) { 2623 iov[iov_count].iov_base = &checksum; 2624 iov[iov_count++].iov_len = ISCSI_CRC_LEN; 2625 rx_size += ISCSI_CRC_LEN; 2626 } 2627 2628 WARN_ON_ONCE(iov_count > cmd->orig_iov_data_count); 2629 rx_got = rx_data(conn, &cmd->iov_data[0], iov_count, rx_size); 2630 2631 iscsit_unmap_iovec(cmd); 2632 2633 if (rx_got != rx_size) { 2634 iscsit_rx_thread_wait_for_tcp(conn); 2635 return IMMEDIATE_DATA_CANNOT_RECOVER; 2636 } 2637 2638 if (conn->conn_ops->DataDigest) { 2639 u32 data_crc; 2640 2641 data_crc = iscsit_crc_sglist(cmd, length, padding, 2642 cmd->pad_bytes); 2643 if (checksum != data_crc) { 2644 pr_err("ImmediateData CRC32C DataDigest 0x%08x" 2645 " does not match computed 0x%08x\n", checksum, 2646 data_crc); 2647 2648 if (!conn->sess->sess_ops->ErrorRecoveryLevel) { 2649 pr_err("Unable to recover from" 2650 " Immediate Data digest failure while" 2651 " in ERL=0.\n"); 2652 iscsit_reject_cmd(cmd, 2653 ISCSI_REASON_DATA_DIGEST_ERROR, 2654 (unsigned char *)hdr); 2655 return IMMEDIATE_DATA_CANNOT_RECOVER; 2656 } else { 2657 iscsit_reject_cmd(cmd, 2658 ISCSI_REASON_DATA_DIGEST_ERROR, 2659 (unsigned char *)hdr); 2660 return IMMEDIATE_DATA_ERL1_CRC_FAILURE; 2661 } 2662 } else { 2663 pr_debug("Got CRC32C DataDigest 0x%08x for" 2664 " %u bytes of Immediate Data\n", checksum, 2665 length); 2666 } 2667 } 2668 2669 cmd->write_data_done += length; 2670 2671 if (cmd->write_data_done == cmd->se_cmd.data_length) { 2672 spin_lock_bh(&cmd->istate_lock); 2673 cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT; 2674 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT; 2675 spin_unlock_bh(&cmd->istate_lock); 2676 } 2677 2678 return IMMEDIATE_DATA_NORMAL_OPERATION; 2679 } 2680 2681 /* #warning iscsi_build_conn_drop_async_message() only sends out on connections 2682 with active network interface */ 2683 static void iscsit_build_conn_drop_async_message(struct iscsit_conn *conn) 2684 { 2685 struct iscsit_cmd *cmd; 2686 struct iscsit_conn *conn_p; 2687 bool found = false; 2688 2689 lockdep_assert_held(&conn->sess->conn_lock); 2690 2691 /* 2692 * Only send a Asynchronous Message on connections whos network 2693 * interface is still functional. 
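 *
 * The first connection in the session still in
 * TARG_CONN_STATE_LOGGED_IN is used to carry the message; its usage
 * count is held while the ISTATE_SEND_ASYNCMSG command is allocated
 * and queued, and dropped again afterwards.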
2694 */ 2695 list_for_each_entry(conn_p, &conn->sess->sess_conn_list, conn_list) { 2696 if (conn_p->conn_state == TARG_CONN_STATE_LOGGED_IN) { 2697 iscsit_inc_conn_usage_count(conn_p); 2698 found = true; 2699 break; 2700 } 2701 } 2702 2703 if (!found) 2704 return; 2705 2706 cmd = iscsit_allocate_cmd(conn_p, TASK_RUNNING); 2707 if (!cmd) { 2708 iscsit_dec_conn_usage_count(conn_p); 2709 return; 2710 } 2711 2712 cmd->logout_cid = conn->cid; 2713 cmd->iscsi_opcode = ISCSI_OP_ASYNC_EVENT; 2714 cmd->i_state = ISTATE_SEND_ASYNCMSG; 2715 2716 spin_lock_bh(&conn_p->cmd_lock); 2717 list_add_tail(&cmd->i_conn_node, &conn_p->conn_cmd_list); 2718 spin_unlock_bh(&conn_p->cmd_lock); 2719 2720 iscsit_add_cmd_to_response_queue(cmd, conn_p, cmd->i_state); 2721 iscsit_dec_conn_usage_count(conn_p); 2722 } 2723 2724 static int iscsit_send_conn_drop_async_message( 2725 struct iscsit_cmd *cmd, 2726 struct iscsit_conn *conn) 2727 { 2728 struct iscsi_async *hdr; 2729 2730 cmd->iscsi_opcode = ISCSI_OP_ASYNC_EVENT; 2731 2732 hdr = (struct iscsi_async *) cmd->pdu; 2733 hdr->opcode = ISCSI_OP_ASYNC_EVENT; 2734 hdr->flags = ISCSI_FLAG_CMD_FINAL; 2735 cmd->init_task_tag = RESERVED_ITT; 2736 cmd->targ_xfer_tag = 0xFFFFFFFF; 2737 put_unaligned_be64(0xFFFFFFFFFFFFFFFFULL, &hdr->rsvd4[0]); 2738 cmd->stat_sn = conn->stat_sn++; 2739 hdr->statsn = cpu_to_be32(cmd->stat_sn); 2740 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 2741 hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn)); 2742 hdr->async_event = ISCSI_ASYNC_MSG_DROPPING_CONNECTION; 2743 hdr->param1 = cpu_to_be16(cmd->logout_cid); 2744 hdr->param2 = cpu_to_be16(conn->sess->sess_ops->DefaultTime2Wait); 2745 hdr->param3 = cpu_to_be16(conn->sess->sess_ops->DefaultTime2Retain); 2746 2747 pr_debug("Sending Connection Dropped Async Message StatSN:" 2748 " 0x%08x, for CID: %hu on CID: %hu\n", cmd->stat_sn, 2749 cmd->logout_cid, conn->cid); 2750 2751 return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0); 2752 } 2753 2754 static void iscsit_tx_thread_wait_for_tcp(struct iscsit_conn *conn) 2755 { 2756 if ((conn->sock->sk->sk_shutdown & SEND_SHUTDOWN) || 2757 (conn->sock->sk->sk_shutdown & RCV_SHUTDOWN)) { 2758 wait_for_completion_interruptible_timeout( 2759 &conn->tx_half_close_comp, 2760 ISCSI_TX_THREAD_TCP_TIMEOUT * HZ); 2761 } 2762 } 2763 2764 void 2765 iscsit_build_datain_pdu(struct iscsit_cmd *cmd, struct iscsit_conn *conn, 2766 struct iscsi_datain *datain, struct iscsi_data_rsp *hdr, 2767 bool set_statsn) 2768 { 2769 hdr->opcode = ISCSI_OP_SCSI_DATA_IN; 2770 hdr->flags = datain->flags; 2771 if (hdr->flags & ISCSI_FLAG_DATA_STATUS) { 2772 if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) { 2773 hdr->flags |= ISCSI_FLAG_DATA_OVERFLOW; 2774 hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count); 2775 } else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) { 2776 hdr->flags |= ISCSI_FLAG_DATA_UNDERFLOW; 2777 hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count); 2778 } 2779 } 2780 hton24(hdr->dlength, datain->length); 2781 if (hdr->flags & ISCSI_FLAG_DATA_ACK) 2782 int_to_scsilun(cmd->se_cmd.orig_fe_lun, 2783 (struct scsi_lun *)&hdr->lun); 2784 else 2785 put_unaligned_le64(0xFFFFFFFFFFFFFFFFULL, &hdr->lun); 2786 2787 hdr->itt = cmd->init_task_tag; 2788 2789 if (hdr->flags & ISCSI_FLAG_DATA_ACK) 2790 hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag); 2791 else 2792 hdr->ttt = cpu_to_be32(0xFFFFFFFF); 2793 if (set_statsn) 2794 hdr->statsn = cpu_to_be32(cmd->stat_sn); 2795 else 2796 hdr->statsn = cpu_to_be32(0xFFFFFFFF); 2797 
2798 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 2799 hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn)); 2800 hdr->datasn = cpu_to_be32(datain->data_sn); 2801 hdr->offset = cpu_to_be32(datain->offset); 2802 2803 pr_debug("Built DataIN ITT: 0x%08x, StatSN: 0x%08x," 2804 " DataSN: 0x%08x, Offset: %u, Length: %u, CID: %hu\n", 2805 cmd->init_task_tag, ntohl(hdr->statsn), ntohl(hdr->datasn), 2806 ntohl(hdr->offset), datain->length, conn->cid); 2807 } 2808 EXPORT_SYMBOL(iscsit_build_datain_pdu); 2809 2810 static int iscsit_send_datain(struct iscsit_cmd *cmd, struct iscsit_conn *conn) 2811 { 2812 struct iscsi_data_rsp *hdr = (struct iscsi_data_rsp *)&cmd->pdu[0]; 2813 struct iscsi_datain datain; 2814 struct iscsi_datain_req *dr; 2815 int eodr = 0, ret; 2816 bool set_statsn = false; 2817 2818 memset(&datain, 0, sizeof(struct iscsi_datain)); 2819 dr = iscsit_get_datain_values(cmd, &datain); 2820 if (!dr) { 2821 pr_err("iscsit_get_datain_values failed for ITT: 0x%08x\n", 2822 cmd->init_task_tag); 2823 return -1; 2824 } 2825 /* 2826 * Be paranoid and double check the logic for now. 2827 */ 2828 if ((datain.offset + datain.length) > cmd->se_cmd.data_length) { 2829 pr_err("Command ITT: 0x%08x, datain.offset: %u and" 2830 " datain.length: %u exceeds cmd->data_length: %u\n", 2831 cmd->init_task_tag, datain.offset, datain.length, 2832 cmd->se_cmd.data_length); 2833 return -1; 2834 } 2835 2836 atomic_long_add(datain.length, &conn->sess->tx_data_octets); 2837 /* 2838 * Special case for successfully execution w/ both DATAIN 2839 * and Sense Data. 2840 */ 2841 if ((datain.flags & ISCSI_FLAG_DATA_STATUS) && 2842 (cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)) 2843 datain.flags &= ~ISCSI_FLAG_DATA_STATUS; 2844 else { 2845 if ((dr->dr_complete == DATAIN_COMPLETE_NORMAL) || 2846 (dr->dr_complete == DATAIN_COMPLETE_CONNECTION_RECOVERY)) { 2847 iscsit_increment_maxcmdsn(cmd, conn->sess); 2848 cmd->stat_sn = conn->stat_sn++; 2849 set_statsn = true; 2850 } else if (dr->dr_complete == 2851 DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY) 2852 set_statsn = true; 2853 } 2854 2855 iscsit_build_datain_pdu(cmd, conn, &datain, hdr, set_statsn); 2856 2857 ret = conn->conn_transport->iscsit_xmit_pdu(conn, cmd, dr, &datain, 0); 2858 if (ret < 0) 2859 return ret; 2860 2861 if (dr->dr_complete) { 2862 eodr = (cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ? 2863 2 : 1; 2864 iscsit_free_datain_req(cmd, dr); 2865 } 2866 2867 return eodr; 2868 } 2869 2870 int 2871 iscsit_build_logout_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn, 2872 struct iscsi_logout_rsp *hdr) 2873 { 2874 struct iscsit_conn *logout_conn = NULL; 2875 struct iscsi_conn_recovery *cr = NULL; 2876 struct iscsit_session *sess = conn->sess; 2877 /* 2878 * The actual shutting down of Sessions and/or Connections 2879 * for CLOSESESSION and CLOSECONNECTION Logout Requests 2880 * is done in scsi_logout_post_handler(). 2881 */ 2882 switch (cmd->logout_reason) { 2883 case ISCSI_LOGOUT_REASON_CLOSE_SESSION: 2884 pr_debug("iSCSI session logout successful, setting" 2885 " logout response to ISCSI_LOGOUT_SUCCESS.\n"); 2886 cmd->logout_response = ISCSI_LOGOUT_SUCCESS; 2887 break; 2888 case ISCSI_LOGOUT_REASON_CLOSE_CONNECTION: 2889 if (cmd->logout_response == ISCSI_LOGOUT_CID_NOT_FOUND) 2890 break; 2891 /* 2892 * For CLOSECONNECTION logout requests carrying 2893 * a matching logout CID -> local CID, the reference 2894 * for the local CID will have been incremented in 2895 * iscsi_logout_closeconnection(). 
2896 * 2897 * For CLOSECONNECTION logout requests carrying 2898 * a different CID than the connection it arrived 2899 * on, the connection responding to cmd->logout_cid 2900 * is stopped in iscsit_logout_post_handler_diffcid(). 2901 */ 2902 2903 pr_debug("iSCSI CID: %hu logout on CID: %hu" 2904 " successful.\n", cmd->logout_cid, conn->cid); 2905 cmd->logout_response = ISCSI_LOGOUT_SUCCESS; 2906 break; 2907 case ISCSI_LOGOUT_REASON_RECOVERY: 2908 if ((cmd->logout_response == ISCSI_LOGOUT_RECOVERY_UNSUPPORTED) || 2909 (cmd->logout_response == ISCSI_LOGOUT_CLEANUP_FAILED)) 2910 break; 2911 /* 2912 * If the connection is still active from our point of view 2913 * force connection recovery to occur. 2914 */ 2915 logout_conn = iscsit_get_conn_from_cid_rcfr(sess, 2916 cmd->logout_cid); 2917 if (logout_conn) { 2918 iscsit_connection_reinstatement_rcfr(logout_conn); 2919 iscsit_dec_conn_usage_count(logout_conn); 2920 } 2921 2922 cr = iscsit_get_inactive_connection_recovery_entry( 2923 conn->sess, cmd->logout_cid); 2924 if (!cr) { 2925 pr_err("Unable to locate CID: %hu for" 2926 " REMOVECONNFORRECOVERY Logout Request.\n", 2927 cmd->logout_cid); 2928 cmd->logout_response = ISCSI_LOGOUT_CID_NOT_FOUND; 2929 break; 2930 } 2931 2932 iscsit_discard_cr_cmds_by_expstatsn(cr, cmd->exp_stat_sn); 2933 2934 pr_debug("iSCSI REMOVECONNFORRECOVERY logout" 2935 " for recovery for CID: %hu on CID: %hu successful.\n", 2936 cmd->logout_cid, conn->cid); 2937 cmd->logout_response = ISCSI_LOGOUT_SUCCESS; 2938 break; 2939 default: 2940 pr_err("Unknown cmd->logout_reason: 0x%02x\n", 2941 cmd->logout_reason); 2942 return -1; 2943 } 2944 2945 hdr->opcode = ISCSI_OP_LOGOUT_RSP; 2946 hdr->flags |= ISCSI_FLAG_CMD_FINAL; 2947 hdr->response = cmd->logout_response; 2948 hdr->itt = cmd->init_task_tag; 2949 cmd->stat_sn = conn->stat_sn++; 2950 hdr->statsn = cpu_to_be32(cmd->stat_sn); 2951 2952 iscsit_increment_maxcmdsn(cmd, conn->sess); 2953 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 2954 hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn)); 2955 2956 pr_debug("Built Logout Response ITT: 0x%08x StatSN:" 2957 " 0x%08x Response: 0x%02x CID: %hu on CID: %hu\n", 2958 cmd->init_task_tag, cmd->stat_sn, hdr->response, 2959 cmd->logout_cid, conn->cid); 2960 2961 return 0; 2962 } 2963 EXPORT_SYMBOL(iscsit_build_logout_rsp); 2964 2965 static int 2966 iscsit_send_logout(struct iscsit_cmd *cmd, struct iscsit_conn *conn) 2967 { 2968 int rc; 2969 2970 rc = iscsit_build_logout_rsp(cmd, conn, 2971 (struct iscsi_logout_rsp *)&cmd->pdu[0]); 2972 if (rc < 0) 2973 return rc; 2974 2975 return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0); 2976 } 2977 2978 void 2979 iscsit_build_nopin_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn, 2980 struct iscsi_nopin *hdr, bool nopout_response) 2981 { 2982 hdr->opcode = ISCSI_OP_NOOP_IN; 2983 hdr->flags |= ISCSI_FLAG_CMD_FINAL; 2984 hton24(hdr->dlength, cmd->buf_ptr_size); 2985 if (nopout_response) 2986 put_unaligned_le64(0xFFFFFFFFFFFFFFFFULL, &hdr->lun); 2987 hdr->itt = cmd->init_task_tag; 2988 hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag); 2989 cmd->stat_sn = (nopout_response) ? 
conn->stat_sn++ : 2990 conn->stat_sn; 2991 hdr->statsn = cpu_to_be32(cmd->stat_sn); 2992 2993 if (nopout_response) 2994 iscsit_increment_maxcmdsn(cmd, conn->sess); 2995 2996 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 2997 hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn)); 2998 2999 pr_debug("Built NOPIN %s Response ITT: 0x%08x, TTT: 0x%08x," 3000 " StatSN: 0x%08x, Length %u\n", (nopout_response) ? 3001 "Solicited" : "Unsolicited", cmd->init_task_tag, 3002 cmd->targ_xfer_tag, cmd->stat_sn, cmd->buf_ptr_size); 3003 } 3004 EXPORT_SYMBOL(iscsit_build_nopin_rsp); 3005 3006 /* 3007 * Unsolicited NOPIN, either requesting a response or not. 3008 */ 3009 static int iscsit_send_unsolicited_nopin( 3010 struct iscsit_cmd *cmd, 3011 struct iscsit_conn *conn, 3012 int want_response) 3013 { 3014 struct iscsi_nopin *hdr = (struct iscsi_nopin *)&cmd->pdu[0]; 3015 int ret; 3016 3017 iscsit_build_nopin_rsp(cmd, conn, hdr, false); 3018 3019 pr_debug("Sending Unsolicited NOPIN TTT: 0x%08x StatSN:" 3020 " 0x%08x CID: %hu\n", hdr->ttt, cmd->stat_sn, conn->cid); 3021 3022 ret = conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0); 3023 if (ret < 0) 3024 return ret; 3025 3026 spin_lock_bh(&cmd->istate_lock); 3027 cmd->i_state = want_response ? 3028 ISTATE_SENT_NOPIN_WANT_RESPONSE : ISTATE_SENT_STATUS; 3029 spin_unlock_bh(&cmd->istate_lock); 3030 3031 return 0; 3032 } 3033 3034 static int 3035 iscsit_send_nopin(struct iscsit_cmd *cmd, struct iscsit_conn *conn) 3036 { 3037 struct iscsi_nopin *hdr = (struct iscsi_nopin *)&cmd->pdu[0]; 3038 3039 iscsit_build_nopin_rsp(cmd, conn, hdr, true); 3040 3041 /* 3042 * NOPOUT Ping Data is attached to struct iscsit_cmd->buf_ptr. 3043 * NOPOUT DataSegmentLength is at struct iscsit_cmd->buf_ptr_size. 3044 */ 3045 pr_debug("Echoing back %u bytes of ping data.\n", cmd->buf_ptr_size); 3046 3047 return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, 3048 cmd->buf_ptr, 3049 cmd->buf_ptr_size); 3050 } 3051 3052 static int iscsit_send_r2t( 3053 struct iscsit_cmd *cmd, 3054 struct iscsit_conn *conn) 3055 { 3056 struct iscsi_r2t *r2t; 3057 struct iscsi_r2t_rsp *hdr; 3058 int ret; 3059 3060 r2t = iscsit_get_r2t_from_list(cmd); 3061 if (!r2t) 3062 return -1; 3063 3064 hdr = (struct iscsi_r2t_rsp *) cmd->pdu; 3065 memset(hdr, 0, ISCSI_HDR_LEN); 3066 hdr->opcode = ISCSI_OP_R2T; 3067 hdr->flags |= ISCSI_FLAG_CMD_FINAL; 3068 int_to_scsilun(cmd->se_cmd.orig_fe_lun, 3069 (struct scsi_lun *)&hdr->lun); 3070 hdr->itt = cmd->init_task_tag; 3071 if (conn->conn_transport->iscsit_get_r2t_ttt) 3072 conn->conn_transport->iscsit_get_r2t_ttt(conn, cmd, r2t); 3073 else 3074 r2t->targ_xfer_tag = session_get_next_ttt(conn->sess); 3075 hdr->ttt = cpu_to_be32(r2t->targ_xfer_tag); 3076 hdr->statsn = cpu_to_be32(conn->stat_sn); 3077 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 3078 hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn)); 3079 hdr->r2tsn = cpu_to_be32(r2t->r2t_sn); 3080 hdr->data_offset = cpu_to_be32(r2t->offset); 3081 hdr->data_length = cpu_to_be32(r2t->xfer_len); 3082 3083 pr_debug("Built %sR2T, ITT: 0x%08x, TTT: 0x%08x, StatSN:" 3084 " 0x%08x, R2TSN: 0x%08x, Offset: %u, DDTL: %u, CID: %hu\n", 3085 (!r2t->recovery_r2t) ? 
"" : "Recovery ", cmd->init_task_tag, 3086 r2t->targ_xfer_tag, ntohl(hdr->statsn), r2t->r2t_sn, 3087 r2t->offset, r2t->xfer_len, conn->cid); 3088 3089 spin_lock_bh(&cmd->r2t_lock); 3090 r2t->sent_r2t = 1; 3091 spin_unlock_bh(&cmd->r2t_lock); 3092 3093 ret = conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0); 3094 if (ret < 0) { 3095 return ret; 3096 } 3097 3098 spin_lock_bh(&cmd->dataout_timeout_lock); 3099 iscsit_start_dataout_timer(cmd, conn); 3100 spin_unlock_bh(&cmd->dataout_timeout_lock); 3101 3102 return 0; 3103 } 3104 3105 /* 3106 * @recovery: If called from iscsi_task_reassign_complete_write() for 3107 * connection recovery. 3108 */ 3109 int iscsit_build_r2ts_for_cmd( 3110 struct iscsit_conn *conn, 3111 struct iscsit_cmd *cmd, 3112 bool recovery) 3113 { 3114 int first_r2t = 1; 3115 u32 offset = 0, xfer_len = 0; 3116 3117 spin_lock_bh(&cmd->r2t_lock); 3118 if (cmd->cmd_flags & ICF_SENT_LAST_R2T) { 3119 spin_unlock_bh(&cmd->r2t_lock); 3120 return 0; 3121 } 3122 3123 if (conn->sess->sess_ops->DataSequenceInOrder && 3124 !recovery) 3125 cmd->r2t_offset = max(cmd->r2t_offset, cmd->write_data_done); 3126 3127 while (cmd->outstanding_r2ts < conn->sess->sess_ops->MaxOutstandingR2T) { 3128 if (conn->sess->sess_ops->DataSequenceInOrder) { 3129 offset = cmd->r2t_offset; 3130 3131 if (first_r2t && recovery) { 3132 int new_data_end = offset + 3133 conn->sess->sess_ops->MaxBurstLength - 3134 cmd->next_burst_len; 3135 3136 if (new_data_end > cmd->se_cmd.data_length) 3137 xfer_len = cmd->se_cmd.data_length - offset; 3138 else 3139 xfer_len = 3140 conn->sess->sess_ops->MaxBurstLength - 3141 cmd->next_burst_len; 3142 } else { 3143 int new_data_end = offset + 3144 conn->sess->sess_ops->MaxBurstLength; 3145 3146 if (new_data_end > cmd->se_cmd.data_length) 3147 xfer_len = cmd->se_cmd.data_length - offset; 3148 else 3149 xfer_len = conn->sess->sess_ops->MaxBurstLength; 3150 } 3151 3152 if ((s32)xfer_len < 0) { 3153 cmd->cmd_flags |= ICF_SENT_LAST_R2T; 3154 break; 3155 } 3156 3157 cmd->r2t_offset += xfer_len; 3158 3159 if (cmd->r2t_offset == cmd->se_cmd.data_length) 3160 cmd->cmd_flags |= ICF_SENT_LAST_R2T; 3161 } else { 3162 struct iscsi_seq *seq; 3163 3164 seq = iscsit_get_seq_holder_for_r2t(cmd); 3165 if (!seq) { 3166 spin_unlock_bh(&cmd->r2t_lock); 3167 return -1; 3168 } 3169 3170 offset = seq->offset; 3171 xfer_len = seq->xfer_len; 3172 3173 if (cmd->seq_send_order == cmd->seq_count) 3174 cmd->cmd_flags |= ICF_SENT_LAST_R2T; 3175 } 3176 cmd->outstanding_r2ts++; 3177 first_r2t = 0; 3178 3179 if (iscsit_add_r2t_to_list(cmd, offset, xfer_len, 0, 0) < 0) { 3180 spin_unlock_bh(&cmd->r2t_lock); 3181 return -1; 3182 } 3183 3184 if (cmd->cmd_flags & ICF_SENT_LAST_R2T) 3185 break; 3186 } 3187 spin_unlock_bh(&cmd->r2t_lock); 3188 3189 return 0; 3190 } 3191 EXPORT_SYMBOL(iscsit_build_r2ts_for_cmd); 3192 3193 void iscsit_build_rsp_pdu(struct iscsit_cmd *cmd, struct iscsit_conn *conn, 3194 bool inc_stat_sn, struct iscsi_scsi_rsp *hdr) 3195 { 3196 if (inc_stat_sn) 3197 cmd->stat_sn = conn->stat_sn++; 3198 3199 atomic_long_inc(&conn->sess->rsp_pdus); 3200 3201 memset(hdr, 0, ISCSI_HDR_LEN); 3202 hdr->opcode = ISCSI_OP_SCSI_CMD_RSP; 3203 hdr->flags |= ISCSI_FLAG_CMD_FINAL; 3204 if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) { 3205 hdr->flags |= ISCSI_FLAG_CMD_OVERFLOW; 3206 hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count); 3207 } else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) { 3208 hdr->flags |= ISCSI_FLAG_CMD_UNDERFLOW; 3209 hdr->residual_count = 
cpu_to_be32(cmd->se_cmd.residual_count); 3210 } 3211 hdr->response = cmd->iscsi_response; 3212 hdr->cmd_status = cmd->se_cmd.scsi_status; 3213 hdr->itt = cmd->init_task_tag; 3214 hdr->statsn = cpu_to_be32(cmd->stat_sn); 3215 3216 iscsit_increment_maxcmdsn(cmd, conn->sess); 3217 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 3218 hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn)); 3219 3220 pr_debug("Built SCSI Response, ITT: 0x%08x, StatSN: 0x%08x," 3221 " Response: 0x%02x, SAM Status: 0x%02x, CID: %hu\n", 3222 cmd->init_task_tag, cmd->stat_sn, cmd->se_cmd.scsi_status, 3223 cmd->se_cmd.scsi_status, conn->cid); 3224 } 3225 EXPORT_SYMBOL(iscsit_build_rsp_pdu); 3226 3227 static int iscsit_send_response(struct iscsit_cmd *cmd, struct iscsit_conn *conn) 3228 { 3229 struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)&cmd->pdu[0]; 3230 bool inc_stat_sn = (cmd->i_state == ISTATE_SEND_STATUS); 3231 void *data_buf = NULL; 3232 u32 padding = 0, data_buf_len = 0; 3233 3234 iscsit_build_rsp_pdu(cmd, conn, inc_stat_sn, hdr); 3235 3236 /* 3237 * Attach SENSE DATA payload to iSCSI Response PDU 3238 */ 3239 if (cmd->se_cmd.sense_buffer && 3240 ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) || 3241 (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) { 3242 put_unaligned_be16(cmd->se_cmd.scsi_sense_length, cmd->sense_buffer); 3243 cmd->se_cmd.scsi_sense_length += sizeof (__be16); 3244 3245 padding = -(cmd->se_cmd.scsi_sense_length) & 3; 3246 hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length); 3247 data_buf = cmd->sense_buffer; 3248 data_buf_len = cmd->se_cmd.scsi_sense_length + padding; 3249 3250 if (padding) { 3251 memset(cmd->sense_buffer + 3252 cmd->se_cmd.scsi_sense_length, 0, padding); 3253 pr_debug("Adding %u bytes of padding to" 3254 " SENSE.\n", padding); 3255 } 3256 3257 pr_debug("Attaching SENSE DATA: %u bytes to iSCSI" 3258 " Response PDU\n", 3259 cmd->se_cmd.scsi_sense_length); 3260 } 3261 3262 return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, data_buf, 3263 data_buf_len); 3264 } 3265 3266 static u8 iscsit_convert_tcm_tmr_rsp(struct se_tmr_req *se_tmr) 3267 { 3268 switch (se_tmr->response) { 3269 case TMR_FUNCTION_COMPLETE: 3270 return ISCSI_TMF_RSP_COMPLETE; 3271 case TMR_TASK_DOES_NOT_EXIST: 3272 return ISCSI_TMF_RSP_NO_TASK; 3273 case TMR_LUN_DOES_NOT_EXIST: 3274 return ISCSI_TMF_RSP_NO_LUN; 3275 case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED: 3276 return ISCSI_TMF_RSP_NOT_SUPPORTED; 3277 case TMR_FUNCTION_REJECTED: 3278 default: 3279 return ISCSI_TMF_RSP_REJECTED; 3280 } 3281 } 3282 3283 void 3284 iscsit_build_task_mgt_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn, 3285 struct iscsi_tm_rsp *hdr) 3286 { 3287 struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req; 3288 3289 hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP; 3290 hdr->flags = ISCSI_FLAG_CMD_FINAL; 3291 hdr->response = iscsit_convert_tcm_tmr_rsp(se_tmr); 3292 hdr->itt = cmd->init_task_tag; 3293 cmd->stat_sn = conn->stat_sn++; 3294 hdr->statsn = cpu_to_be32(cmd->stat_sn); 3295 3296 iscsit_increment_maxcmdsn(cmd, conn->sess); 3297 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 3298 hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn)); 3299 3300 pr_debug("Built Task Management Response ITT: 0x%08x," 3301 " StatSN: 0x%08x, Response: 0x%02x, CID: %hu\n", 3302 cmd->init_task_tag, cmd->stat_sn, hdr->response, conn->cid); 3303 } 3304 EXPORT_SYMBOL(iscsit_build_task_mgt_rsp); 3305 3306 static int 3307 iscsit_send_task_mgt_rsp(struct iscsit_cmd *cmd, struct 
iscsit_conn *conn)
3308 {
3309 struct iscsi_tm_rsp *hdr = (struct iscsi_tm_rsp *)&cmd->pdu[0];
3310
3311 iscsit_build_task_mgt_rsp(cmd, conn, hdr);
3312
3313 return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0);
3314 }
3315
3316 #define SENDTARGETS_BUF_LIMIT 32768U
3317
3318 static int
3319 iscsit_build_sendtargets_response(struct iscsit_cmd *cmd,
3320 enum iscsit_transport_type network_transport,
3321 int skip_bytes, bool *completed)
3322 {
3323 char *payload = NULL;
3324 struct iscsit_conn *conn = cmd->conn;
3325 struct iscsi_portal_group *tpg;
3326 struct iscsi_tiqn *tiqn;
3327 struct iscsi_tpg_np *tpg_np;
3328 int buffer_len, end_of_buf = 0, len = 0, payload_len = 0;
3329 int target_name_printed;
3330 unsigned char buf[ISCSI_IQN_LEN+12]; /* iqn + "TargetName=" + \0 */
3331 unsigned char *text_in = cmd->text_in_ptr, *text_ptr = NULL;
3332 bool active;
3333
3334 buffer_len = min(conn->conn_ops->MaxRecvDataSegmentLength,
3335 SENDTARGETS_BUF_LIMIT);
3336
3337 payload = kzalloc(buffer_len, GFP_KERNEL);
3338 if (!payload)
3339 return -ENOMEM;
3340
3341 /*
3342 * Locate pointer to iqn./eui. string for ICF_SENDTARGETS_SINGLE
3343 * explicit case.
3344 */
3345 if (cmd->cmd_flags & ICF_SENDTARGETS_SINGLE) {
3346 text_ptr = strchr(text_in, '=');
3347 if (!text_ptr) {
3348 pr_err("Unable to locate '=' string in text_in:"
3349 " %s\n", text_in);
3350 kfree(payload);
3351 return -EINVAL;
3352 }
3353 /*
3354 * Skip over '=' character.
3355 */
3356 text_ptr += 1;
3357 }
3358
3359 spin_lock(&tiqn_lock);
3360 list_for_each_entry(tiqn, &g_tiqn_list, tiqn_list) {
3361 if ((cmd->cmd_flags & ICF_SENDTARGETS_SINGLE) &&
3362 strcmp(tiqn->tiqn, text_ptr)) {
3363 continue;
3364 }
3365
3366 target_name_printed = 0;
3367
3368 spin_lock(&tiqn->tiqn_tpg_lock);
3369 list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) {
3370
3371 /* If demo_mode_discovery=0 and generate_node_acls=0
3372 * (demo mode disabled) do not return
3373 * TargetName+TargetAddress unless a NodeACL exists.
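 *
 * In other words, a TPG is skipped only when generate_node_acls is 0,
 * demo_mode_discovery is 0, and no explicit NodeACL matches the
 * InitiatorName of the discovery session.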
3374 */ 3375 3376 if ((tpg->tpg_attrib.generate_node_acls == 0) && 3377 (tpg->tpg_attrib.demo_mode_discovery == 0) && 3378 (!target_tpg_has_node_acl(&tpg->tpg_se_tpg, 3379 cmd->conn->sess->sess_ops->InitiatorName))) { 3380 continue; 3381 } 3382 3383 spin_lock(&tpg->tpg_state_lock); 3384 active = (tpg->tpg_state == TPG_STATE_ACTIVE); 3385 spin_unlock(&tpg->tpg_state_lock); 3386 3387 if (!active && tpg->tpg_attrib.tpg_enabled_sendtargets) 3388 continue; 3389 3390 spin_lock(&tpg->tpg_np_lock); 3391 list_for_each_entry(tpg_np, &tpg->tpg_gnp_list, 3392 tpg_np_list) { 3393 struct iscsi_np *np = tpg_np->tpg_np; 3394 struct sockaddr_storage *sockaddr; 3395 3396 if (np->np_network_transport != network_transport) 3397 continue; 3398 3399 if (!target_name_printed) { 3400 len = sprintf(buf, "TargetName=%s", 3401 tiqn->tiqn); 3402 len += 1; 3403 3404 if ((len + payload_len) > buffer_len) { 3405 spin_unlock(&tpg->tpg_np_lock); 3406 spin_unlock(&tiqn->tiqn_tpg_lock); 3407 end_of_buf = 1; 3408 goto eob; 3409 } 3410 3411 if (skip_bytes && len <= skip_bytes) { 3412 skip_bytes -= len; 3413 } else { 3414 memcpy(payload + payload_len, buf, len); 3415 payload_len += len; 3416 target_name_printed = 1; 3417 if (len > skip_bytes) 3418 skip_bytes = 0; 3419 } 3420 } 3421 3422 if (inet_addr_is_any((struct sockaddr *)&np->np_sockaddr)) 3423 sockaddr = &conn->local_sockaddr; 3424 else 3425 sockaddr = &np->np_sockaddr; 3426 3427 len = sprintf(buf, "TargetAddress=" 3428 "%pISpc,%hu", 3429 sockaddr, 3430 tpg->tpgt); 3431 len += 1; 3432 3433 if ((len + payload_len) > buffer_len) { 3434 spin_unlock(&tpg->tpg_np_lock); 3435 spin_unlock(&tiqn->tiqn_tpg_lock); 3436 end_of_buf = 1; 3437 goto eob; 3438 } 3439 3440 if (skip_bytes && len <= skip_bytes) { 3441 skip_bytes -= len; 3442 } else { 3443 memcpy(payload + payload_len, buf, len); 3444 payload_len += len; 3445 if (len > skip_bytes) 3446 skip_bytes = 0; 3447 } 3448 } 3449 spin_unlock(&tpg->tpg_np_lock); 3450 } 3451 spin_unlock(&tiqn->tiqn_tpg_lock); 3452 eob: 3453 if (end_of_buf) { 3454 *completed = false; 3455 break; 3456 } 3457 3458 if (cmd->cmd_flags & ICF_SENDTARGETS_SINGLE) 3459 break; 3460 } 3461 spin_unlock(&tiqn_lock); 3462 3463 cmd->buf_ptr = payload; 3464 3465 return payload_len; 3466 } 3467 3468 int 3469 iscsit_build_text_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn, 3470 struct iscsi_text_rsp *hdr, 3471 enum iscsit_transport_type network_transport) 3472 { 3473 int text_length, padding; 3474 bool completed = true; 3475 3476 text_length = iscsit_build_sendtargets_response(cmd, network_transport, 3477 cmd->read_data_done, 3478 &completed); 3479 if (text_length < 0) 3480 return text_length; 3481 3482 if (completed) { 3483 hdr->flags = ISCSI_FLAG_CMD_FINAL; 3484 } else { 3485 hdr->flags = ISCSI_FLAG_TEXT_CONTINUE; 3486 cmd->read_data_done += text_length; 3487 if (cmd->targ_xfer_tag == 0xFFFFFFFF) 3488 cmd->targ_xfer_tag = session_get_next_ttt(conn->sess); 3489 } 3490 hdr->opcode = ISCSI_OP_TEXT_RSP; 3491 padding = ((-text_length) & 3); 3492 hton24(hdr->dlength, text_length); 3493 hdr->itt = cmd->init_task_tag; 3494 hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag); 3495 cmd->stat_sn = conn->stat_sn++; 3496 hdr->statsn = cpu_to_be32(cmd->stat_sn); 3497 3498 iscsit_increment_maxcmdsn(cmd, conn->sess); 3499 /* 3500 * Reset maxcmdsn_inc in multi-part text payload exchanges to 3501 * correctly increment MaxCmdSN for each response answering a 3502 * non immediate text request with a valid CmdSN. 
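 *
 * cmd->maxcmdsn_inc normally guards against advancing MaxCmdSN more
 * than once for the same command; clearing it here allows each Text
 * Response in a multi-PDU SendTargets exchange to advance the window.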
3503 */ 3504 cmd->maxcmdsn_inc = 0; 3505 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 3506 hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn)); 3507 3508 pr_debug("Built Text Response: ITT: 0x%08x, TTT: 0x%08x, StatSN: 0x%08x," 3509 " Length: %u, CID: %hu F: %d C: %d\n", cmd->init_task_tag, 3510 cmd->targ_xfer_tag, cmd->stat_sn, text_length, conn->cid, 3511 !!(hdr->flags & ISCSI_FLAG_CMD_FINAL), 3512 !!(hdr->flags & ISCSI_FLAG_TEXT_CONTINUE)); 3513 3514 return text_length + padding; 3515 } 3516 EXPORT_SYMBOL(iscsit_build_text_rsp); 3517 3518 static int iscsit_send_text_rsp( 3519 struct iscsit_cmd *cmd, 3520 struct iscsit_conn *conn) 3521 { 3522 struct iscsi_text_rsp *hdr = (struct iscsi_text_rsp *)cmd->pdu; 3523 int text_length; 3524 3525 text_length = iscsit_build_text_rsp(cmd, conn, hdr, 3526 conn->conn_transport->transport_type); 3527 if (text_length < 0) 3528 return text_length; 3529 3530 return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, 3531 cmd->buf_ptr, 3532 text_length); 3533 } 3534 3535 void 3536 iscsit_build_reject(struct iscsit_cmd *cmd, struct iscsit_conn *conn, 3537 struct iscsi_reject *hdr) 3538 { 3539 hdr->opcode = ISCSI_OP_REJECT; 3540 hdr->reason = cmd->reject_reason; 3541 hdr->flags |= ISCSI_FLAG_CMD_FINAL; 3542 hton24(hdr->dlength, ISCSI_HDR_LEN); 3543 hdr->ffffffff = cpu_to_be32(0xffffffff); 3544 cmd->stat_sn = conn->stat_sn++; 3545 hdr->statsn = cpu_to_be32(cmd->stat_sn); 3546 hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); 3547 hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn)); 3548 3549 } 3550 EXPORT_SYMBOL(iscsit_build_reject); 3551 3552 static int iscsit_send_reject( 3553 struct iscsit_cmd *cmd, 3554 struct iscsit_conn *conn) 3555 { 3556 struct iscsi_reject *hdr = (struct iscsi_reject *)&cmd->pdu[0]; 3557 3558 iscsit_build_reject(cmd, conn, hdr); 3559 3560 pr_debug("Built Reject PDU StatSN: 0x%08x, Reason: 0x%02x," 3561 " CID: %hu\n", ntohl(hdr->statsn), hdr->reason, conn->cid); 3562 3563 return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, 3564 cmd->buf_ptr, 3565 ISCSI_HDR_LEN); 3566 } 3567 3568 void iscsit_thread_get_cpumask(struct iscsit_conn *conn) 3569 { 3570 int ord, cpu; 3571 cpumask_var_t conn_allowed_cpumask; 3572 3573 /* 3574 * bitmap_id is assigned from iscsit_global->ts_bitmap from 3575 * within iscsit_start_kthreads() 3576 * 3577 * Here we use bitmap_id to determine which CPU that this 3578 * iSCSI connection's RX/TX threads will be scheduled to 3579 * execute upon. 3580 */ 3581 if (!zalloc_cpumask_var(&conn_allowed_cpumask, GFP_KERNEL)) { 3582 ord = conn->bitmap_id % cpumask_weight(cpu_online_mask); 3583 for_each_online_cpu(cpu) { 3584 if (ord-- == 0) { 3585 cpumask_set_cpu(cpu, conn->conn_cpumask); 3586 return; 3587 } 3588 } 3589 } else { 3590 cpumask_and(conn_allowed_cpumask, iscsit_global->allowed_cpumask, 3591 cpu_online_mask); 3592 3593 cpumask_clear(conn->conn_cpumask); 3594 ord = conn->bitmap_id % cpumask_weight(conn_allowed_cpumask); 3595 for_each_cpu(cpu, conn_allowed_cpumask) { 3596 if (ord-- == 0) { 3597 cpumask_set_cpu(cpu, conn->conn_cpumask); 3598 free_cpumask_var(conn_allowed_cpumask); 3599 return; 3600 } 3601 } 3602 free_cpumask_var(conn_allowed_cpumask); 3603 } 3604 /* 3605 * This should never be reached.. 
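 *
 * If it is reached anyway, dump a stack trace for debugging and fall
 * back to allowing the connection's kthreads to run on any CPU rather
 * than leaving conn_cpumask empty.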
3606 */ 3607 dump_stack(); 3608 cpumask_setall(conn->conn_cpumask); 3609 } 3610 3611 static void iscsit_thread_reschedule(struct iscsit_conn *conn) 3612 { 3613 /* 3614 * If iscsit_global->allowed_cpumask modified, reschedule iSCSI 3615 * connection's RX/TX threads update conn->allowed_cpumask. 3616 */ 3617 if (!cpumask_equal(iscsit_global->allowed_cpumask, 3618 conn->allowed_cpumask)) { 3619 iscsit_thread_get_cpumask(conn); 3620 conn->conn_tx_reset_cpumask = 1; 3621 conn->conn_rx_reset_cpumask = 1; 3622 cpumask_copy(conn->allowed_cpumask, 3623 iscsit_global->allowed_cpumask); 3624 } 3625 } 3626 3627 void iscsit_thread_check_cpumask( 3628 struct iscsit_conn *conn, 3629 struct task_struct *p, 3630 int mode) 3631 { 3632 /* 3633 * The TX and RX threads maybe call iscsit_thread_check_cpumask() 3634 * at the same time. The RX thread might be faster and return from 3635 * iscsit_thread_reschedule() with conn_rx_reset_cpumask set to 0. 3636 * Then the TX thread sets it back to 1. 3637 * The next time the RX thread loops, it sees conn_rx_reset_cpumask 3638 * set to 1 and calls set_cpus_allowed_ptr() again and set it to 0. 3639 */ 3640 iscsit_thread_reschedule(conn); 3641 3642 /* 3643 * mode == 1 signals iscsi_target_tx_thread() usage. 3644 * mode == 0 signals iscsi_target_rx_thread() usage. 3645 */ 3646 if (mode == 1) { 3647 if (!conn->conn_tx_reset_cpumask) 3648 return; 3649 } else { 3650 if (!conn->conn_rx_reset_cpumask) 3651 return; 3652 } 3653 3654 /* 3655 * Update the CPU mask for this single kthread so that 3656 * both TX and RX kthreads are scheduled to run on the 3657 * same CPU. 3658 */ 3659 set_cpus_allowed_ptr(p, conn->conn_cpumask); 3660 if (mode == 1) 3661 conn->conn_tx_reset_cpumask = 0; 3662 else 3663 conn->conn_rx_reset_cpumask = 0; 3664 } 3665 EXPORT_SYMBOL(iscsit_thread_check_cpumask); 3666 3667 int 3668 iscsit_immediate_queue(struct iscsit_conn *conn, struct iscsit_cmd *cmd, int state) 3669 { 3670 int ret; 3671 3672 switch (state) { 3673 case ISTATE_SEND_R2T: 3674 ret = iscsit_send_r2t(cmd, conn); 3675 if (ret < 0) 3676 goto err; 3677 break; 3678 case ISTATE_REMOVE: 3679 spin_lock_bh(&conn->cmd_lock); 3680 list_del_init(&cmd->i_conn_node); 3681 spin_unlock_bh(&conn->cmd_lock); 3682 3683 iscsit_free_cmd(cmd, false); 3684 break; 3685 case ISTATE_SEND_NOPIN_WANT_RESPONSE: 3686 iscsit_mod_nopin_response_timer(conn); 3687 ret = iscsit_send_unsolicited_nopin(cmd, conn, 1); 3688 if (ret < 0) 3689 goto err; 3690 break; 3691 case ISTATE_SEND_NOPIN_NO_RESPONSE: 3692 ret = iscsit_send_unsolicited_nopin(cmd, conn, 0); 3693 if (ret < 0) 3694 goto err; 3695 break; 3696 default: 3697 pr_err("Unknown Opcode: 0x%02x ITT:" 3698 " 0x%08x, i_state: %d on CID: %hu\n", 3699 cmd->iscsi_opcode, cmd->init_task_tag, state, 3700 conn->cid); 3701 goto err; 3702 } 3703 3704 return 0; 3705 3706 err: 3707 return -1; 3708 } 3709 EXPORT_SYMBOL(iscsit_immediate_queue); 3710 3711 static int 3712 iscsit_handle_immediate_queue(struct iscsit_conn *conn) 3713 { 3714 struct iscsit_transport *t = conn->conn_transport; 3715 struct iscsi_queue_req *qr; 3716 struct iscsit_cmd *cmd; 3717 u8 state; 3718 int ret; 3719 3720 while ((qr = iscsit_get_cmd_from_immediate_queue(conn))) { 3721 atomic_set(&conn->check_immediate_queue, 0); 3722 cmd = qr->cmd; 3723 state = qr->state; 3724 kmem_cache_free(lio_qr_cache, qr); 3725 3726 ret = t->iscsit_immediate_queue(conn, cmd, state); 3727 if (ret < 0) 3728 return ret; 3729 } 3730 3731 return 0; 3732 } 3733 3734 int 3735 iscsit_response_queue(struct iscsit_conn *conn, struct iscsit_cmd 
*cmd, int state) 3736 { 3737 int ret; 3738 3739 check_rsp_state: 3740 switch (state) { 3741 case ISTATE_SEND_DATAIN: 3742 ret = iscsit_send_datain(cmd, conn); 3743 if (ret < 0) 3744 goto err; 3745 else if (!ret) 3746 /* more drs */ 3747 goto check_rsp_state; 3748 else if (ret == 1) { 3749 /* all done */ 3750 spin_lock_bh(&cmd->istate_lock); 3751 cmd->i_state = ISTATE_SENT_STATUS; 3752 spin_unlock_bh(&cmd->istate_lock); 3753 3754 if (atomic_read(&conn->check_immediate_queue)) 3755 return 1; 3756 3757 return 0; 3758 } else if (ret == 2) { 3759 /* Still must send status, 3760 SCF_TRANSPORT_TASK_SENSE was set */ 3761 spin_lock_bh(&cmd->istate_lock); 3762 cmd->i_state = ISTATE_SEND_STATUS; 3763 spin_unlock_bh(&cmd->istate_lock); 3764 state = ISTATE_SEND_STATUS; 3765 goto check_rsp_state; 3766 } 3767 3768 break; 3769 case ISTATE_SEND_STATUS: 3770 case ISTATE_SEND_STATUS_RECOVERY: 3771 ret = iscsit_send_response(cmd, conn); 3772 break; 3773 case ISTATE_SEND_LOGOUTRSP: 3774 ret = iscsit_send_logout(cmd, conn); 3775 break; 3776 case ISTATE_SEND_ASYNCMSG: 3777 ret = iscsit_send_conn_drop_async_message( 3778 cmd, conn); 3779 break; 3780 case ISTATE_SEND_NOPIN: 3781 ret = iscsit_send_nopin(cmd, conn); 3782 break; 3783 case ISTATE_SEND_REJECT: 3784 ret = iscsit_send_reject(cmd, conn); 3785 break; 3786 case ISTATE_SEND_TASKMGTRSP: 3787 ret = iscsit_send_task_mgt_rsp(cmd, conn); 3788 if (ret != 0) 3789 break; 3790 ret = iscsit_tmr_post_handler(cmd, conn); 3791 if (ret != 0) 3792 iscsit_fall_back_to_erl0(conn->sess); 3793 break; 3794 case ISTATE_SEND_TEXTRSP: 3795 ret = iscsit_send_text_rsp(cmd, conn); 3796 break; 3797 default: 3798 pr_err("Unknown Opcode: 0x%02x ITT:" 3799 " 0x%08x, i_state: %d on CID: %hu\n", 3800 cmd->iscsi_opcode, cmd->init_task_tag, 3801 state, conn->cid); 3802 goto err; 3803 } 3804 if (ret < 0) 3805 goto err; 3806 3807 switch (state) { 3808 case ISTATE_SEND_LOGOUTRSP: 3809 if (!iscsit_logout_post_handler(cmd, conn)) 3810 return -ECONNRESET; 3811 fallthrough; 3812 case ISTATE_SEND_STATUS: 3813 case ISTATE_SEND_ASYNCMSG: 3814 case ISTATE_SEND_NOPIN: 3815 case ISTATE_SEND_STATUS_RECOVERY: 3816 case ISTATE_SEND_TEXTRSP: 3817 case ISTATE_SEND_TASKMGTRSP: 3818 case ISTATE_SEND_REJECT: 3819 spin_lock_bh(&cmd->istate_lock); 3820 cmd->i_state = ISTATE_SENT_STATUS; 3821 spin_unlock_bh(&cmd->istate_lock); 3822 break; 3823 default: 3824 pr_err("Unknown Opcode: 0x%02x ITT:" 3825 " 0x%08x, i_state: %d on CID: %hu\n", 3826 cmd->iscsi_opcode, cmd->init_task_tag, 3827 cmd->i_state, conn->cid); 3828 goto err; 3829 } 3830 3831 if (atomic_read(&conn->check_immediate_queue)) 3832 return 1; 3833 3834 return 0; 3835 3836 err: 3837 return -1; 3838 } 3839 EXPORT_SYMBOL(iscsit_response_queue); 3840 3841 static int iscsit_handle_response_queue(struct iscsit_conn *conn) 3842 { 3843 struct iscsit_transport *t = conn->conn_transport; 3844 struct iscsi_queue_req *qr; 3845 struct iscsit_cmd *cmd; 3846 u8 state; 3847 int ret; 3848 3849 while ((qr = iscsit_get_cmd_from_response_queue(conn))) { 3850 cmd = qr->cmd; 3851 state = qr->state; 3852 kmem_cache_free(lio_qr_cache, qr); 3853 3854 ret = t->iscsit_response_queue(conn, cmd, state); 3855 if (ret == 1 || ret < 0) 3856 return ret; 3857 } 3858 3859 return 0; 3860 } 3861 3862 int iscsi_target_tx_thread(void *arg) 3863 { 3864 int ret = 0; 3865 struct iscsit_conn *conn = arg; 3866 bool conn_freed = false; 3867 3868 /* 3869 * Allow ourselves to be interrupted by SIGINT so that a 3870 * connection recovery / failure event can be triggered externally. 
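 *
 * In practice the external trigger mirrors what iscsit_close_connection()
 * does further down in this file, roughly:
 *
 *	if (conn->tx_thread &&
 *	    cmpxchg(&conn->tx_thread_active, true, false)) {
 *		send_sig(SIGINT, conn->tx_thread, 1);
 *		kthread_stop(conn->tx_thread);
 *	}
 *
 * The loop below drains the immediate queue first and goes back to it
 * whenever iscsit_handle_response_queue() returns 1.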
3871 */ 3872 allow_signal(SIGINT); 3873 3874 while (!kthread_should_stop()) { 3875 /* 3876 * Ensure that both TX and RX per connection kthreads 3877 * are scheduled to run on the same CPU. 3878 */ 3879 iscsit_thread_check_cpumask(conn, current, 1); 3880 3881 wait_event_interruptible(conn->queues_wq, 3882 !iscsit_conn_all_queues_empty(conn)); 3883 3884 if (signal_pending(current)) 3885 goto transport_err; 3886 3887 get_immediate: 3888 ret = iscsit_handle_immediate_queue(conn); 3889 if (ret < 0) 3890 goto transport_err; 3891 3892 ret = iscsit_handle_response_queue(conn); 3893 if (ret == 1) { 3894 goto get_immediate; 3895 } else if (ret == -ECONNRESET) { 3896 conn_freed = true; 3897 goto out; 3898 } else if (ret < 0) { 3899 goto transport_err; 3900 } 3901 } 3902 3903 transport_err: 3904 /* 3905 * Avoid the normal connection failure code-path if this connection 3906 * is still within LOGIN mode, and iscsi_np process context is 3907 * responsible for cleaning up the early connection failure. 3908 */ 3909 if (conn->conn_state != TARG_CONN_STATE_IN_LOGIN) 3910 iscsit_take_action_for_connection_exit(conn, &conn_freed); 3911 out: 3912 if (!conn_freed) { 3913 while (!kthread_should_stop()) { 3914 msleep(100); 3915 } 3916 } 3917 return 0; 3918 } 3919 3920 static int iscsi_target_rx_opcode(struct iscsit_conn *conn, unsigned char *buf) 3921 { 3922 struct iscsi_hdr *hdr = (struct iscsi_hdr *)buf; 3923 struct iscsit_cmd *cmd; 3924 int ret = 0; 3925 3926 switch (hdr->opcode & ISCSI_OPCODE_MASK) { 3927 case ISCSI_OP_SCSI_CMD: 3928 cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); 3929 if (!cmd) 3930 goto reject; 3931 3932 ret = iscsit_handle_scsi_cmd(conn, cmd, buf); 3933 break; 3934 case ISCSI_OP_SCSI_DATA_OUT: 3935 ret = iscsit_handle_data_out(conn, buf); 3936 break; 3937 case ISCSI_OP_NOOP_OUT: 3938 cmd = NULL; 3939 if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) { 3940 cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); 3941 if (!cmd) 3942 goto reject; 3943 } 3944 ret = iscsit_handle_nop_out(conn, cmd, buf); 3945 break; 3946 case ISCSI_OP_SCSI_TMFUNC: 3947 cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); 3948 if (!cmd) 3949 goto reject; 3950 3951 ret = iscsit_handle_task_mgt_cmd(conn, cmd, buf); 3952 break; 3953 case ISCSI_OP_TEXT: 3954 if (hdr->ttt != cpu_to_be32(0xFFFFFFFF)) { 3955 cmd = iscsit_find_cmd_from_itt(conn, hdr->itt); 3956 if (!cmd) 3957 goto reject; 3958 } else { 3959 cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); 3960 if (!cmd) 3961 goto reject; 3962 } 3963 3964 ret = iscsit_handle_text_cmd(conn, cmd, buf); 3965 break; 3966 case ISCSI_OP_LOGOUT: 3967 cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); 3968 if (!cmd) 3969 goto reject; 3970 3971 ret = iscsit_handle_logout_cmd(conn, cmd, buf); 3972 if (ret > 0) 3973 wait_for_completion_timeout(&conn->conn_logout_comp, 3974 SECONDS_FOR_LOGOUT_COMP * HZ); 3975 break; 3976 case ISCSI_OP_SNACK: 3977 ret = iscsit_handle_snack(conn, buf); 3978 break; 3979 default: 3980 pr_err("Got unknown iSCSI OpCode: 0x%02x\n", hdr->opcode); 3981 if (!conn->sess->sess_ops->ErrorRecoveryLevel) { 3982 pr_err("Cannot recover from unknown" 3983 " opcode while ERL=0, closing iSCSI connection.\n"); 3984 return -1; 3985 } 3986 pr_err("Unable to recover from unknown opcode while OFMarker=No," 3987 " closing iSCSI connection.\n"); 3988 ret = -1; 3989 break; 3990 } 3991 3992 return ret; 3993 reject: 3994 return iscsit_add_reject(conn, ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf); 3995 } 3996 3997 static bool iscsi_target_check_conn_state(struct iscsit_conn *conn) 
3998 { 3999 bool ret; 4000 4001 spin_lock_bh(&conn->state_lock); 4002 ret = (conn->conn_state != TARG_CONN_STATE_LOGGED_IN); 4003 spin_unlock_bh(&conn->state_lock); 4004 4005 return ret; 4006 } 4007 4008 static void iscsit_get_rx_pdu(struct iscsit_conn *conn) 4009 { 4010 int ret; 4011 u8 *buffer, *tmp_buf, opcode; 4012 u32 checksum = 0, digest = 0; 4013 struct iscsi_hdr *hdr; 4014 struct kvec iov; 4015 4016 buffer = kcalloc(ISCSI_HDR_LEN, sizeof(*buffer), GFP_KERNEL); 4017 if (!buffer) 4018 return; 4019 4020 while (!kthread_should_stop()) { 4021 /* 4022 * Ensure that both TX and RX per connection kthreads 4023 * are scheduled to run on the same CPU. 4024 */ 4025 iscsit_thread_check_cpumask(conn, current, 0); 4026 4027 memset(&iov, 0, sizeof(struct kvec)); 4028 4029 iov.iov_base = buffer; 4030 iov.iov_len = ISCSI_HDR_LEN; 4031 4032 ret = rx_data(conn, &iov, 1, ISCSI_HDR_LEN); 4033 if (ret != ISCSI_HDR_LEN) { 4034 iscsit_rx_thread_wait_for_tcp(conn); 4035 break; 4036 } 4037 4038 hdr = (struct iscsi_hdr *) buffer; 4039 if (hdr->hlength) { 4040 iov.iov_len = hdr->hlength * 4; 4041 tmp_buf = krealloc(buffer, 4042 ISCSI_HDR_LEN + iov.iov_len, 4043 GFP_KERNEL); 4044 if (!tmp_buf) 4045 break; 4046 4047 buffer = tmp_buf; 4048 iov.iov_base = &buffer[ISCSI_HDR_LEN]; 4049 4050 ret = rx_data(conn, &iov, 1, iov.iov_len); 4051 if (ret != iov.iov_len) { 4052 iscsit_rx_thread_wait_for_tcp(conn); 4053 break; 4054 } 4055 } 4056 4057 if (conn->conn_ops->HeaderDigest) { 4058 iov.iov_base = &digest; 4059 iov.iov_len = ISCSI_CRC_LEN; 4060 4061 ret = rx_data(conn, &iov, 1, ISCSI_CRC_LEN); 4062 if (ret != ISCSI_CRC_LEN) { 4063 iscsit_rx_thread_wait_for_tcp(conn); 4064 break; 4065 } 4066 4067 checksum = iscsit_crc_buf(buffer, ISCSI_HDR_LEN, 0, 4068 NULL); 4069 if (digest != checksum) { 4070 pr_err("HeaderDigest CRC32C failed," 4071 " received 0x%08x, computed 0x%08x\n", 4072 digest, checksum); 4073 /* 4074 * Set the PDU to 0xff so it will intentionally 4075 * hit default in the switch below. 4076 */ 4077 memset(buffer, 0xff, ISCSI_HDR_LEN); 4078 atomic_long_inc(&conn->sess->conn_digest_errors); 4079 } else { 4080 pr_debug("Got HeaderDigest CRC32C" 4081 " 0x%08x\n", checksum); 4082 } 4083 } 4084 4085 if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) 4086 break; 4087 4088 opcode = buffer[0] & ISCSI_OPCODE_MASK; 4089 4090 if (conn->sess->sess_ops->SessionType && 4091 ((!(opcode & ISCSI_OP_TEXT)) || 4092 (!(opcode & ISCSI_OP_LOGOUT)))) { 4093 pr_err("Received illegal iSCSI Opcode: 0x%02x" 4094 " while in Discovery Session, rejecting.\n", opcode); 4095 iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, 4096 buffer); 4097 break; 4098 } 4099 4100 ret = iscsi_target_rx_opcode(conn, buffer); 4101 if (ret < 0) 4102 break; 4103 } 4104 4105 kfree(buffer); 4106 } 4107 4108 int iscsi_target_rx_thread(void *arg) 4109 { 4110 int rc; 4111 struct iscsit_conn *conn = arg; 4112 bool conn_freed = false; 4113 4114 /* 4115 * Allow ourselves to be interrupted by SIGINT so that a 4116 * connection recovery / failure event can be triggered externally. 4117 */ 4118 allow_signal(SIGINT); 4119 /* 4120 * Wait for iscsi_post_login_handler() to complete before allowing 4121 * incoming iscsi/tcp socket I/O, and/or failing the connection. 
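 *
 * A negative return from the interruptible wait means we caught SIGINT
 * before login finished; iscsi_target_check_conn_state() likewise keeps
 * us from touching the transport if the connection never reached (or
 * already left) TARG_CONN_STATE_LOGGED_IN.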
4122 */ 4123 rc = wait_for_completion_interruptible(&conn->rx_login_comp); 4124 if (rc < 0 || iscsi_target_check_conn_state(conn)) 4125 goto out; 4126 4127 if (!conn->conn_transport->iscsit_get_rx_pdu) 4128 return 0; 4129 4130 conn->conn_transport->iscsit_get_rx_pdu(conn); 4131 4132 if (!signal_pending(current)) 4133 atomic_set(&conn->transport_failed, 1); 4134 iscsit_take_action_for_connection_exit(conn, &conn_freed); 4135 4136 out: 4137 if (!conn_freed) { 4138 while (!kthread_should_stop()) { 4139 msleep(100); 4140 } 4141 } 4142 4143 return 0; 4144 } 4145 4146 static void iscsit_release_commands_from_conn(struct iscsit_conn *conn) 4147 { 4148 LIST_HEAD(tmp_list); 4149 struct iscsit_cmd *cmd = NULL, *cmd_tmp = NULL; 4150 struct iscsit_session *sess = conn->sess; 4151 /* 4152 * We expect this function to only ever be called from either RX or TX 4153 * thread context via iscsit_close_connection() once the other context 4154 * has been reset -> returned sleeping pre-handler state. 4155 */ 4156 spin_lock_bh(&conn->cmd_lock); 4157 list_splice_init(&conn->conn_cmd_list, &tmp_list); 4158 4159 list_for_each_entry_safe(cmd, cmd_tmp, &tmp_list, i_conn_node) { 4160 struct se_cmd *se_cmd = &cmd->se_cmd; 4161 4162 if (!se_cmd->se_tfo) 4163 continue; 4164 4165 spin_lock_irq(&se_cmd->t_state_lock); 4166 if (se_cmd->transport_state & CMD_T_ABORTED) { 4167 if (!(se_cmd->transport_state & CMD_T_TAS)) 4168 /* 4169 * LIO's abort path owns the cleanup for this, 4170 * so put it back on the list and let 4171 * aborted_task handle it. 4172 */ 4173 list_move_tail(&cmd->i_conn_node, 4174 &conn->conn_cmd_list); 4175 } else { 4176 se_cmd->transport_state |= CMD_T_FABRIC_STOP; 4177 } 4178 4179 if (cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) { 4180 /* 4181 * We never submitted the cmd to LIO core, so we have 4182 * to tell LIO to perform the completion process. 4183 */ 4184 spin_unlock_irq(&se_cmd->t_state_lock); 4185 target_complete_cmd(&cmd->se_cmd, SAM_STAT_TASK_ABORTED); 4186 continue; 4187 } 4188 spin_unlock_irq(&se_cmd->t_state_lock); 4189 } 4190 spin_unlock_bh(&conn->cmd_lock); 4191 4192 list_for_each_entry_safe(cmd, cmd_tmp, &tmp_list, i_conn_node) { 4193 list_del_init(&cmd->i_conn_node); 4194 4195 iscsit_increment_maxcmdsn(cmd, sess); 4196 iscsit_free_cmd(cmd, true); 4197 4198 } 4199 4200 /* 4201 * Wait on commands that were cleaned up via the aborted_task path. 4202 * LLDs that implement iscsit_wait_conn will already have waited for 4203 * commands. 4204 */ 4205 if (!conn->conn_transport->iscsit_wait_conn) { 4206 target_stop_cmd_counter(conn->cmd_cnt); 4207 target_wait_for_cmds(conn->cmd_cnt); 4208 } 4209 } 4210 4211 static void iscsit_stop_timers_for_cmds( 4212 struct iscsit_conn *conn) 4213 { 4214 struct iscsit_cmd *cmd; 4215 4216 spin_lock_bh(&conn->cmd_lock); 4217 list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) { 4218 if (cmd->data_direction == DMA_TO_DEVICE) 4219 iscsit_stop_dataout_timer(cmd); 4220 } 4221 spin_unlock_bh(&conn->cmd_lock); 4222 } 4223 4224 int iscsit_close_connection( 4225 struct iscsit_conn *conn) 4226 { 4227 int conn_logout = (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT); 4228 struct iscsit_session *sess = conn->sess; 4229 4230 pr_debug("Closing iSCSI connection CID %hu on SID:" 4231 " %u\n", conn->cid, sess->sid); 4232 /* 4233 * Always up conn_logout_comp for the traditional TCP and HW_OFFLOAD 4234 * case just in case the RX Thread in iscsi_target_rx_opcode() is 4235 * sleeping and the logout response never got sent because the 4236 * connection failed. 
4237 * 4238 * However for iser-target, isert_wait4logout() is using conn_logout_comp 4239 * to signal logout response TX interrupt completion. Go ahead and skip 4240 * this for iser since isert_rx_opcode() does not wait on logout failure, 4241 * and to avoid iscsit_conn pointer dereference in iser-target code. 4242 */ 4243 if (!conn->conn_transport->rdma_shutdown) 4244 complete(&conn->conn_logout_comp); 4245 4246 if (!strcmp(current->comm, ISCSI_RX_THREAD_NAME)) { 4247 if (conn->tx_thread && 4248 cmpxchg(&conn->tx_thread_active, true, false)) { 4249 send_sig(SIGINT, conn->tx_thread, 1); 4250 kthread_stop(conn->tx_thread); 4251 } 4252 } else if (!strcmp(current->comm, ISCSI_TX_THREAD_NAME)) { 4253 if (conn->rx_thread && 4254 cmpxchg(&conn->rx_thread_active, true, false)) { 4255 send_sig(SIGINT, conn->rx_thread, 1); 4256 kthread_stop(conn->rx_thread); 4257 } 4258 } 4259 4260 spin_lock(&iscsit_global->ts_bitmap_lock); 4261 bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id, 4262 get_order(1)); 4263 spin_unlock(&iscsit_global->ts_bitmap_lock); 4264 4265 iscsit_stop_timers_for_cmds(conn); 4266 iscsit_stop_nopin_response_timer(conn); 4267 iscsit_stop_nopin_timer(conn); 4268 4269 if (conn->conn_transport->iscsit_wait_conn) 4270 conn->conn_transport->iscsit_wait_conn(conn); 4271 4272 /* 4273 * During Connection recovery drop unacknowledged out of order 4274 * commands for this connection, and prepare the other commands 4275 * for reallegiance. 4276 * 4277 * During normal operation clear the out of order commands (but 4278 * do not free the struct iscsi_ooo_cmdsn's) and release all 4279 * struct iscsit_cmds. 4280 */ 4281 if (atomic_read(&conn->connection_recovery)) { 4282 iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(conn); 4283 iscsit_prepare_cmds_for_reallegiance(conn); 4284 } else { 4285 iscsit_clear_ooo_cmdsns_for_conn(conn); 4286 iscsit_release_commands_from_conn(conn); 4287 } 4288 iscsit_free_queue_reqs_for_conn(conn); 4289 4290 /* 4291 * Handle decrementing session or connection usage count if 4292 * a logout response was not able to be sent because the 4293 * connection failed. Fall back to Session Recovery here. 4294 */ 4295 if (atomic_read(&conn->conn_logout_remove)) { 4296 if (conn->conn_logout_reason == ISCSI_LOGOUT_REASON_CLOSE_SESSION) { 4297 iscsit_dec_conn_usage_count(conn); 4298 iscsit_dec_session_usage_count(sess); 4299 } 4300 if (conn->conn_logout_reason == ISCSI_LOGOUT_REASON_CLOSE_CONNECTION) 4301 iscsit_dec_conn_usage_count(conn); 4302 4303 atomic_set(&conn->conn_logout_remove, 0); 4304 atomic_set(&sess->session_reinstatement, 0); 4305 atomic_set(&sess->session_fall_back_to_erl0, 1); 4306 } 4307 4308 spin_lock_bh(&sess->conn_lock); 4309 list_del(&conn->conn_list); 4310 4311 /* 4312 * Attempt to let the Initiator know this connection failed by 4313 * sending an Connection Dropped Async Message on another 4314 * active connection. 4315 */ 4316 if (atomic_read(&conn->connection_recovery)) 4317 iscsit_build_conn_drop_async_message(conn); 4318 4319 spin_unlock_bh(&sess->conn_lock); 4320 4321 /* 4322 * If connection reinstatement is being performed on this connection, 4323 * up the connection reinstatement semaphore that is being blocked on 4324 * in iscsit_cause_connection_reinstatement(). 
4325 */ 4326 spin_lock_bh(&conn->state_lock); 4327 if (atomic_read(&conn->sleep_on_conn_wait_comp)) { 4328 spin_unlock_bh(&conn->state_lock); 4329 complete(&conn->conn_wait_comp); 4330 wait_for_completion(&conn->conn_post_wait_comp); 4331 spin_lock_bh(&conn->state_lock); 4332 } 4333 4334 /* 4335 * If connection reinstatement is being performed on this connection 4336 * by receiving a REMOVECONNFORRECOVERY logout request, up the 4337 * connection wait rcfr semaphore that is being blocked on 4338 * an iscsit_connection_reinstatement_rcfr(). 4339 */ 4340 if (atomic_read(&conn->connection_wait_rcfr)) { 4341 spin_unlock_bh(&conn->state_lock); 4342 complete(&conn->conn_wait_rcfr_comp); 4343 wait_for_completion(&conn->conn_post_wait_comp); 4344 spin_lock_bh(&conn->state_lock); 4345 } 4346 atomic_set(&conn->connection_reinstatement, 1); 4347 spin_unlock_bh(&conn->state_lock); 4348 4349 /* 4350 * If any other processes are accessing this connection pointer we 4351 * must wait until they have completed. 4352 */ 4353 iscsit_check_conn_usage_count(conn); 4354 4355 if (conn->sock) 4356 sock_release(conn->sock); 4357 4358 if (conn->conn_transport->iscsit_free_conn) 4359 conn->conn_transport->iscsit_free_conn(conn); 4360 4361 pr_debug("Moving to TARG_CONN_STATE_FREE.\n"); 4362 conn->conn_state = TARG_CONN_STATE_FREE; 4363 iscsit_free_conn(conn); 4364 4365 spin_lock_bh(&sess->conn_lock); 4366 atomic_dec(&sess->nconn); 4367 pr_debug("Decremented iSCSI connection count to %d from node:" 4368 " %s\n", atomic_read(&sess->nconn), 4369 sess->sess_ops->InitiatorName); 4370 /* 4371 * Make sure that if one connection fails in an non ERL=2 iSCSI 4372 * Session that they all fail. 4373 */ 4374 if ((sess->sess_ops->ErrorRecoveryLevel != 2) && !conn_logout && 4375 !atomic_read(&sess->session_logout)) 4376 atomic_set(&sess->session_fall_back_to_erl0, 1); 4377 4378 /* 4379 * If this was not the last connection in the session, and we are 4380 * performing session reinstatement or falling back to ERL=0, call 4381 * iscsit_stop_session() without sleeping to shutdown the other 4382 * active connections. 4383 */ 4384 if (atomic_read(&sess->nconn)) { 4385 if (!atomic_read(&sess->session_reinstatement) && 4386 !atomic_read(&sess->session_fall_back_to_erl0)) { 4387 spin_unlock_bh(&sess->conn_lock); 4388 return 0; 4389 } 4390 if (!atomic_read(&sess->session_stop_active)) { 4391 atomic_set(&sess->session_stop_active, 1); 4392 spin_unlock_bh(&sess->conn_lock); 4393 iscsit_stop_session(sess, 0, 0); 4394 return 0; 4395 } 4396 spin_unlock_bh(&sess->conn_lock); 4397 return 0; 4398 } 4399 4400 /* 4401 * If this was the last connection in the session and one of the 4402 * following is occurring: 4403 * 4404 * Session Reinstatement is not being performed, and are falling back 4405 * to ERL=0 call iscsit_close_session(). 4406 * 4407 * Session Logout was requested. iscsit_close_session() will be called 4408 * elsewhere. 4409 * 4410 * Session Continuation is not being performed, start the Time2Retain 4411 * handler and check if sleep_on_sess_wait_sem is active. 
4412 */
4413 if (!atomic_read(&sess->session_reinstatement) &&
4414 atomic_read(&sess->session_fall_back_to_erl0)) {
4415 spin_unlock_bh(&sess->conn_lock);
4416 complete_all(&sess->session_wait_comp);
4417 iscsit_close_session(sess, true);
4418
4419 return 0;
4420 } else if (atomic_read(&sess->session_logout)) {
4421 pr_debug("Moving to TARG_SESS_STATE_FREE.\n");
4422 sess->session_state = TARG_SESS_STATE_FREE;
4423
4424 if (atomic_read(&sess->session_close)) {
4425 spin_unlock_bh(&sess->conn_lock);
4426 complete_all(&sess->session_wait_comp);
4427 iscsit_close_session(sess, true);
4428 } else {
4429 spin_unlock_bh(&sess->conn_lock);
4430 }
4431
4432 return 0;
4433 } else {
4434 pr_debug("Moving to TARG_SESS_STATE_FAILED.\n");
4435 sess->session_state = TARG_SESS_STATE_FAILED;
4436
4437 if (!atomic_read(&sess->session_continuation))
4438 iscsit_start_time2retain_handler(sess);
4439
4440 if (atomic_read(&sess->session_close)) {
4441 spin_unlock_bh(&sess->conn_lock);
4442 complete_all(&sess->session_wait_comp);
4443 iscsit_close_session(sess, true);
4444 } else {
4445 spin_unlock_bh(&sess->conn_lock);
4446 }
4447
4448 return 0;
4449 }
4450 }
4451
4452 /*
4453 * If the iSCSI Session for the iSCSI Initiator Node exists,
4454 * forcefully shut down the iSCSI NEXUS.
4455 */
4456 int iscsit_close_session(struct iscsit_session *sess, bool can_sleep)
4457 {
4458 struct iscsi_portal_group *tpg = sess->tpg;
4459 struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
4460
4461 if (atomic_read(&sess->nconn)) {
4462 pr_err("%d connection(s) still exist for iSCSI session"
4463 " to %s\n", atomic_read(&sess->nconn),
4464 sess->sess_ops->InitiatorName);
4465 BUG();
4466 }
4467
4468 spin_lock_bh(&se_tpg->session_lock);
4469 atomic_set(&sess->session_logout, 1);
4470 atomic_set(&sess->session_reinstatement, 1);
4471 iscsit_stop_time2retain_timer(sess);
4472 spin_unlock_bh(&se_tpg->session_lock);
4473
4474 if (sess->sess_ops->ErrorRecoveryLevel == 2)
4475 iscsit_free_connection_recovery_entries(sess);
4476
4477 /*
4478 * transport_deregister_session_configfs() will clear the
4479 * struct se_node_acl->nacl_sess pointer now, as an iscsi_np process context
4480 * can be setting it again with __transport_register_session() in
4481 * iscsi_post_login_handler() after the iscsit_stop_session()
4482 * completes in iscsi_np context.
4483 */
4484 transport_deregister_session_configfs(sess->se_sess);
4485
4486 /*
4487 * If any other processes are accessing this session pointer we must
4488 * wait until they have completed. If we are in an interrupt (the
4489 * time2retain handler) and there is an active session usage count we
4490 * restart the timer and exit.
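 *
 * The timer path described above cannot block, hence the can_sleep
 * argument that is passed through to iscsit_check_session_usage_count().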
4491 */
4492 if (iscsit_check_session_usage_count(sess, can_sleep)) {
4493 atomic_set(&sess->session_logout, 0);
4494 iscsit_start_time2retain_handler(sess);
4495 return 0;
4496 }
4497
4498 transport_deregister_session(sess->se_sess);
4499
4500 iscsit_free_all_ooo_cmdsns(sess);
4501
4502 spin_lock_bh(&se_tpg->session_lock);
4503 pr_debug("Moving to TARG_SESS_STATE_FREE.\n");
4504 sess->session_state = TARG_SESS_STATE_FREE;
4505 pr_debug("Released iSCSI session from node: %s\n",
4506 sess->sess_ops->InitiatorName);
4507 tpg->nsessions--;
4508 if (tpg->tpg_tiqn)
4509 tpg->tpg_tiqn->tiqn_nsessions--;
4510
4511 pr_debug("Decremented number of active iSCSI Sessions on"
4512 " iSCSI TPG: %hu to %u\n", tpg->tpgt, tpg->nsessions);
4513
4514 ida_free(&sess_ida, sess->session_index);
4515 kfree(sess->sess_ops);
4516 sess->sess_ops = NULL;
4517 spin_unlock_bh(&se_tpg->session_lock);
4518
4519 kfree(sess);
4520 return 0;
4521 }
4522
4523 static void iscsit_logout_post_handler_closesession(
4524 struct iscsit_conn *conn)
4525 {
4526 struct iscsit_session *sess = conn->sess;
4527 int sleep = 1;
4528 /*
4529 * Traditional iscsi/tcp will invoke this logic from TX thread
4530 * context during session logout, so clear tx_thread_active and
4531 * sleep if iscsit_close_connection() has not already occurred.
4532 *
4533 * Since iser-target invokes this logic from its own workqueue,
4534 * always sleep waiting for RX/TX thread shutdown to complete
4535 * within iscsit_close_connection().
4536 */
4537 if (!conn->conn_transport->rdma_shutdown) {
4538 sleep = cmpxchg(&conn->tx_thread_active, true, false);
4539 if (!sleep)
4540 return;
4541 }
4542
4543 atomic_set(&conn->conn_logout_remove, 0);
4544 complete(&conn->conn_logout_comp);
4545
4546 iscsit_dec_conn_usage_count(conn);
4547 atomic_set(&sess->session_close, 1);
4548 iscsit_stop_session(sess, sleep, sleep);
4549 iscsit_dec_session_usage_count(sess);
4550 }
4551
4552 static void iscsit_logout_post_handler_samecid(
4553 struct iscsit_conn *conn)
4554 {
4555 int sleep = 1;
4556
4557 if (!conn->conn_transport->rdma_shutdown) {
4558 sleep = cmpxchg(&conn->tx_thread_active, true, false);
4559 if (!sleep)
4560 return;
4561 }
4562
4563 atomic_set(&conn->conn_logout_remove, 0);
4564 complete(&conn->conn_logout_comp);
4565
4566 iscsit_cause_connection_reinstatement(conn, sleep);
4567 iscsit_dec_conn_usage_count(conn);
4568 }
4569
4570 static void iscsit_logout_post_handler_diffcid(
4571 struct iscsit_conn *conn,
4572 u16 cid)
4573 {
4574 struct iscsit_conn *l_conn;
4575 struct iscsit_session *sess = conn->sess;
4576 bool conn_found = false;
4577
4578 if (!sess)
4579 return;
4580
4581 spin_lock_bh(&sess->conn_lock);
4582 list_for_each_entry(l_conn, &sess->sess_conn_list, conn_list) {
4583 if (l_conn->cid == cid) {
4584 iscsit_inc_conn_usage_count(l_conn);
4585 conn_found = true;
4586 break;
4587 }
4588 }
4589 spin_unlock_bh(&sess->conn_lock);
4590
4591 if (!conn_found)
4592 return;
4593
4594 if (l_conn->sock)
4595 l_conn->sock->ops->shutdown(l_conn->sock, RCV_SHUTDOWN);
4596
4597 spin_lock_bh(&l_conn->state_lock);
4598 pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n");
4599 l_conn->conn_state = TARG_CONN_STATE_IN_LOGOUT;
4600 spin_unlock_bh(&l_conn->state_lock);
4601
4602 iscsit_cause_connection_reinstatement(l_conn, 1);
4603 iscsit_dec_conn_usage_count(l_conn);
4604 }
4605
4606 /*
4607 * Return of 0 causes the TX thread to restart.
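 *
 * A non-zero return lets iscsit_response_queue() fall through and mark
 * the logout response ISTATE_SENT_STATUS, while a return of 0 makes it
 * bail out with -ECONNRESET so iscsi_target_tx_thread() stops driving
 * this connection.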
4608 */ 4609 int iscsit_logout_post_handler( 4610 struct iscsit_cmd *cmd, 4611 struct iscsit_conn *conn) 4612 { 4613 int ret = 0; 4614 4615 switch (cmd->logout_reason) { 4616 case ISCSI_LOGOUT_REASON_CLOSE_SESSION: 4617 switch (cmd->logout_response) { 4618 case ISCSI_LOGOUT_SUCCESS: 4619 case ISCSI_LOGOUT_CLEANUP_FAILED: 4620 default: 4621 iscsit_logout_post_handler_closesession(conn); 4622 break; 4623 } 4624 break; 4625 case ISCSI_LOGOUT_REASON_CLOSE_CONNECTION: 4626 if (conn->cid == cmd->logout_cid) { 4627 switch (cmd->logout_response) { 4628 case ISCSI_LOGOUT_SUCCESS: 4629 case ISCSI_LOGOUT_CLEANUP_FAILED: 4630 default: 4631 iscsit_logout_post_handler_samecid(conn); 4632 break; 4633 } 4634 } else { 4635 switch (cmd->logout_response) { 4636 case ISCSI_LOGOUT_SUCCESS: 4637 iscsit_logout_post_handler_diffcid(conn, 4638 cmd->logout_cid); 4639 break; 4640 case ISCSI_LOGOUT_CID_NOT_FOUND: 4641 case ISCSI_LOGOUT_CLEANUP_FAILED: 4642 default: 4643 break; 4644 } 4645 ret = 1; 4646 } 4647 break; 4648 case ISCSI_LOGOUT_REASON_RECOVERY: 4649 switch (cmd->logout_response) { 4650 case ISCSI_LOGOUT_SUCCESS: 4651 case ISCSI_LOGOUT_CID_NOT_FOUND: 4652 case ISCSI_LOGOUT_RECOVERY_UNSUPPORTED: 4653 case ISCSI_LOGOUT_CLEANUP_FAILED: 4654 default: 4655 break; 4656 } 4657 ret = 1; 4658 break; 4659 default: 4660 break; 4661 4662 } 4663 return ret; 4664 } 4665 EXPORT_SYMBOL(iscsit_logout_post_handler); 4666 4667 void iscsit_fail_session(struct iscsit_session *sess) 4668 { 4669 struct iscsit_conn *conn; 4670 4671 spin_lock_bh(&sess->conn_lock); 4672 list_for_each_entry(conn, &sess->sess_conn_list, conn_list) { 4673 pr_debug("Moving to TARG_CONN_STATE_CLEANUP_WAIT.\n"); 4674 conn->conn_state = TARG_CONN_STATE_CLEANUP_WAIT; 4675 } 4676 spin_unlock_bh(&sess->conn_lock); 4677 4678 pr_debug("Moving to TARG_SESS_STATE_FAILED.\n"); 4679 sess->session_state = TARG_SESS_STATE_FAILED; 4680 } 4681 4682 void iscsit_stop_session( 4683 struct iscsit_session *sess, 4684 int session_sleep, 4685 int connection_sleep) 4686 { 4687 u16 conn_count = atomic_read(&sess->nconn); 4688 struct iscsit_conn *conn, *conn_tmp = NULL; 4689 int is_last; 4690 4691 spin_lock_bh(&sess->conn_lock); 4692 4693 if (connection_sleep) { 4694 list_for_each_entry_safe(conn, conn_tmp, &sess->sess_conn_list, 4695 conn_list) { 4696 if (conn_count == 0) 4697 break; 4698 4699 if (list_is_last(&conn->conn_list, &sess->sess_conn_list)) { 4700 is_last = 1; 4701 } else { 4702 iscsit_inc_conn_usage_count(conn_tmp); 4703 is_last = 0; 4704 } 4705 iscsit_inc_conn_usage_count(conn); 4706 4707 spin_unlock_bh(&sess->conn_lock); 4708 iscsit_cause_connection_reinstatement(conn, 1); 4709 spin_lock_bh(&sess->conn_lock); 4710 4711 iscsit_dec_conn_usage_count(conn); 4712 if (is_last == 0) 4713 iscsit_dec_conn_usage_count(conn_tmp); 4714 conn_count--; 4715 } 4716 } else { 4717 list_for_each_entry(conn, &sess->sess_conn_list, conn_list) 4718 iscsit_cause_connection_reinstatement(conn, 0); 4719 } 4720 4721 if (session_sleep && atomic_read(&sess->nconn)) { 4722 spin_unlock_bh(&sess->conn_lock); 4723 wait_for_completion(&sess->session_wait_comp); 4724 } else 4725 spin_unlock_bh(&sess->conn_lock); 4726 } 4727 4728 int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force) 4729 { 4730 struct iscsit_session *sess; 4731 struct se_portal_group *se_tpg = &tpg->tpg_se_tpg; 4732 struct se_session *se_sess, *se_sess_tmp; 4733 LIST_HEAD(free_list); 4734 int session_count = 0; 4735 4736 spin_lock_bh(&se_tpg->session_lock); 4737 if (tpg->nsessions && !force) { 4738 
spin_unlock_bh(&se_tpg->session_lock); 4739 return -1; 4740 } 4741 4742 list_for_each_entry_safe(se_sess, se_sess_tmp, &se_tpg->tpg_sess_list, 4743 sess_list) { 4744 sess = (struct iscsit_session *)se_sess->fabric_sess_ptr; 4745 4746 spin_lock(&sess->conn_lock); 4747 if (atomic_read(&sess->session_fall_back_to_erl0) || 4748 atomic_read(&sess->session_logout) || 4749 atomic_read(&sess->session_close) || 4750 (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) { 4751 spin_unlock(&sess->conn_lock); 4752 continue; 4753 } 4754 iscsit_inc_session_usage_count(sess); 4755 atomic_set(&sess->session_reinstatement, 1); 4756 atomic_set(&sess->session_fall_back_to_erl0, 1); 4757 atomic_set(&sess->session_close, 1); 4758 spin_unlock(&sess->conn_lock); 4759 4760 list_move_tail(&se_sess->sess_list, &free_list); 4761 } 4762 spin_unlock_bh(&se_tpg->session_lock); 4763 4764 list_for_each_entry_safe(se_sess, se_sess_tmp, &free_list, sess_list) { 4765 sess = (struct iscsit_session *)se_sess->fabric_sess_ptr; 4766 4767 list_del_init(&se_sess->sess_list); 4768 iscsit_stop_session(sess, 1, 1); 4769 iscsit_dec_session_usage_count(sess); 4770 session_count++; 4771 } 4772 4773 pr_debug("Released %d iSCSI Session(s) from Target Portal" 4774 " Group: %hu\n", session_count, tpg->tpgt); 4775 return 0; 4776 } 4777 4778 MODULE_DESCRIPTION("iSCSI-Target Driver for mainline target infrastructure"); 4779 MODULE_VERSION("4.1.x"); 4780 MODULE_AUTHOR("nab@Linux-iSCSI.org"); 4781 MODULE_LICENSE("GPL"); 4782 4783 module_init(iscsi_target_init_module); 4784 module_exit(iscsi_target_cleanup_module); 4785