/*******************************************************************************
 * This file contains the iSCSI Target specific utility functions.
 *
 * (c) Copyright 2007-2013 Datera, Inc.
 *
 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 ******************************************************************************/

#include <linux/list.h>
#include <linux/percpu_ida.h>
#include <scsi/scsi_tcq.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/iscsi/iscsi_transport.h>

#include <target/iscsi/iscsi_target_core.h>
#include "iscsi_target_parameters.h"
#include "iscsi_target_seq_pdu_list.h"
#include "iscsi_target_datain_values.h"
#include "iscsi_target_erl0.h"
#include "iscsi_target_erl1.h"
#include "iscsi_target_erl2.h"
#include "iscsi_target_tpg.h"
#include "iscsi_target_util.h"
#include "iscsi_target.h"

#define PRINT_BUFF(buff, len) \
{ \
	int zzz; \
 \
	pr_debug("%d:\n", __LINE__); \
	for (zzz = 0; zzz < len; zzz++) { \
		if (zzz % 16 == 0) { \
			if (zzz) \
				pr_debug("\n"); \
			pr_debug("%4i: ", zzz); \
		} \
		pr_debug("%02x ", (unsigned char) (buff)[zzz]); \
	} \
	if ((len + 1) % 16) \
		pr_debug("\n"); \
}

extern struct list_head g_tiqn_list;
extern spinlock_t tiqn_lock;

/*
 * Called with cmd->r2t_lock held.
 */
int iscsit_add_r2t_to_list(
	struct iscsi_cmd *cmd,
	u32 offset,
	u32 xfer_len,
	int recovery,
	u32 r2t_sn)
{
	struct iscsi_r2t *r2t;

	r2t = kmem_cache_zalloc(lio_r2t_cache, GFP_ATOMIC);
	if (!r2t) {
		pr_err("Unable to allocate memory for struct iscsi_r2t.\n");
		return -1;
	}
	INIT_LIST_HEAD(&r2t->r2t_list);

	r2t->recovery_r2t = recovery;
	r2t->r2t_sn = (!r2t_sn) ?
			cmd->r2t_sn++ : r2t_sn;
	r2t->offset = offset;
	r2t->xfer_len = xfer_len;
	list_add_tail(&r2t->r2t_list, &cmd->cmd_r2t_list);
	spin_unlock_bh(&cmd->r2t_lock);

	iscsit_add_cmd_to_immediate_queue(cmd, cmd->conn, ISTATE_SEND_R2T);

	spin_lock_bh(&cmd->r2t_lock);
	return 0;
}

struct iscsi_r2t *iscsit_get_r2t_for_eos(
	struct iscsi_cmd *cmd,
	u32 offset,
	u32 length)
{
	struct iscsi_r2t *r2t;

	spin_lock_bh(&cmd->r2t_lock);
	list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
		if ((r2t->offset <= offset) &&
		    (r2t->offset + r2t->xfer_len) >= (offset + length)) {
			spin_unlock_bh(&cmd->r2t_lock);
			return r2t;
		}
	}
	spin_unlock_bh(&cmd->r2t_lock);

	pr_err("Unable to locate R2T for Offset: %u, Length:"
		" %u\n", offset, length);
	return NULL;
}

struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsi_cmd *cmd)
{
	struct iscsi_r2t *r2t;

	spin_lock_bh(&cmd->r2t_lock);
	list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
		if (!r2t->sent_r2t) {
			spin_unlock_bh(&cmd->r2t_lock);
			return r2t;
		}
	}
	spin_unlock_bh(&cmd->r2t_lock);

	pr_err("Unable to locate next R2T to send for ITT:"
		" 0x%08x.\n", cmd->init_task_tag);
	return NULL;
}

/*
 * Called with cmd->r2t_lock held.
 */
void iscsit_free_r2t(struct iscsi_r2t *r2t, struct iscsi_cmd *cmd)
{
	list_del(&r2t->r2t_list);
	kmem_cache_free(lio_r2t_cache, r2t);
}

void iscsit_free_r2ts_from_list(struct iscsi_cmd *cmd)
{
	struct iscsi_r2t *r2t, *r2t_tmp;

	spin_lock_bh(&cmd->r2t_lock);
	list_for_each_entry_safe(r2t, r2t_tmp, &cmd->cmd_r2t_list, r2t_list)
		iscsit_free_r2t(r2t, cmd);
	spin_unlock_bh(&cmd->r2t_lock);
}

/*
 * May be called from software interrupt (timer) context for allocating
 * iSCSI NopINs.
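 *
 * The @state argument is passed straight to percpu_ida_alloc(); callers in
 * atomic context (e.g. the NopIN timer path below) pass TASK_RUNNING so the
 * tag allocation fails fast instead of sleeping when the per-session tag
 * pool is exhausted.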
 */
struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, int state)
{
	struct iscsi_cmd *cmd;
	struct se_session *se_sess = conn->sess->se_sess;
	int size, tag;

	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, state);
	if (tag < 0)
		return NULL;

	size = sizeof(struct iscsi_cmd) + conn->conn_transport->priv_size;
	cmd = (struct iscsi_cmd *)(se_sess->sess_cmd_map + (tag * size));
	memset(cmd, 0, size);

	cmd->se_cmd.map_tag = tag;
	cmd->conn = conn;
	INIT_LIST_HEAD(&cmd->i_conn_node);
	INIT_LIST_HEAD(&cmd->datain_list);
	INIT_LIST_HEAD(&cmd->cmd_r2t_list);
	spin_lock_init(&cmd->datain_lock);
	spin_lock_init(&cmd->dataout_timeout_lock);
	spin_lock_init(&cmd->istate_lock);
	spin_lock_init(&cmd->error_lock);
	spin_lock_init(&cmd->r2t_lock);

	return cmd;
}
EXPORT_SYMBOL(iscsit_allocate_cmd);

struct iscsi_seq *iscsit_get_seq_holder_for_datain(
	struct iscsi_cmd *cmd,
	u32 seq_send_order)
{
	u32 i;

	for (i = 0; i < cmd->seq_count; i++)
		if (cmd->seq_list[i].seq_send_order == seq_send_order)
			return &cmd->seq_list[i];

	return NULL;
}

struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsi_cmd *cmd)
{
	u32 i;

	if (!cmd->seq_list) {
		pr_err("struct iscsi_cmd->seq_list is NULL!\n");
		return NULL;
	}

	for (i = 0; i < cmd->seq_count; i++) {
		if (cmd->seq_list[i].type != SEQTYPE_NORMAL)
			continue;
		if (cmd->seq_list[i].seq_send_order == cmd->seq_send_order) {
			cmd->seq_send_order++;
			return &cmd->seq_list[i];
		}
	}

	return NULL;
}

struct iscsi_r2t *iscsit_get_holder_for_r2tsn(
	struct iscsi_cmd *cmd,
	u32 r2t_sn)
{
	struct iscsi_r2t *r2t;

	spin_lock_bh(&cmd->r2t_lock);
	list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
		if (r2t->r2t_sn == r2t_sn) {
			spin_unlock_bh(&cmd->r2t_lock);
			return r2t;
		}
	}
	spin_unlock_bh(&cmd->r2t_lock);

	return NULL;
}

static inline int iscsit_check_received_cmdsn(struct iscsi_session *sess, u32 cmdsn)
{
	u32 max_cmdsn;
	int ret;

	/*
	 * This is the proper method of checking received CmdSN against
	 * ExpCmdSN and MaxCmdSN values, as well as accounting for out
	 * of order CmdSNs due to multiple connection sessions and/or
	 * CRC failures.
	 */
	max_cmdsn = atomic_read(&sess->max_cmd_sn);
	if (iscsi_sna_gt(cmdsn, max_cmdsn)) {
		pr_err("Received CmdSN: 0x%08x is greater than"
		       " MaxCmdSN: 0x%08x, ignoring.\n", cmdsn, max_cmdsn);
		ret = CMDSN_MAXCMDSN_OVERRUN;

	} else if (cmdsn == sess->exp_cmd_sn) {
		sess->exp_cmd_sn++;
		pr_debug("Received CmdSN matches ExpCmdSN,"
			" incremented ExpCmdSN to: 0x%08x\n",
			sess->exp_cmd_sn);
		ret = CMDSN_NORMAL_OPERATION;

	} else if (iscsi_sna_gt(cmdsn, sess->exp_cmd_sn)) {
		pr_debug("Received CmdSN: 0x%08x is greater"
			" than ExpCmdSN: 0x%08x, not acknowledging.\n",
			cmdsn, sess->exp_cmd_sn);
		ret = CMDSN_HIGHER_THAN_EXP;

	} else {
		pr_err("Received CmdSN: 0x%08x is less than"
		       " ExpCmdSN: 0x%08x, ignoring.\n", cmdsn,
		       sess->exp_cmd_sn);
		ret = CMDSN_LOWER_THAN_EXP;
	}

	return ret;
}

/*
 * Commands may be received out of order if MC/S is in use.
 * Ensure they are executed in CmdSN order.
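 *
 * Returns one of the CMDSN_* codes set up by iscsit_check_received_cmdsn();
 * note that a MaxCmdSN overrun is reported back as CMDSN_LOWER_THAN_EXP so
 * that existing callers silently drop the command in both cases.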
 */
int iscsit_sequence_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
			unsigned char *buf, __be32 cmdsn)
{
	int ret, cmdsn_ret;
	bool reject = false;
	u8 reason = ISCSI_REASON_BOOKMARK_NO_RESOURCES;

	mutex_lock(&conn->sess->cmdsn_mutex);

	cmdsn_ret = iscsit_check_received_cmdsn(conn->sess, be32_to_cpu(cmdsn));
	switch (cmdsn_ret) {
	case CMDSN_NORMAL_OPERATION:
		ret = iscsit_execute_cmd(cmd, 0);
		if ((ret >= 0) && !list_empty(&conn->sess->sess_ooo_cmdsn_list))
			iscsit_execute_ooo_cmdsns(conn->sess);
		else if (ret < 0) {
			reject = true;
			ret = CMDSN_ERROR_CANNOT_RECOVER;
		}
		break;
	case CMDSN_HIGHER_THAN_EXP:
		ret = iscsit_handle_ooo_cmdsn(conn->sess, cmd, be32_to_cpu(cmdsn));
		if (ret < 0) {
			reject = true;
			ret = CMDSN_ERROR_CANNOT_RECOVER;
			break;
		}
		ret = CMDSN_HIGHER_THAN_EXP;
		break;
	case CMDSN_LOWER_THAN_EXP:
	case CMDSN_MAXCMDSN_OVERRUN:
	default:
		cmd->i_state = ISTATE_REMOVE;
		iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state);
		/*
		 * Existing callers for iscsit_sequence_cmd() will silently
		 * ignore commands with CMDSN_LOWER_THAN_EXP, so force this
		 * return for CMDSN_MAXCMDSN_OVERRUN as well.
		 */
		ret = CMDSN_LOWER_THAN_EXP;
		break;
	}
	mutex_unlock(&conn->sess->cmdsn_mutex);

	if (reject)
		iscsit_reject_cmd(cmd, reason, buf);

	return ret;
}
EXPORT_SYMBOL(iscsit_sequence_cmd);

int iscsit_check_unsolicited_dataout(struct iscsi_cmd *cmd, unsigned char *buf)
{
	struct iscsi_conn *conn = cmd->conn;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct iscsi_data *hdr = (struct iscsi_data *) buf;
	u32 payload_length = ntoh24(hdr->dlength);

	if (conn->sess->sess_ops->InitialR2T) {
		pr_err("Received unexpected unsolicited data"
			" while InitialR2T=Yes, protocol error.\n");
		transport_send_check_condition_and_sense(se_cmd,
				TCM_UNEXPECTED_UNSOLICITED_DATA, 0);
		return -1;
	}

	if ((cmd->first_burst_len + payload_length) >
	     conn->sess->sess_ops->FirstBurstLength) {
		pr_err("Total %u bytes exceeds FirstBurstLength: %u"
			" for this Unsolicited DataOut Burst.\n",
			(cmd->first_burst_len + payload_length),
			conn->sess->sess_ops->FirstBurstLength);
		transport_send_check_condition_and_sense(se_cmd,
				TCM_INCORRECT_AMOUNT_OF_DATA, 0);
		return -1;
	}

	if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL))
		return 0;

	if (((cmd->first_burst_len + payload_length) != cmd->se_cmd.data_length) &&
	    ((cmd->first_burst_len + payload_length) !=
	     conn->sess->sess_ops->FirstBurstLength)) {
		pr_err("Unsolicited non-immediate data received %u"
			" does not equal FirstBurstLength: %u, and does"
			" not equal ExpXferLen %u.\n",
			(cmd->first_burst_len + payload_length),
			conn->sess->sess_ops->FirstBurstLength,
			cmd->se_cmd.data_length);
		transport_send_check_condition_and_sense(se_cmd,
				TCM_INCORRECT_AMOUNT_OF_DATA, 0);
		return -1;
	}
	return 0;
}

struct iscsi_cmd *iscsit_find_cmd_from_itt(
	struct iscsi_conn *conn,
	itt_t init_task_tag)
{
	struct iscsi_cmd *cmd;

	spin_lock_bh(&conn->cmd_lock);
	list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
		if (cmd->init_task_tag == init_task_tag) {
			spin_unlock_bh(&conn->cmd_lock);
			return cmd;
		}
	}
	spin_unlock_bh(&conn->cmd_lock);

	pr_err("Unable to locate ITT: 0x%08x on CID: %hu",
			init_task_tag, conn->cid);
	return NULL;
}
EXPORT_SYMBOL(iscsit_find_cmd_from_itt);

struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump(
	struct iscsi_conn *conn,
	itt_t init_task_tag,
	u32 length)
{
	struct iscsi_cmd *cmd;

	spin_lock_bh(&conn->cmd_lock);
	list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
		if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT)
			continue;
		if (cmd->init_task_tag == init_task_tag) {
			spin_unlock_bh(&conn->cmd_lock);
			return cmd;
		}
	}
	spin_unlock_bh(&conn->cmd_lock);

	pr_err("Unable to locate ITT: 0x%08x on CID: %hu,"
			" dumping payload\n", init_task_tag, conn->cid);
	if (length)
		iscsit_dump_data_payload(conn, length, 1);

	return NULL;
}

struct iscsi_cmd *iscsit_find_cmd_from_ttt(
	struct iscsi_conn *conn,
	u32 targ_xfer_tag)
{
	struct iscsi_cmd *cmd = NULL;

	spin_lock_bh(&conn->cmd_lock);
	list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
		if (cmd->targ_xfer_tag == targ_xfer_tag) {
			spin_unlock_bh(&conn->cmd_lock);
			return cmd;
		}
	}
	spin_unlock_bh(&conn->cmd_lock);

	pr_err("Unable to locate TTT: 0x%08x on CID: %hu\n",
			targ_xfer_tag, conn->cid);
	return NULL;
}

int iscsit_find_cmd_for_recovery(
	struct iscsi_session *sess,
	struct iscsi_cmd **cmd_ptr,
	struct iscsi_conn_recovery **cr_ptr,
	itt_t init_task_tag)
{
	struct iscsi_cmd *cmd = NULL;
	struct iscsi_conn_recovery *cr;
	/*
	 * Scan through the inactive connection recovery list's command list.
	 * If init_task_tag matches, the command still resides on an inactive
	 * connection recovery entry.
	 */
	spin_lock(&sess->cr_i_lock);
	list_for_each_entry(cr, &sess->cr_inactive_list, cr_list) {
		spin_lock(&cr->conn_recovery_cmd_lock);
		list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_conn_node) {
			if (cmd->init_task_tag == init_task_tag) {
				spin_unlock(&cr->conn_recovery_cmd_lock);
				spin_unlock(&sess->cr_i_lock);

				*cr_ptr = cr;
				*cmd_ptr = cmd;
				return -2;
			}
		}
		spin_unlock(&cr->conn_recovery_cmd_lock);
	}
	spin_unlock(&sess->cr_i_lock);
	/*
	 * Scan through the active connection recovery list's command list.
	 * If init_task_tag matches, the command is ready to be reassigned.
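	 *
	 * Return values: 0 when the command was found on the active list and
	 * can be reassigned, -2 when it was only found on an inactive entry,
	 * and -1 when no match exists at all.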
	 */
	spin_lock(&sess->cr_a_lock);
	list_for_each_entry(cr, &sess->cr_active_list, cr_list) {
		spin_lock(&cr->conn_recovery_cmd_lock);
		list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_conn_node) {
			if (cmd->init_task_tag == init_task_tag) {
				spin_unlock(&cr->conn_recovery_cmd_lock);
				spin_unlock(&sess->cr_a_lock);

				*cr_ptr = cr;
				*cmd_ptr = cmd;
				return 0;
			}
		}
		spin_unlock(&cr->conn_recovery_cmd_lock);
	}
	spin_unlock(&sess->cr_a_lock);

	return -1;
}

void iscsit_add_cmd_to_immediate_queue(
	struct iscsi_cmd *cmd,
	struct iscsi_conn *conn,
	u8 state)
{
	struct iscsi_queue_req *qr;

	qr = kmem_cache_zalloc(lio_qr_cache, GFP_ATOMIC);
	if (!qr) {
		pr_err("Unable to allocate memory for"
			" struct iscsi_queue_req\n");
		return;
	}
	INIT_LIST_HEAD(&qr->qr_list);
	qr->cmd = cmd;
	qr->state = state;

	spin_lock_bh(&conn->immed_queue_lock);
	list_add_tail(&qr->qr_list, &conn->immed_queue_list);
	atomic_inc(&cmd->immed_queue_count);
	atomic_set(&conn->check_immediate_queue, 1);
	spin_unlock_bh(&conn->immed_queue_lock);

	wake_up(&conn->queues_wq);
}

struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *conn)
{
	struct iscsi_queue_req *qr;

	spin_lock_bh(&conn->immed_queue_lock);
	if (list_empty(&conn->immed_queue_list)) {
		spin_unlock_bh(&conn->immed_queue_lock);
		return NULL;
	}
	qr = list_first_entry(&conn->immed_queue_list,
			      struct iscsi_queue_req, qr_list);

	list_del(&qr->qr_list);
	if (qr->cmd)
		atomic_dec(&qr->cmd->immed_queue_count);
	spin_unlock_bh(&conn->immed_queue_lock);

	return qr;
}

static void iscsit_remove_cmd_from_immediate_queue(
	struct iscsi_cmd *cmd,
	struct iscsi_conn *conn)
{
	struct iscsi_queue_req *qr, *qr_tmp;

	spin_lock_bh(&conn->immed_queue_lock);
	if (!atomic_read(&cmd->immed_queue_count)) {
		spin_unlock_bh(&conn->immed_queue_lock);
		return;
	}

	list_for_each_entry_safe(qr, qr_tmp, &conn->immed_queue_list, qr_list) {
		if (qr->cmd != cmd)
			continue;

		atomic_dec(&qr->cmd->immed_queue_count);
		list_del(&qr->qr_list);
		kmem_cache_free(lio_qr_cache, qr);
	}
	spin_unlock_bh(&conn->immed_queue_lock);

	if (atomic_read(&cmd->immed_queue_count)) {
		pr_err("ITT: 0x%08x immed_queue_count: %d\n",
			cmd->init_task_tag,
			atomic_read(&cmd->immed_queue_count));
	}
}

void iscsit_add_cmd_to_response_queue(
	struct iscsi_cmd *cmd,
	struct iscsi_conn *conn,
	u8 state)
{
	struct iscsi_queue_req *qr;

	qr = kmem_cache_zalloc(lio_qr_cache, GFP_ATOMIC);
	if (!qr) {
		pr_err("Unable to allocate memory for"
			" struct iscsi_queue_req\n");
		return;
	}
	INIT_LIST_HEAD(&qr->qr_list);
	qr->cmd = cmd;
	qr->state = state;

	spin_lock_bh(&conn->response_queue_lock);
	list_add_tail(&qr->qr_list, &conn->response_queue_list);
	atomic_inc(&cmd->response_queue_count);
	spin_unlock_bh(&conn->response_queue_lock);

	wake_up(&conn->queues_wq);
}

struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *conn)
{
	struct iscsi_queue_req *qr;

	spin_lock_bh(&conn->response_queue_lock);
	if (list_empty(&conn->response_queue_list)) {
		spin_unlock_bh(&conn->response_queue_lock);
		return NULL;
	}

	qr = list_first_entry(&conn->response_queue_list,
			      struct iscsi_queue_req, qr_list);

	list_del(&qr->qr_list);
	if (qr->cmd)
		atomic_dec(&qr->cmd->response_queue_count);
	spin_unlock_bh(&conn->response_queue_lock);

	return qr;
}

static void iscsit_remove_cmd_from_response_queue(
	struct iscsi_cmd *cmd,
	struct iscsi_conn *conn)
{
	struct iscsi_queue_req *qr, *qr_tmp;

	spin_lock_bh(&conn->response_queue_lock);
	if (!atomic_read(&cmd->response_queue_count)) {
		spin_unlock_bh(&conn->response_queue_lock);
		return;
	}

	list_for_each_entry_safe(qr, qr_tmp, &conn->response_queue_list,
				 qr_list) {
		if (qr->cmd != cmd)
			continue;

		atomic_dec(&qr->cmd->response_queue_count);
		list_del(&qr->qr_list);
		kmem_cache_free(lio_qr_cache, qr);
	}
	spin_unlock_bh(&conn->response_queue_lock);

	if (atomic_read(&cmd->response_queue_count)) {
		pr_err("ITT: 0x%08x response_queue_count: %d\n",
			cmd->init_task_tag,
			atomic_read(&cmd->response_queue_count));
	}
}

bool iscsit_conn_all_queues_empty(struct iscsi_conn *conn)
{
	bool empty;

	spin_lock_bh(&conn->immed_queue_lock);
	empty = list_empty(&conn->immed_queue_list);
	spin_unlock_bh(&conn->immed_queue_lock);

	if (!empty)
		return empty;

	spin_lock_bh(&conn->response_queue_lock);
	empty = list_empty(&conn->response_queue_list);
	spin_unlock_bh(&conn->response_queue_lock);

	return empty;
}

void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *conn)
{
	struct iscsi_queue_req *qr, *qr_tmp;

	spin_lock_bh(&conn->immed_queue_lock);
	list_for_each_entry_safe(qr, qr_tmp, &conn->immed_queue_list, qr_list) {
		list_del(&qr->qr_list);
		if (qr->cmd)
			atomic_dec(&qr->cmd->immed_queue_count);

		kmem_cache_free(lio_qr_cache, qr);
	}
	spin_unlock_bh(&conn->immed_queue_lock);

	spin_lock_bh(&conn->response_queue_lock);
	list_for_each_entry_safe(qr, qr_tmp, &conn->response_queue_list,
			qr_list) {
		list_del(&qr->qr_list);
		if (qr->cmd)
			atomic_dec(&qr->cmd->response_queue_count);

		kmem_cache_free(lio_qr_cache, qr);
	}
	spin_unlock_bh(&conn->response_queue_lock);
}

void iscsit_release_cmd(struct iscsi_cmd *cmd)
{
	struct iscsi_session *sess;
	struct se_cmd *se_cmd = &cmd->se_cmd;

	if (cmd->conn)
		sess = cmd->conn->sess;
	else
		sess = cmd->sess;

	BUG_ON(!sess || !sess->se_sess);

	kfree(cmd->buf_ptr);
	kfree(cmd->pdu_list);
	kfree(cmd->seq_list);
	kfree(cmd->tmr_req);
	kfree(cmd->iov_data);
	kfree(cmd->text_in_ptr);

	percpu_ida_free(&sess->se_sess->sess_tag_pool, se_cmd->map_tag);
}
EXPORT_SYMBOL(iscsit_release_cmd);

void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool scsi_cmd,
		       bool check_queues)
{
	struct iscsi_conn *conn = cmd->conn;

	if (scsi_cmd) {
		if (cmd->data_direction == DMA_TO_DEVICE) {
			iscsit_stop_dataout_timer(cmd);
			iscsit_free_r2ts_from_list(cmd);
		}
		if (cmd->data_direction == DMA_FROM_DEVICE)
			iscsit_free_all_datain_reqs(cmd);
	}

	if (conn && check_queues) {
		iscsit_remove_cmd_from_immediate_queue(cmd, conn);
		iscsit_remove_cmd_from_response_queue(cmd, conn);
	}
}

void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
{
	struct se_cmd *se_cmd = NULL;
	int rc;
	/*
	 * Determine if a struct se_cmd is associated with
	 * this struct iscsi_cmd.
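	 *
	 * SCSI commands and TMRs are torn down through
	 * transport_generic_free_cmd(); all other opcodes (NopIN, REJECT
	 * without an se_cmd, etc.) are released directly via
	 * iscsit_release_cmd().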
	 */
	switch (cmd->iscsi_opcode) {
	case ISCSI_OP_SCSI_CMD:
		se_cmd = &cmd->se_cmd;
		__iscsit_free_cmd(cmd, true, shutdown);
		/*
		 * Fallthrough
		 */
	case ISCSI_OP_SCSI_TMFUNC:
		rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown);
		if (!rc && shutdown && se_cmd && se_cmd->se_sess) {
			__iscsit_free_cmd(cmd, true, shutdown);
			target_put_sess_cmd(se_cmd);
		}
		break;
	case ISCSI_OP_REJECT:
		/*
		 * Handle special case for REJECT when iscsi_add_reject*() has
		 * overwritten the original iscsi_opcode assignment, and the
		 * associated cmd->se_cmd needs to be released.
		 */
		if (cmd->se_cmd.se_tfo != NULL) {
			se_cmd = &cmd->se_cmd;
			__iscsit_free_cmd(cmd, true, shutdown);

			rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown);
			if (!rc && shutdown && se_cmd->se_sess) {
				__iscsit_free_cmd(cmd, true, shutdown);
				target_put_sess_cmd(se_cmd);
			}
			break;
		}
		/* Fall-through */
	default:
		__iscsit_free_cmd(cmd, false, shutdown);
		iscsit_release_cmd(cmd);
		break;
	}
}

int iscsit_check_session_usage_count(struct iscsi_session *sess)
{
	spin_lock_bh(&sess->session_usage_lock);
	if (sess->session_usage_count != 0) {
		sess->session_waiting_on_uc = 1;
		spin_unlock_bh(&sess->session_usage_lock);
		if (in_interrupt())
			return 2;

		wait_for_completion(&sess->session_waiting_on_uc_comp);
		return 1;
	}
	spin_unlock_bh(&sess->session_usage_lock);

	return 0;
}

void iscsit_dec_session_usage_count(struct iscsi_session *sess)
{
	spin_lock_bh(&sess->session_usage_lock);
	sess->session_usage_count--;

	if (!sess->session_usage_count && sess->session_waiting_on_uc)
		complete(&sess->session_waiting_on_uc_comp);

	spin_unlock_bh(&sess->session_usage_lock);
}

void iscsit_inc_session_usage_count(struct iscsi_session *sess)
{
	spin_lock_bh(&sess->session_usage_lock);
	sess->session_usage_count++;
	spin_unlock_bh(&sess->session_usage_lock);
}

struct iscsi_conn *iscsit_get_conn_from_cid(struct iscsi_session *sess, u16 cid)
{
	struct iscsi_conn *conn;

	spin_lock_bh(&sess->conn_lock);
	list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
		if ((conn->cid == cid) &&
		    (conn->conn_state == TARG_CONN_STATE_LOGGED_IN)) {
			iscsit_inc_conn_usage_count(conn);
			spin_unlock_bh(&sess->conn_lock);
			return conn;
		}
	}
	spin_unlock_bh(&sess->conn_lock);

	return NULL;
}

struct iscsi_conn *iscsit_get_conn_from_cid_rcfr(struct iscsi_session *sess, u16 cid)
{
	struct iscsi_conn *conn;

	spin_lock_bh(&sess->conn_lock);
	list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
		if (conn->cid == cid) {
			iscsit_inc_conn_usage_count(conn);
			spin_lock(&conn->state_lock);
			atomic_set(&conn->connection_wait_rcfr, 1);
			spin_unlock(&conn->state_lock);
			spin_unlock_bh(&sess->conn_lock);
			return conn;
		}
	}
	spin_unlock_bh(&sess->conn_lock);

	return NULL;
}

void iscsit_check_conn_usage_count(struct iscsi_conn *conn)
{
	spin_lock_bh(&conn->conn_usage_lock);
	if (conn->conn_usage_count != 0) {
		conn->conn_waiting_on_uc = 1;
		spin_unlock_bh(&conn->conn_usage_lock);

		wait_for_completion(&conn->conn_waiting_on_uc_comp);
		return;
	}
	spin_unlock_bh(&conn->conn_usage_lock);
}

void iscsit_dec_conn_usage_count(struct iscsi_conn *conn)
{
	spin_lock_bh(&conn->conn_usage_lock);
	conn->conn_usage_count--;

	if (!conn->conn_usage_count && conn->conn_waiting_on_uc)
		complete(&conn->conn_waiting_on_uc_comp);

	spin_unlock_bh(&conn->conn_usage_lock);
}

void iscsit_inc_conn_usage_count(struct iscsi_conn *conn)
{
	spin_lock_bh(&conn->conn_usage_lock);
	conn->conn_usage_count++;
	spin_unlock_bh(&conn->conn_usage_lock);
}

static int iscsit_add_nopin(struct iscsi_conn *conn, int want_response)
{
	u8 state;
	struct iscsi_cmd *cmd;

	cmd = iscsit_allocate_cmd(conn, TASK_RUNNING);
	if (!cmd)
		return -1;

	cmd->iscsi_opcode = ISCSI_OP_NOOP_IN;
	state = (want_response) ? ISTATE_SEND_NOPIN_WANT_RESPONSE :
				  ISTATE_SEND_NOPIN_NO_RESPONSE;
	cmd->init_task_tag = RESERVED_ITT;
	cmd->targ_xfer_tag = (want_response) ?
			     session_get_next_ttt(conn->sess) : 0xFFFFFFFF;
	spin_lock_bh(&conn->cmd_lock);
	list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
	spin_unlock_bh(&conn->cmd_lock);

	if (want_response)
		iscsit_start_nopin_response_timer(conn);
	iscsit_add_cmd_to_immediate_queue(cmd, conn, state);

	return 0;
}

static void iscsit_handle_nopin_response_timeout(unsigned long data)
{
	struct iscsi_conn *conn = (struct iscsi_conn *) data;

	iscsit_inc_conn_usage_count(conn);

	spin_lock_bh(&conn->nopin_timer_lock);
	if (conn->nopin_response_timer_flags & ISCSI_TF_STOP) {
		spin_unlock_bh(&conn->nopin_timer_lock);
		iscsit_dec_conn_usage_count(conn);
		return;
	}

	pr_debug("Did not receive response to NOPIN on CID: %hu on"
		" SID: %u, failing connection.\n", conn->cid,
			conn->sess->sid);
	conn->nopin_response_timer_flags &= ~ISCSI_TF_RUNNING;
	spin_unlock_bh(&conn->nopin_timer_lock);

	{
	struct iscsi_portal_group *tpg = conn->sess->tpg;
	struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;

	if (tiqn) {
		spin_lock_bh(&tiqn->sess_err_stats.lock);
		strcpy(tiqn->sess_err_stats.last_sess_fail_rem_name,
				conn->sess->sess_ops->InitiatorName);
		tiqn->sess_err_stats.last_sess_failure_type =
				ISCSI_SESS_ERR_CXN_TIMEOUT;
		tiqn->sess_err_stats.cxn_timeout_errors++;
		atomic_long_inc(&conn->sess->conn_timeout_errors);
		spin_unlock_bh(&tiqn->sess_err_stats.lock);
	}
	}

	iscsit_cause_connection_reinstatement(conn, 0);
	iscsit_dec_conn_usage_count(conn);
}

void iscsit_mod_nopin_response_timer(struct iscsi_conn *conn)
{
	struct iscsi_session *sess = conn->sess;
	struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);

	spin_lock_bh(&conn->nopin_timer_lock);
	if (!(conn->nopin_response_timer_flags & ISCSI_TF_RUNNING)) {
		spin_unlock_bh(&conn->nopin_timer_lock);
		return;
	}

	mod_timer(&conn->nopin_response_timer,
		(get_jiffies_64() + na->nopin_response_timeout * HZ));
	spin_unlock_bh(&conn->nopin_timer_lock);
}

/*
 * Grabs conn->nopin_timer_lock internally; must not be called with it held.
 */
void iscsit_start_nopin_response_timer(struct iscsi_conn *conn)
{
	struct iscsi_session *sess = conn->sess;
	struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);

	spin_lock_bh(&conn->nopin_timer_lock);
	if (conn->nopin_response_timer_flags & ISCSI_TF_RUNNING) {
		spin_unlock_bh(&conn->nopin_timer_lock);
		return;
	}

	init_timer(&conn->nopin_response_timer);
	conn->nopin_response_timer.expires =
		(get_jiffies_64() + na->nopin_response_timeout * HZ);
	conn->nopin_response_timer.data = (unsigned long)conn;
	conn->nopin_response_timer.function = iscsit_handle_nopin_response_timeout;
	conn->nopin_response_timer_flags &= ~ISCSI_TF_STOP;
	conn->nopin_response_timer_flags |= ISCSI_TF_RUNNING;
	add_timer(&conn->nopin_response_timer);

	pr_debug("Started NOPIN Response Timer on CID: %d to %u"
		" seconds\n", conn->cid, na->nopin_response_timeout);
	spin_unlock_bh(&conn->nopin_timer_lock);
}

void iscsit_stop_nopin_response_timer(struct iscsi_conn *conn)
{
	spin_lock_bh(&conn->nopin_timer_lock);
	if (!(conn->nopin_response_timer_flags & ISCSI_TF_RUNNING)) {
		spin_unlock_bh(&conn->nopin_timer_lock);
		return;
	}
	conn->nopin_response_timer_flags |= ISCSI_TF_STOP;
	spin_unlock_bh(&conn->nopin_timer_lock);

	del_timer_sync(&conn->nopin_response_timer);

	spin_lock_bh(&conn->nopin_timer_lock);
	conn->nopin_response_timer_flags &= ~ISCSI_TF_RUNNING;
	spin_unlock_bh(&conn->nopin_timer_lock);
}

static void iscsit_handle_nopin_timeout(unsigned long data)
{
	struct iscsi_conn *conn = (struct iscsi_conn *) data;

	iscsit_inc_conn_usage_count(conn);

	spin_lock_bh(&conn->nopin_timer_lock);
	if (conn->nopin_timer_flags & ISCSI_TF_STOP) {
		spin_unlock_bh(&conn->nopin_timer_lock);
		iscsit_dec_conn_usage_count(conn);
		return;
	}
	conn->nopin_timer_flags &= ~ISCSI_TF_RUNNING;
	spin_unlock_bh(&conn->nopin_timer_lock);

	iscsit_add_nopin(conn, 1);
	iscsit_dec_conn_usage_count(conn);
}

/*
 * Called with conn->nopin_timer_lock held.
 */
void __iscsit_start_nopin_timer(struct iscsi_conn *conn)
{
	struct iscsi_session *sess = conn->sess;
	struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
	/*
	 * NOPIN timeout is disabled.
	 */
	if (!na->nopin_timeout)
		return;

	if (conn->nopin_timer_flags & ISCSI_TF_RUNNING)
		return;

	init_timer(&conn->nopin_timer);
	conn->nopin_timer.expires = (get_jiffies_64() + na->nopin_timeout * HZ);
	conn->nopin_timer.data = (unsigned long)conn;
	conn->nopin_timer.function = iscsit_handle_nopin_timeout;
	conn->nopin_timer_flags &= ~ISCSI_TF_STOP;
	conn->nopin_timer_flags |= ISCSI_TF_RUNNING;
	add_timer(&conn->nopin_timer);

	pr_debug("Started NOPIN Timer on CID: %d at %u second"
		" interval\n", conn->cid, na->nopin_timeout);
}

void iscsit_start_nopin_timer(struct iscsi_conn *conn)
{
	struct iscsi_session *sess = conn->sess;
	struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
	/*
	 * NOPIN timeout is disabled.
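	 * (a per-node nopin_timeout attribute of 0 turns target-initiated
	 * keepalive NopINs off entirely)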
	 */
	if (!na->nopin_timeout)
		return;

	spin_lock_bh(&conn->nopin_timer_lock);
	if (conn->nopin_timer_flags & ISCSI_TF_RUNNING) {
		spin_unlock_bh(&conn->nopin_timer_lock);
		return;
	}

	init_timer(&conn->nopin_timer);
	conn->nopin_timer.expires = (get_jiffies_64() + na->nopin_timeout * HZ);
	conn->nopin_timer.data = (unsigned long)conn;
	conn->nopin_timer.function = iscsit_handle_nopin_timeout;
	conn->nopin_timer_flags &= ~ISCSI_TF_STOP;
	conn->nopin_timer_flags |= ISCSI_TF_RUNNING;
	add_timer(&conn->nopin_timer);

	pr_debug("Started NOPIN Timer on CID: %d at %u second"
		" interval\n", conn->cid, na->nopin_timeout);
	spin_unlock_bh(&conn->nopin_timer_lock);
}

void iscsit_stop_nopin_timer(struct iscsi_conn *conn)
{
	spin_lock_bh(&conn->nopin_timer_lock);
	if (!(conn->nopin_timer_flags & ISCSI_TF_RUNNING)) {
		spin_unlock_bh(&conn->nopin_timer_lock);
		return;
	}
	conn->nopin_timer_flags |= ISCSI_TF_STOP;
	spin_unlock_bh(&conn->nopin_timer_lock);

	del_timer_sync(&conn->nopin_timer);

	spin_lock_bh(&conn->nopin_timer_lock);
	conn->nopin_timer_flags &= ~ISCSI_TF_RUNNING;
	spin_unlock_bh(&conn->nopin_timer_lock);
}

int iscsit_send_tx_data(
	struct iscsi_cmd *cmd,
	struct iscsi_conn *conn,
	int use_misc)
{
	int tx_sent, tx_size;
	u32 iov_count;
	struct kvec *iov;

send_data:
	tx_size = cmd->tx_size;

	if (!use_misc) {
		iov = &cmd->iov_data[0];
		iov_count = cmd->iov_data_count;
	} else {
		iov = &cmd->iov_misc[0];
		iov_count = cmd->iov_misc_count;
	}

	tx_sent = tx_data(conn, &iov[0], iov_count, tx_size);
	if (tx_size != tx_sent) {
		if (tx_sent == -EAGAIN) {
			pr_err("tx_data() returned -EAGAIN\n");
			goto send_data;
		} else
			return -1;
	}
	cmd->tx_size = 0;

	return 0;
}

int iscsit_fe_sendpage_sg(
	struct iscsi_cmd *cmd,
	struct iscsi_conn *conn)
{
	struct scatterlist *sg = cmd->first_data_sg;
	struct kvec iov;
	u32 tx_hdr_size, data_len;
	u32 offset = cmd->first_data_sg_off;
	int tx_sent, iov_off;

send_hdr:
	tx_hdr_size = ISCSI_HDR_LEN;
	if (conn->conn_ops->HeaderDigest)
		tx_hdr_size += ISCSI_CRC_LEN;

	iov.iov_base = cmd->pdu;
	iov.iov_len = tx_hdr_size;

	tx_sent = tx_data(conn, &iov, 1, tx_hdr_size);
	if (tx_hdr_size != tx_sent) {
		if (tx_sent == -EAGAIN) {
			pr_err("tx_data() returned -EAGAIN\n");
			goto send_hdr;
		}
		return -1;
	}

	data_len = cmd->tx_size - tx_hdr_size - cmd->padding;
	/*
	 * Set iov_off used by padding and data digest tx_data() calls below
	 * in order to determine proper offset into cmd->iov_data[]
	 */
	if (conn->conn_ops->DataDigest) {
		data_len -= ISCSI_CRC_LEN;
		if (cmd->padding)
			iov_off = (cmd->iov_data_count - 2);
		else
			iov_off = (cmd->iov_data_count - 1);
	} else {
		iov_off = (cmd->iov_data_count - 1);
	}
	/*
	 * Perform sendpage() for each page in the scatterlist
	 */
	while (data_len) {
		u32 space = (sg->length - offset);
		u32 sub_len = min_t(u32, data_len, space);
send_pg:
		tx_sent = conn->sock->ops->sendpage(conn->sock,
				sg_page(sg), sg->offset + offset, sub_len, 0);
		if (tx_sent != sub_len) {
			if (tx_sent == -EAGAIN) {
				pr_err("tcp_sendpage() returned"
" -EAGAIN\n"); 1187 goto send_pg; 1188 } 1189 1190 pr_err("tcp_sendpage() failure: %d\n", 1191 tx_sent); 1192 return -1; 1193 } 1194 1195 data_len -= sub_len; 1196 offset = 0; 1197 sg = sg_next(sg); 1198 } 1199 1200 send_padding: 1201 if (cmd->padding) { 1202 struct kvec *iov_p = &cmd->iov_data[iov_off++]; 1203 1204 tx_sent = tx_data(conn, iov_p, 1, cmd->padding); 1205 if (cmd->padding != tx_sent) { 1206 if (tx_sent == -EAGAIN) { 1207 pr_err("tx_data() returned -EAGAIN\n"); 1208 goto send_padding; 1209 } 1210 return -1; 1211 } 1212 } 1213 1214 send_datacrc: 1215 if (conn->conn_ops->DataDigest) { 1216 struct kvec *iov_d = &cmd->iov_data[iov_off]; 1217 1218 tx_sent = tx_data(conn, iov_d, 1, ISCSI_CRC_LEN); 1219 if (ISCSI_CRC_LEN != tx_sent) { 1220 if (tx_sent == -EAGAIN) { 1221 pr_err("tx_data() returned -EAGAIN\n"); 1222 goto send_datacrc; 1223 } 1224 return -1; 1225 } 1226 } 1227 1228 return 0; 1229 } 1230 1231 /* 1232 * This function is used for mainly sending a ISCSI_TARG_LOGIN_RSP PDU 1233 * back to the Initiator when an expection condition occurs with the 1234 * errors set in status_class and status_detail. 1235 * 1236 * Parameters: iSCSI Connection, Status Class, Status Detail. 1237 * Returns: 0 on success, -1 on error. 1238 */ 1239 int iscsit_tx_login_rsp(struct iscsi_conn *conn, u8 status_class, u8 status_detail) 1240 { 1241 struct iscsi_login_rsp *hdr; 1242 struct iscsi_login *login = conn->conn_login; 1243 1244 login->login_failed = 1; 1245 iscsit_collect_login_stats(conn, status_class, status_detail); 1246 1247 memset(&login->rsp[0], 0, ISCSI_HDR_LEN); 1248 1249 hdr = (struct iscsi_login_rsp *)&login->rsp[0]; 1250 hdr->opcode = ISCSI_OP_LOGIN_RSP; 1251 hdr->status_class = status_class; 1252 hdr->status_detail = status_detail; 1253 hdr->itt = conn->login_itt; 1254 1255 return conn->conn_transport->iscsit_put_login_tx(conn, login, 0); 1256 } 1257 1258 void iscsit_print_session_params(struct iscsi_session *sess) 1259 { 1260 struct iscsi_conn *conn; 1261 1262 pr_debug("-----------------------------[Session Params for" 1263 " SID: %u]-----------------------------\n", sess->sid); 1264 spin_lock_bh(&sess->conn_lock); 1265 list_for_each_entry(conn, &sess->sess_conn_list, conn_list) 1266 iscsi_dump_conn_ops(conn->conn_ops); 1267 spin_unlock_bh(&sess->conn_lock); 1268 1269 iscsi_dump_sess_ops(sess->sess_ops); 1270 } 1271 1272 static int iscsit_do_rx_data( 1273 struct iscsi_conn *conn, 1274 struct iscsi_data_count *count) 1275 { 1276 int data = count->data_length, rx_loop = 0, total_rx = 0; 1277 struct msghdr msg; 1278 1279 if (!conn || !conn->sock || !conn->conn_ops) 1280 return -1; 1281 1282 memset(&msg, 0, sizeof(struct msghdr)); 1283 iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, 1284 count->iov, count->iov_count, data); 1285 1286 while (total_rx < data) { 1287 rx_loop = sock_recvmsg(conn->sock, &msg, 1288 (data - total_rx), MSG_WAITALL); 1289 if (rx_loop <= 0) { 1290 pr_debug("rx_loop: %d total_rx: %d\n", 1291 rx_loop, total_rx); 1292 return rx_loop; 1293 } 1294 total_rx += rx_loop; 1295 pr_debug("rx_loop: %d, total_rx: %d, data: %d\n", 1296 rx_loop, total_rx, data); 1297 } 1298 1299 return total_rx; 1300 } 1301 1302 static int iscsit_do_tx_data( 1303 struct iscsi_conn *conn, 1304 struct iscsi_data_count *count) 1305 { 1306 int ret, iov_len; 1307 struct kvec *iov_p; 1308 struct msghdr msg; 1309 1310 if (!conn || !conn->sock || !conn->conn_ops) 1311 return -1; 1312 1313 if (count->data_length <= 0) { 1314 pr_err("Data length is: %d\n", count->data_length); 1315 return -1; 1316 } 1317 
	memset(&msg, 0, sizeof(struct msghdr));

	iov_p = count->iov;
	iov_len = count->iov_count;

	ret = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len,
			     count->data_length);
	if (ret != count->data_length) {
		pr_err("Unexpected ret: %d send data %d\n",
		       ret, count->data_length);
		return -EPIPE;
	}
	pr_debug("ret: %d, sent data: %d\n", ret, count->data_length);

	return ret;
}

int rx_data(
	struct iscsi_conn *conn,
	struct kvec *iov,
	int iov_count,
	int data)
{
	struct iscsi_data_count c;

	if (!conn || !conn->sock || !conn->conn_ops)
		return -1;

	memset(&c, 0, sizeof(struct iscsi_data_count));
	c.iov = iov;
	c.iov_count = iov_count;
	c.data_length = data;
	c.type = ISCSI_RX_DATA;

	return iscsit_do_rx_data(conn, &c);
}

int tx_data(
	struct iscsi_conn *conn,
	struct kvec *iov,
	int iov_count,
	int data)
{
	struct iscsi_data_count c;

	if (!conn || !conn->sock || !conn->conn_ops)
		return -1;

	memset(&c, 0, sizeof(struct iscsi_data_count));
	c.iov = iov;
	c.iov_count = iov_count;
	c.data_length = data;
	c.type = ISCSI_TX_DATA;

	return iscsit_do_tx_data(conn, &c);
}

static bool sockaddr_equal(struct sockaddr_storage *x, struct sockaddr_storage *y)
{
	switch (x->ss_family) {
	case AF_INET: {
		struct sockaddr_in *sinx = (struct sockaddr_in *)x;
		struct sockaddr_in *siny = (struct sockaddr_in *)y;
		if (sinx->sin_addr.s_addr != siny->sin_addr.s_addr)
			return false;
		if (sinx->sin_port != siny->sin_port)
			return false;
		break;
	}
	case AF_INET6: {
		struct sockaddr_in6 *sinx = (struct sockaddr_in6 *)x;
		struct sockaddr_in6 *siny = (struct sockaddr_in6 *)y;
		if (!ipv6_addr_equal(&sinx->sin6_addr, &siny->sin6_addr))
			return false;
		if (sinx->sin6_port != siny->sin6_port)
			return false;
		break;
	}
	default:
		return false;
	}
	return true;
}

void iscsit_collect_login_stats(
	struct iscsi_conn *conn,
	u8 status_class,
	u8 status_detail)
{
	struct iscsi_param *intrname = NULL;
	struct iscsi_tiqn *tiqn;
	struct iscsi_login_stats *ls;

	tiqn = iscsit_snmp_get_tiqn(conn);
	if (!tiqn)
		return;

	ls = &tiqn->login_stats;

	spin_lock(&ls->lock);
	if (sockaddr_equal(&conn->login_sockaddr, &ls->last_intr_fail_sockaddr) &&
	    ((get_jiffies_64() - ls->last_fail_time) < 10)) {
		/* We already have the failure info for this login */
		spin_unlock(&ls->lock);
		return;
	}

	if (status_class == ISCSI_STATUS_CLS_SUCCESS)
		ls->accepts++;
	else if (status_class == ISCSI_STATUS_CLS_REDIRECT) {
		ls->redirects++;
		ls->last_fail_type = ISCSI_LOGIN_FAIL_REDIRECT;
	} else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR) &&
		   (status_detail == ISCSI_LOGIN_STATUS_AUTH_FAILED)) {
		ls->authenticate_fails++;
		ls->last_fail_type = ISCSI_LOGIN_FAIL_AUTHENTICATE;
	} else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR) &&
		   (status_detail == ISCSI_LOGIN_STATUS_TGT_FORBIDDEN)) {
		ls->authorize_fails++;
		ls->last_fail_type = ISCSI_LOGIN_FAIL_AUTHORIZE;
	} else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR) &&
		   (status_detail == ISCSI_LOGIN_STATUS_INIT_ERR)) {
		ls->negotiate_fails++;
		ls->last_fail_type =
			ISCSI_LOGIN_FAIL_NEGOTIATE;
	} else {
		ls->other_fails++;
		ls->last_fail_type = ISCSI_LOGIN_FAIL_OTHER;
	}

	/* Save initiator name, ip address and time, if it is a failed login */
	if (status_class != ISCSI_STATUS_CLS_SUCCESS) {
		if (conn->param_list)
			intrname = iscsi_find_param_from_key(INITIATORNAME,
							     conn->param_list);
		strlcpy(ls->last_intr_fail_name,
			(intrname ? intrname->value : "Unknown"),
			sizeof(ls->last_intr_fail_name));

		ls->last_intr_fail_ip_family = conn->login_family;

		ls->last_intr_fail_sockaddr = conn->login_sockaddr;
		ls->last_fail_time = get_jiffies_64();
	}

	spin_unlock(&ls->lock);
}

struct iscsi_tiqn *iscsit_snmp_get_tiqn(struct iscsi_conn *conn)
{
	struct iscsi_portal_group *tpg;

	if (!conn || !conn->sess)
		return NULL;

	tpg = conn->sess->tpg;
	if (!tpg)
		return NULL;

	if (!tpg->tpg_tiqn)
		return NULL;

	return tpg->tpg_tiqn;
}