/*******************************************************************************
 * This file contains the iSCSI Target specific utility functions.
 *
 * © Copyright 2007-2011 RisingTide Systems LLC.
 *
 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
 *
 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 ******************************************************************************/

#include <linux/list.h>
#include <scsi/scsi_tcq.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_transport.h>
#include <target/target_core_tmr.h>
#include <target/target_core_fabric_ops.h>
#include <target/target_core_configfs.h>

#include "iscsi_target_core.h"
#include "iscsi_target_parameters.h"
#include "iscsi_target_seq_pdu_list.h"
#include "iscsi_target_datain_values.h"
#include "iscsi_target_erl0.h"
#include "iscsi_target_erl1.h"
#include "iscsi_target_erl2.h"
#include "iscsi_target_tpg.h"
#include "iscsi_target_tq.h"
#include "iscsi_target_util.h"
#include "iscsi_target.h"

#define PRINT_BUFF(buff, len)						\
{									\
	int zzz;							\
									\
	pr_debug("%d:\n", __LINE__);					\
	for (zzz = 0; zzz < len; zzz++) {				\
		if (zzz % 16 == 0) {					\
			if (zzz)					\
				pr_debug("\n");				\
			pr_debug("%4i: ", zzz);				\
		}							\
		pr_debug("%02x ", (unsigned char) (buff)[zzz]);		\
	}								\
	if ((len + 1) % 16)						\
		pr_debug("\n");						\
}

extern struct list_head g_tiqn_list;
extern spinlock_t tiqn_lock;

/*
 * Called with cmd->r2t_lock held.
 */
int iscsit_add_r2t_to_list(
	struct iscsi_cmd *cmd,
	u32 offset,
	u32 xfer_len,
	int recovery,
	u32 r2t_sn)
{
	struct iscsi_r2t *r2t;

	r2t = kmem_cache_zalloc(lio_r2t_cache, GFP_ATOMIC);
	if (!r2t) {
		pr_err("Unable to allocate memory for struct iscsi_r2t.\n");
		return -1;
	}
	INIT_LIST_HEAD(&r2t->r2t_list);

	r2t->recovery_r2t = recovery;
	r2t->r2t_sn = (!r2t_sn) ? cmd->r2t_sn++ : r2t_sn;
	r2t->offset = offset;
	r2t->xfer_len = xfer_len;
	list_add_tail(&r2t->r2t_list, &cmd->cmd_r2t_list);
	spin_unlock_bh(&cmd->r2t_lock);

	iscsit_add_cmd_to_immediate_queue(cmd, cmd->conn, ISTATE_SEND_R2T);

	spin_lock_bh(&cmd->r2t_lock);
	return 0;
}

struct iscsi_r2t *iscsit_get_r2t_for_eos(
	struct iscsi_cmd *cmd,
	u32 offset,
	u32 length)
{
	struct iscsi_r2t *r2t;

	spin_lock_bh(&cmd->r2t_lock);
	list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
		if ((r2t->offset <= offset) &&
		    (r2t->offset + r2t->xfer_len) >= (offset + length)) {
			spin_unlock_bh(&cmd->r2t_lock);
			return r2t;
		}
	}
	spin_unlock_bh(&cmd->r2t_lock);

	pr_err("Unable to locate R2T for Offset: %u, Length:"
		" %u\n", offset, length);
	return NULL;
}

struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsi_cmd *cmd)
{
	struct iscsi_r2t *r2t;

	spin_lock_bh(&cmd->r2t_lock);
	list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
		if (!r2t->sent_r2t) {
			spin_unlock_bh(&cmd->r2t_lock);
			return r2t;
		}
	}
	spin_unlock_bh(&cmd->r2t_lock);

	pr_err("Unable to locate next R2T to send for ITT:"
		" 0x%08x.\n", cmd->init_task_tag);
	return NULL;
}

/*
 * Called with cmd->r2t_lock held.
 */
void iscsit_free_r2t(struct iscsi_r2t *r2t, struct iscsi_cmd *cmd)
{
	list_del(&r2t->r2t_list);
	kmem_cache_free(lio_r2t_cache, r2t);
}

void iscsit_free_r2ts_from_list(struct iscsi_cmd *cmd)
{
	struct iscsi_r2t *r2t, *r2t_tmp;

	spin_lock_bh(&cmd->r2t_lock);
	list_for_each_entry_safe(r2t, r2t_tmp, &cmd->cmd_r2t_list, r2t_list)
		iscsit_free_r2t(r2t, cmd);
	spin_unlock_bh(&cmd->r2t_lock);
}

/*
 * May be called from software interrupt (timer) context for allocating
 * iSCSI NopINs.
 */
struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp_mask)
{
	struct iscsi_cmd *cmd;

	cmd = kmem_cache_zalloc(lio_cmd_cache, gfp_mask);
	if (!cmd) {
		pr_err("Unable to allocate memory for struct iscsi_cmd.\n");
		return NULL;
	}

	cmd->conn = conn;
	INIT_LIST_HEAD(&cmd->i_list);
	INIT_LIST_HEAD(&cmd->datain_list);
	INIT_LIST_HEAD(&cmd->cmd_r2t_list);
	init_completion(&cmd->reject_comp);
	spin_lock_init(&cmd->datain_lock);
	spin_lock_init(&cmd->dataout_timeout_lock);
	spin_lock_init(&cmd->istate_lock);
	spin_lock_init(&cmd->error_lock);
	spin_lock_init(&cmd->r2t_lock);

	return cmd;
}

/*
 * Called from iscsi_handle_scsi_cmd()
 */
struct iscsi_cmd *iscsit_allocate_se_cmd(
	struct iscsi_conn *conn,
	u32 data_length,
	int data_direction,
	int iscsi_task_attr)
{
	struct iscsi_cmd *cmd;
	struct se_cmd *se_cmd;
	int sam_task_attr;

	cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
	if (!cmd)
		return NULL;

	cmd->data_direction = data_direction;
	cmd->data_length = data_length;
	/*
	 * Figure out the SAM Task Attribute for the incoming SCSI CDB
	 */
	if ((iscsi_task_attr == ISCSI_ATTR_UNTAGGED) ||
	    (iscsi_task_attr == ISCSI_ATTR_SIMPLE))
		sam_task_attr = MSG_SIMPLE_TAG;
	else if (iscsi_task_attr == ISCSI_ATTR_ORDERED)
		sam_task_attr = MSG_ORDERED_TAG;
	else if (iscsi_task_attr == ISCSI_ATTR_HEAD_OF_QUEUE)
		sam_task_attr = MSG_HEAD_TAG;
	else if (iscsi_task_attr == ISCSI_ATTR_ACA)
		sam_task_attr = MSG_ACA_TAG;
	else {
		pr_debug("Unknown iSCSI Task Attribute: 0x%02x, using"
			" MSG_SIMPLE_TAG\n", iscsi_task_attr);
		sam_task_attr = MSG_SIMPLE_TAG;
	}

	se_cmd = &cmd->se_cmd;
	/*
	 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
	 */
	transport_init_se_cmd(se_cmd, &lio_target_fabric_configfs->tf_ops,
			conn->sess->se_sess, data_length, data_direction,
			sam_task_attr, &cmd->sense_buffer[0]);
	return cmd;
}

struct iscsi_cmd *iscsit_allocate_se_cmd_for_tmr(
	struct iscsi_conn *conn,
	u8 function)
{
	struct iscsi_cmd *cmd;
	struct se_cmd *se_cmd;
	u8 tcm_function;

	cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
	if (!cmd)
		return NULL;

	cmd->data_direction = DMA_NONE;

	cmd->tmr_req = kzalloc(sizeof(struct iscsi_tmr_req), GFP_KERNEL);
	if (!cmd->tmr_req) {
		pr_err("Unable to allocate memory for"
			" Task Management command!\n");
		goto out;
	}
	/*
	 * TASK_REASSIGN for ERL=2 / connection stays inside of
	 * LIO-Target $FABRIC_MOD
	 */
	if (function == ISCSI_TM_FUNC_TASK_REASSIGN)
		return cmd;

	se_cmd = &cmd->se_cmd;
	/*
	 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
	 */
	transport_init_se_cmd(se_cmd, &lio_target_fabric_configfs->tf_ops,
			conn->sess->se_sess, 0, DMA_NONE,
			MSG_SIMPLE_TAG, &cmd->sense_buffer[0]);

	switch (function) {
	case ISCSI_TM_FUNC_ABORT_TASK:
		tcm_function = TMR_ABORT_TASK;
		break;
	case ISCSI_TM_FUNC_ABORT_TASK_SET:
		tcm_function = TMR_ABORT_TASK_SET;
		break;
	case ISCSI_TM_FUNC_CLEAR_ACA:
		tcm_function = TMR_CLEAR_ACA;
		break;
	case ISCSI_TM_FUNC_CLEAR_TASK_SET:
		tcm_function = TMR_CLEAR_TASK_SET;
		break;
	case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET:
		tcm_function = TMR_LUN_RESET;
		break;
	case ISCSI_TM_FUNC_TARGET_WARM_RESET:
		tcm_function = TMR_TARGET_WARM_RESET;
		break;
	case ISCSI_TM_FUNC_TARGET_COLD_RESET:
		tcm_function = TMR_TARGET_COLD_RESET;
		break;
	default:
		pr_err("Unknown iSCSI TMR Function:"
			" 0x%02x\n", function);
		goto out;
	}

	se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd,
				(void *)cmd->tmr_req, tcm_function,
				GFP_KERNEL);
	if (!se_cmd->se_tmr_req)
		goto out;

	cmd->tmr_req->se_tmr_req = se_cmd->se_tmr_req;

	return cmd;
out:
	iscsit_release_cmd(cmd);
	return NULL;
}

int iscsit_decide_list_to_build(
	struct iscsi_cmd *cmd,
	u32 immediate_data_length)
{
	struct iscsi_build_list bl;
	struct iscsi_conn *conn = cmd->conn;
	struct iscsi_session *sess = conn->sess;
	struct iscsi_node_attrib *na;

	if (sess->sess_ops->DataSequenceInOrder &&
	    sess->sess_ops->DataPDUInOrder)
		return 0;

	if (cmd->data_direction == DMA_NONE)
		return 0;

	na = iscsit_tpg_get_node_attrib(sess);
	memset(&bl, 0, sizeof(struct iscsi_build_list));

	if (cmd->data_direction == DMA_FROM_DEVICE) {
		bl.data_direction = ISCSI_PDU_READ;
		bl.type = PDULIST_NORMAL;
		if (na->random_datain_pdu_offsets)
			bl.randomize |= RANDOM_DATAIN_PDU_OFFSETS;
		if (na->random_datain_seq_offsets)
			bl.randomize |= RANDOM_DATAIN_SEQ_OFFSETS;
	} else {
		bl.data_direction = ISCSI_PDU_WRITE;
		bl.immediate_data_length = immediate_data_length;
		if (na->random_r2t_offsets)
			bl.randomize |= RANDOM_R2T_OFFSETS;

		if (!cmd->immediate_data && !cmd->unsolicited_data)
			bl.type = PDULIST_NORMAL;
		else if (cmd->immediate_data && !cmd->unsolicited_data)
			bl.type = PDULIST_IMMEDIATE;
		else if (!cmd->immediate_data && cmd->unsolicited_data)
			bl.type = PDULIST_UNSOLICITED;
		else if (cmd->immediate_data && cmd->unsolicited_data)
			bl.type = PDULIST_IMMEDIATE_AND_UNSOLICITED;
	}

	return iscsit_do_build_list(cmd, &bl);
}

struct iscsi_seq *iscsit_get_seq_holder_for_datain(
	struct iscsi_cmd *cmd,
	u32 seq_send_order)
{
	u32 i;

	for (i = 0; i < cmd->seq_count; i++)
		if (cmd->seq_list[i].seq_send_order == seq_send_order)
			return &cmd->seq_list[i];

	return NULL;
}

struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsi_cmd *cmd)
{
	u32 i;

	if (!cmd->seq_list) {
		pr_err("struct iscsi_cmd->seq_list is NULL!\n");
		return NULL;
	}

	for (i = 0; i < cmd->seq_count; i++) {
		if (cmd->seq_list[i].type != SEQTYPE_NORMAL)
			continue;
		if (cmd->seq_list[i].seq_send_order == cmd->seq_send_order) {
			cmd->seq_send_order++;
			return &cmd->seq_list[i];
		}
	}

	return NULL;
}

struct iscsi_r2t *iscsit_get_holder_for_r2tsn(
	struct iscsi_cmd *cmd,
	u32 r2t_sn)
{
	struct iscsi_r2t *r2t;

	spin_lock_bh(&cmd->r2t_lock);
	list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
		if (r2t->r2t_sn == r2t_sn) {
			spin_unlock_bh(&cmd->r2t_lock);
			return r2t;
		}
	}
	spin_unlock_bh(&cmd->r2t_lock);

	return NULL;
}

static inline int iscsit_check_received_cmdsn(struct iscsi_session *sess, u32 cmdsn)
{
	int ret;

	/*
	 * This is the proper method of checking received CmdSN against
	 * ExpCmdSN and MaxCmdSN values, as well as accounting for out
	 * of order CmdSNs due to multiple connection sessions and/or
	 * CRC failures.
	 */
	if (iscsi_sna_gt(cmdsn, sess->max_cmd_sn)) {
		pr_err("Received CmdSN: 0x%08x is greater than"
			" MaxCmdSN: 0x%08x, protocol error.\n", cmdsn,
			sess->max_cmd_sn);
		ret = CMDSN_ERROR_CANNOT_RECOVER;

	} else if (cmdsn == sess->exp_cmd_sn) {
		sess->exp_cmd_sn++;
		pr_debug("Received CmdSN matches ExpCmdSN,"
			" incremented ExpCmdSN to: 0x%08x\n",
			sess->exp_cmd_sn);
		ret = CMDSN_NORMAL_OPERATION;

	} else if (iscsi_sna_gt(cmdsn, sess->exp_cmd_sn)) {
		pr_debug("Received CmdSN: 0x%08x is greater"
			" than ExpCmdSN: 0x%08x, not acknowledging.\n",
			cmdsn, sess->exp_cmd_sn);
		ret = CMDSN_HIGHER_THAN_EXP;

	} else {
		pr_err("Received CmdSN: 0x%08x is less than"
			" ExpCmdSN: 0x%08x, ignoring.\n", cmdsn,
			sess->exp_cmd_sn);
		ret = CMDSN_LOWER_THAN_EXP;
	}

	return ret;
}

/*
 * Commands may be received out of order if MC/S is in use.
 * Ensure they are executed in CmdSN order.
 */
int iscsit_sequence_cmd(
	struct iscsi_conn *conn,
	struct iscsi_cmd *cmd,
	u32 cmdsn)
{
	int ret;
	int cmdsn_ret;

	mutex_lock(&conn->sess->cmdsn_mutex);

	cmdsn_ret = iscsit_check_received_cmdsn(conn->sess, cmdsn);
	switch (cmdsn_ret) {
	case CMDSN_NORMAL_OPERATION:
		ret = iscsit_execute_cmd(cmd, 0);
		if ((ret >= 0) && !list_empty(&conn->sess->sess_ooo_cmdsn_list))
			iscsit_execute_ooo_cmdsns(conn->sess);
		break;
	case CMDSN_HIGHER_THAN_EXP:
		ret = iscsit_handle_ooo_cmdsn(conn->sess, cmd, cmdsn);
		break;
	case CMDSN_LOWER_THAN_EXP:
		cmd->i_state = ISTATE_REMOVE;
		iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state);
		ret = cmdsn_ret;
		break;
	default:
		ret = cmdsn_ret;
		break;
	}
	mutex_unlock(&conn->sess->cmdsn_mutex);

	return ret;
}

int iscsit_check_unsolicited_dataout(struct iscsi_cmd *cmd, unsigned char *buf)
{
	struct iscsi_conn *conn = cmd->conn;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct iscsi_data *hdr = (struct iscsi_data *) buf;
	u32 payload_length = ntoh24(hdr->dlength);

	if (conn->sess->sess_ops->InitialR2T) {
		pr_err("Received unexpected unsolicited data"
			" while InitialR2T=Yes, protocol error.\n");
		transport_send_check_condition_and_sense(se_cmd,
				TCM_UNEXPECTED_UNSOLICITED_DATA, 0);
		return -1;
	}

	if ((cmd->first_burst_len + payload_length) >
	     conn->sess->sess_ops->FirstBurstLength) {
		pr_err("Total %u bytes exceeds FirstBurstLength: %u"
			" for this Unsolicited DataOut Burst.\n",
			(cmd->first_burst_len + payload_length),
			conn->sess->sess_ops->FirstBurstLength);
		transport_send_check_condition_and_sense(se_cmd,
				TCM_INCORRECT_AMOUNT_OF_DATA, 0);
		return -1;
	}

	if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL))
		return 0;

	if (((cmd->first_burst_len + payload_length) != cmd->data_length) &&
	    ((cmd->first_burst_len + payload_length) !=
	      conn->sess->sess_ops->FirstBurstLength)) {
		pr_err("Unsolicited non-immediate data received %u"
			" does not equal FirstBurstLength: %u, and does"
			" not equal ExpXferLen %u.\n",
			(cmd->first_burst_len + payload_length),
			conn->sess->sess_ops->FirstBurstLength, cmd->data_length);
		transport_send_check_condition_and_sense(se_cmd,
				TCM_INCORRECT_AMOUNT_OF_DATA, 0);
		return -1;
	}
	return 0;
}

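/*
 * Scan conn->conn_cmd_list for a command matching the given Initiator
 * Task Tag, logging an error and returning NULL when no match is found.
 */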
struct iscsi_cmd *iscsit_find_cmd_from_itt(
	struct iscsi_conn *conn,
	u32 init_task_tag)
{
	struct iscsi_cmd *cmd;

	spin_lock_bh(&conn->cmd_lock);
	list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
		if (cmd->init_task_tag == init_task_tag) {
			spin_unlock_bh(&conn->cmd_lock);
			return cmd;
		}
	}
	spin_unlock_bh(&conn->cmd_lock);

	pr_err("Unable to locate ITT: 0x%08x on CID: %hu",
			init_task_tag, conn->cid);
	return NULL;
}

struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump(
	struct iscsi_conn *conn,
	u32 init_task_tag,
	u32 length)
{
	struct iscsi_cmd *cmd;

	spin_lock_bh(&conn->cmd_lock);
	list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
		if (cmd->init_task_tag == init_task_tag) {
			spin_unlock_bh(&conn->cmd_lock);
			return cmd;
		}
	}
	spin_unlock_bh(&conn->cmd_lock);

	pr_err("Unable to locate ITT: 0x%08x on CID: %hu,"
			" dumping payload\n", init_task_tag, conn->cid);
	if (length)
		iscsit_dump_data_payload(conn, length, 1);

	return NULL;
}

struct iscsi_cmd *iscsit_find_cmd_from_ttt(
	struct iscsi_conn *conn,
	u32 targ_xfer_tag)
{
	struct iscsi_cmd *cmd = NULL;

	spin_lock_bh(&conn->cmd_lock);
	list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
		if (cmd->targ_xfer_tag == targ_xfer_tag) {
			spin_unlock_bh(&conn->cmd_lock);
			return cmd;
		}
	}
	spin_unlock_bh(&conn->cmd_lock);

	pr_err("Unable to locate TTT: 0x%08x on CID: %hu\n",
			targ_xfer_tag, conn->cid);
	return NULL;
}

int iscsit_find_cmd_for_recovery(
	struct iscsi_session *sess,
	struct iscsi_cmd **cmd_ptr,
	struct iscsi_conn_recovery **cr_ptr,
	u32 init_task_tag)
{
	struct iscsi_cmd *cmd = NULL;
	struct iscsi_conn_recovery *cr;
	/*
	 * Scan through the inactive connection recovery list's command list.
	 * If init_task_tag matches, the command is still allegiant.
	 */
	spin_lock(&sess->cr_i_lock);
	list_for_each_entry(cr, &sess->cr_inactive_list, cr_list) {
		spin_lock(&cr->conn_recovery_cmd_lock);
		list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_list) {
			if (cmd->init_task_tag == init_task_tag) {
				spin_unlock(&cr->conn_recovery_cmd_lock);
				spin_unlock(&sess->cr_i_lock);

				*cr_ptr = cr;
				*cmd_ptr = cmd;
				return -2;
			}
		}
		spin_unlock(&cr->conn_recovery_cmd_lock);
	}
	spin_unlock(&sess->cr_i_lock);
	/*
	 * Scan through the active connection recovery list's command list.
	 * If init_task_tag matches, the command is ready to be reassigned.
	 */
	spin_lock(&sess->cr_a_lock);
	list_for_each_entry(cr, &sess->cr_active_list, cr_list) {
		spin_lock(&cr->conn_recovery_cmd_lock);
		list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_list) {
			if (cmd->init_task_tag == init_task_tag) {
				spin_unlock(&cr->conn_recovery_cmd_lock);
				spin_unlock(&sess->cr_a_lock);

				*cr_ptr = cr;
				*cmd_ptr = cmd;
				return 0;
			}
		}
		spin_unlock(&cr->conn_recovery_cmd_lock);
	}
	spin_unlock(&sess->cr_a_lock);

	return -1;
}

void iscsit_add_cmd_to_immediate_queue(
	struct iscsi_cmd *cmd,
	struct iscsi_conn *conn,
	u8 state)
{
	struct iscsi_queue_req *qr;

	qr = kmem_cache_zalloc(lio_qr_cache, GFP_ATOMIC);
	if (!qr) {
		pr_err("Unable to allocate memory for"
			" struct iscsi_queue_req\n");
		return;
	}
	INIT_LIST_HEAD(&qr->qr_list);
	qr->cmd = cmd;
	qr->state = state;

	spin_lock_bh(&conn->immed_queue_lock);
	list_add_tail(&qr->qr_list, &conn->immed_queue_list);
	atomic_inc(&cmd->immed_queue_count);
	atomic_set(&conn->check_immediate_queue, 1);
	spin_unlock_bh(&conn->immed_queue_lock);

	wake_up_process(conn->thread_set->tx_thread);
}

struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *conn)
{
	struct iscsi_queue_req *qr;

	spin_lock_bh(&conn->immed_queue_lock);
	if (list_empty(&conn->immed_queue_list)) {
		spin_unlock_bh(&conn->immed_queue_lock);
		return NULL;
	}
	list_for_each_entry(qr, &conn->immed_queue_list, qr_list)
		break;

	list_del(&qr->qr_list);
	if (qr->cmd)
		atomic_dec(&qr->cmd->immed_queue_count);
	spin_unlock_bh(&conn->immed_queue_lock);

	return qr;
}

static void iscsit_remove_cmd_from_immediate_queue(
	struct iscsi_cmd *cmd,
	struct iscsi_conn *conn)
{
	struct iscsi_queue_req *qr, *qr_tmp;

	spin_lock_bh(&conn->immed_queue_lock);
	if (!atomic_read(&cmd->immed_queue_count)) {
		spin_unlock_bh(&conn->immed_queue_lock);
		return;
	}

	list_for_each_entry_safe(qr, qr_tmp, &conn->immed_queue_list, qr_list) {
		if (qr->cmd != cmd)
			continue;

		atomic_dec(&qr->cmd->immed_queue_count);
		list_del(&qr->qr_list);
		kmem_cache_free(lio_qr_cache, qr);
	}
	spin_unlock_bh(&conn->immed_queue_lock);

	if (atomic_read(&cmd->immed_queue_count)) {
		pr_err("ITT: 0x%08x immed_queue_count: %d\n",
			cmd->init_task_tag,
			atomic_read(&cmd->immed_queue_count));
	}
}

void iscsit_add_cmd_to_response_queue(
	struct iscsi_cmd *cmd,
	struct iscsi_conn *conn,
	u8 state)
{
	struct iscsi_queue_req *qr;

	qr = kmem_cache_zalloc(lio_qr_cache, GFP_ATOMIC);
	if (!qr) {
		pr_err("Unable to allocate memory for"
			" struct iscsi_queue_req\n");
		return;
	}
	INIT_LIST_HEAD(&qr->qr_list);
	qr->cmd = cmd;
	qr->state = state;

	spin_lock_bh(&conn->response_queue_lock);
	list_add_tail(&qr->qr_list, &conn->response_queue_list);
	atomic_inc(&cmd->response_queue_count);
	spin_unlock_bh(&conn->response_queue_lock);

	wake_up_process(conn->thread_set->tx_thread);
}

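/*
 * Remove and return the first queued entry from conn->response_queue_list,
 * or NULL if the response queue is empty.
 */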
struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *conn)
{
	struct iscsi_queue_req *qr;

	spin_lock_bh(&conn->response_queue_lock);
	if (list_empty(&conn->response_queue_list)) {
		spin_unlock_bh(&conn->response_queue_lock);
		return NULL;
	}

	list_for_each_entry(qr, &conn->response_queue_list, qr_list)
		break;

	list_del(&qr->qr_list);
	if (qr->cmd)
		atomic_dec(&qr->cmd->response_queue_count);
	spin_unlock_bh(&conn->response_queue_lock);

	return qr;
}

static void iscsit_remove_cmd_from_response_queue(
	struct iscsi_cmd *cmd,
	struct iscsi_conn *conn)
{
	struct iscsi_queue_req *qr, *qr_tmp;

	spin_lock_bh(&conn->response_queue_lock);
	if (!atomic_read(&cmd->response_queue_count)) {
		spin_unlock_bh(&conn->response_queue_lock);
		return;
	}

	list_for_each_entry_safe(qr, qr_tmp, &conn->response_queue_list,
				qr_list) {
		if (qr->cmd != cmd)
			continue;

		atomic_dec(&qr->cmd->response_queue_count);
		list_del(&qr->qr_list);
		kmem_cache_free(lio_qr_cache, qr);
	}
	spin_unlock_bh(&conn->response_queue_lock);

	if (atomic_read(&cmd->response_queue_count)) {
		pr_err("ITT: 0x%08x response_queue_count: %d\n",
			cmd->init_task_tag,
			atomic_read(&cmd->response_queue_count));
	}
}

void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *conn)
{
	struct iscsi_queue_req *qr, *qr_tmp;

	spin_lock_bh(&conn->immed_queue_lock);
	list_for_each_entry_safe(qr, qr_tmp, &conn->immed_queue_list, qr_list) {
		list_del(&qr->qr_list);
		if (qr->cmd)
			atomic_dec(&qr->cmd->immed_queue_count);

		kmem_cache_free(lio_qr_cache, qr);
	}
	spin_unlock_bh(&conn->immed_queue_lock);

	spin_lock_bh(&conn->response_queue_lock);
	list_for_each_entry_safe(qr, qr_tmp, &conn->response_queue_list,
			qr_list) {
		list_del(&qr->qr_list);
		if (qr->cmd)
			atomic_dec(&qr->cmd->response_queue_count);

		kmem_cache_free(lio_qr_cache, qr);
	}
	spin_unlock_bh(&conn->response_queue_lock);
}

void iscsit_release_cmd(struct iscsi_cmd *cmd)
{
	struct iscsi_conn *conn = cmd->conn;
	int i;

	iscsit_free_r2ts_from_list(cmd);
	iscsit_free_all_datain_reqs(cmd);

	kfree(cmd->buf_ptr);
	kfree(cmd->pdu_list);
	kfree(cmd->seq_list);
	kfree(cmd->tmr_req);
	kfree(cmd->iov_data);

	for (i = 0; i < cmd->t_mem_sg_nents; i++)
		__free_page(sg_page(&cmd->t_mem_sg[i]));

	kfree(cmd->t_mem_sg);

	if (conn) {
		iscsit_remove_cmd_from_immediate_queue(cmd, conn);
		iscsit_remove_cmd_from_response_queue(cmd, conn);
	}

	kmem_cache_free(lio_cmd_cache, cmd);
}

void iscsit_free_cmd(struct iscsi_cmd *cmd)
{
	/*
	 * Determine if a struct se_cmd is associated with
	 * this struct iscsi_cmd.
	 */
	switch (cmd->iscsi_opcode) {
	case ISCSI_OP_SCSI_CMD:
	case ISCSI_OP_SCSI_TMFUNC:
		transport_generic_free_cmd(&cmd->se_cmd, 1);
		break;
	default:
		iscsit_release_cmd(cmd);
		break;
	}
}

int iscsit_check_session_usage_count(struct iscsi_session *sess)
{
	spin_lock_bh(&sess->session_usage_lock);
	if (sess->session_usage_count != 0) {
		sess->session_waiting_on_uc = 1;
		spin_unlock_bh(&sess->session_usage_lock);
		if (in_interrupt())
			return 2;

		wait_for_completion(&sess->session_waiting_on_uc_comp);
		return 1;
	}
	spin_unlock_bh(&sess->session_usage_lock);

	return 0;
}

void iscsit_dec_session_usage_count(struct iscsi_session *sess)
{
	spin_lock_bh(&sess->session_usage_lock);
	sess->session_usage_count--;

	if (!sess->session_usage_count && sess->session_waiting_on_uc)
		complete(&sess->session_waiting_on_uc_comp);

	spin_unlock_bh(&sess->session_usage_lock);
}

void iscsit_inc_session_usage_count(struct iscsi_session *sess)
{
	spin_lock_bh(&sess->session_usage_lock);
	sess->session_usage_count++;
	spin_unlock_bh(&sess->session_usage_lock);
}

/*
 * Setup conn->if_marker and conn->of_marker values based upon
 * the initial marker-less interval. (see iSCSI v19 A.2)
 */
int iscsit_set_sync_and_steering_values(struct iscsi_conn *conn)
{
	int login_ifmarker_count = 0, login_ofmarker_count = 0, next_marker = 0;
	/*
	 * IFMarkInt and OFMarkInt are negotiated as 32-bit words.
	 */
	u32 IFMarkInt = (conn->conn_ops->IFMarkInt * 4);
	u32 OFMarkInt = (conn->conn_ops->OFMarkInt * 4);

	if (conn->conn_ops->OFMarker) {
		/*
		 * Account for the first Login Command received not
		 * via iscsi_recv_msg().
		 */
		conn->of_marker += ISCSI_HDR_LEN;
		if (conn->of_marker <= OFMarkInt) {
			conn->of_marker = (OFMarkInt - conn->of_marker);
		} else {
			login_ofmarker_count = (conn->of_marker / OFMarkInt);
			next_marker = (OFMarkInt * (login_ofmarker_count + 1)) +
					(login_ofmarker_count * MARKER_SIZE);
			conn->of_marker = (next_marker - conn->of_marker);
		}
		conn->of_marker_offset = 0;
		pr_debug("Setting OFMarker value to %u based on Initial"
			" Markerless Interval.\n", conn->of_marker);
	}

	if (conn->conn_ops->IFMarker) {
		if (conn->if_marker <= IFMarkInt) {
			conn->if_marker = (IFMarkInt - conn->if_marker);
		} else {
			login_ifmarker_count = (conn->if_marker / IFMarkInt);
			next_marker = (IFMarkInt * (login_ifmarker_count + 1)) +
					(login_ifmarker_count * MARKER_SIZE);
			conn->if_marker = (next_marker - conn->if_marker);
		}
		pr_debug("Setting IFMarker value to %u based on Initial"
			" Markerless Interval.\n", conn->if_marker);
	}

	return 0;
}

struct iscsi_conn *iscsit_get_conn_from_cid(struct iscsi_session *sess, u16 cid)
{
	struct iscsi_conn *conn;

	spin_lock_bh(&sess->conn_lock);
	list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
		if ((conn->cid == cid) &&
		    (conn->conn_state == TARG_CONN_STATE_LOGGED_IN)) {
			iscsit_inc_conn_usage_count(conn);
			spin_unlock_bh(&sess->conn_lock);
			return conn;
		}
	}
	spin_unlock_bh(&sess->conn_lock);

	return NULL;
}

struct iscsi_conn *iscsit_get_conn_from_cid_rcfr(struct iscsi_session *sess, u16 cid)
{
	struct iscsi_conn *conn;

	spin_lock_bh(&sess->conn_lock);
	list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
		if (conn->cid == cid) {
			iscsit_inc_conn_usage_count(conn);
			spin_lock(&conn->state_lock);
			atomic_set(&conn->connection_wait_rcfr, 1);
			spin_unlock(&conn->state_lock);
			spin_unlock_bh(&sess->conn_lock);
			return conn;
		}
	}
	spin_unlock_bh(&sess->conn_lock);

	return NULL;
}

void iscsit_check_conn_usage_count(struct iscsi_conn *conn)
{
	spin_lock_bh(&conn->conn_usage_lock);
	if (conn->conn_usage_count != 0) {
		conn->conn_waiting_on_uc = 1;
		spin_unlock_bh(&conn->conn_usage_lock);

		wait_for_completion(&conn->conn_waiting_on_uc_comp);
		return;
	}
	spin_unlock_bh(&conn->conn_usage_lock);
}

void iscsit_dec_conn_usage_count(struct iscsi_conn *conn)
{
	spin_lock_bh(&conn->conn_usage_lock);
	conn->conn_usage_count--;

	if (!conn->conn_usage_count && conn->conn_waiting_on_uc)
		complete(&conn->conn_waiting_on_uc_comp);

	spin_unlock_bh(&conn->conn_usage_lock);
}

void iscsit_inc_conn_usage_count(struct iscsi_conn *conn)
{
	spin_lock_bh(&conn->conn_usage_lock);
	conn->conn_usage_count++;
	spin_unlock_bh(&conn->conn_usage_lock);
}

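/*
 * Allocate and queue a NopIN PDU. When want_response is set, a valid
 * Target Transfer Tag is assigned and the NopIN response timer is started.
 */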
static int iscsit_add_nopin(struct iscsi_conn *conn, int want_response)
{
	u8 state;
	struct iscsi_cmd *cmd;

	cmd = iscsit_allocate_cmd(conn, GFP_ATOMIC);
	if (!cmd)
		return -1;

	cmd->iscsi_opcode = ISCSI_OP_NOOP_IN;
	state = (want_response) ? ISTATE_SEND_NOPIN_WANT_RESPONSE :
			ISTATE_SEND_NOPIN_NO_RESPONSE;
	cmd->init_task_tag = 0xFFFFFFFF;
	spin_lock_bh(&conn->sess->ttt_lock);
	cmd->targ_xfer_tag = (want_response) ? conn->sess->targ_xfer_tag++ :
			0xFFFFFFFF;
	if (want_response && (cmd->targ_xfer_tag == 0xFFFFFFFF))
		cmd->targ_xfer_tag = conn->sess->targ_xfer_tag++;
	spin_unlock_bh(&conn->sess->ttt_lock);

	spin_lock_bh(&conn->cmd_lock);
	list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
	spin_unlock_bh(&conn->cmd_lock);

	if (want_response)
		iscsit_start_nopin_response_timer(conn);
	iscsit_add_cmd_to_immediate_queue(cmd, conn, state);

	return 0;
}

static void iscsit_handle_nopin_response_timeout(unsigned long data)
{
	struct iscsi_conn *conn = (struct iscsi_conn *) data;

	iscsit_inc_conn_usage_count(conn);

	spin_lock_bh(&conn->nopin_timer_lock);
	if (conn->nopin_response_timer_flags & ISCSI_TF_STOP) {
		spin_unlock_bh(&conn->nopin_timer_lock);
		iscsit_dec_conn_usage_count(conn);
		return;
	}

	pr_debug("Did not receive response to NOPIN on CID: %hu on"
		" SID: %u, failing connection.\n", conn->cid,
			conn->sess->sid);
	conn->nopin_response_timer_flags &= ~ISCSI_TF_RUNNING;
	spin_unlock_bh(&conn->nopin_timer_lock);

	{
	struct iscsi_portal_group *tpg = conn->sess->tpg;
	struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;

	if (tiqn) {
		spin_lock_bh(&tiqn->sess_err_stats.lock);
		strcpy(tiqn->sess_err_stats.last_sess_fail_rem_name,
				(void *)conn->sess->sess_ops->InitiatorName);
		tiqn->sess_err_stats.last_sess_failure_type =
				ISCSI_SESS_ERR_CXN_TIMEOUT;
		tiqn->sess_err_stats.cxn_timeout_errors++;
		conn->sess->conn_timeout_errors++;
		spin_unlock_bh(&tiqn->sess_err_stats.lock);
	}
	}

	iscsit_cause_connection_reinstatement(conn, 0);
	iscsit_dec_conn_usage_count(conn);
}

void iscsit_mod_nopin_response_timer(struct iscsi_conn *conn)
{
	struct iscsi_session *sess = conn->sess;
	struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);

	spin_lock_bh(&conn->nopin_timer_lock);
	if (!(conn->nopin_response_timer_flags & ISCSI_TF_RUNNING)) {
		spin_unlock_bh(&conn->nopin_timer_lock);
		return;
	}

	mod_timer(&conn->nopin_response_timer,
		(get_jiffies_64() + na->nopin_response_timeout * HZ));
	spin_unlock_bh(&conn->nopin_timer_lock);
}

/*
 * Takes conn->nopin_timer_lock itself; must not be called with it held.
 */
void iscsit_start_nopin_response_timer(struct iscsi_conn *conn)
{
	struct iscsi_session *sess = conn->sess;
	struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);

	spin_lock_bh(&conn->nopin_timer_lock);
	if (conn->nopin_response_timer_flags & ISCSI_TF_RUNNING) {
		spin_unlock_bh(&conn->nopin_timer_lock);
		return;
	}

	init_timer(&conn->nopin_response_timer);
	conn->nopin_response_timer.expires =
		(get_jiffies_64() + na->nopin_response_timeout * HZ);
	conn->nopin_response_timer.data = (unsigned long)conn;
	conn->nopin_response_timer.function = iscsit_handle_nopin_response_timeout;
	conn->nopin_response_timer_flags &= ~ISCSI_TF_STOP;
	conn->nopin_response_timer_flags |= ISCSI_TF_RUNNING;
	add_timer(&conn->nopin_response_timer);

	pr_debug("Started NOPIN Response Timer on CID: %d to %u"
		" seconds\n", conn->cid, na->nopin_response_timeout);
	spin_unlock_bh(&conn->nopin_timer_lock);
}

void iscsit_stop_nopin_response_timer(struct iscsi_conn *conn)
{
	spin_lock_bh(&conn->nopin_timer_lock);
	if (!(conn->nopin_response_timer_flags & ISCSI_TF_RUNNING)) {
		spin_unlock_bh(&conn->nopin_timer_lock);
		return;
	}
	conn->nopin_response_timer_flags |= ISCSI_TF_STOP;
	spin_unlock_bh(&conn->nopin_timer_lock);

	del_timer_sync(&conn->nopin_response_timer);

	spin_lock_bh(&conn->nopin_timer_lock);
	conn->nopin_response_timer_flags &= ~ISCSI_TF_RUNNING;
	spin_unlock_bh(&conn->nopin_timer_lock);
}

static void iscsit_handle_nopin_timeout(unsigned long data)
{
	struct iscsi_conn *conn = (struct iscsi_conn *) data;

	iscsit_inc_conn_usage_count(conn);

	spin_lock_bh(&conn->nopin_timer_lock);
	if (conn->nopin_timer_flags & ISCSI_TF_STOP) {
		spin_unlock_bh(&conn->nopin_timer_lock);
		iscsit_dec_conn_usage_count(conn);
		return;
	}
	conn->nopin_timer_flags &= ~ISCSI_TF_RUNNING;
	spin_unlock_bh(&conn->nopin_timer_lock);

	iscsit_add_nopin(conn, 1);
	iscsit_dec_conn_usage_count(conn);
}

/*
 * Called with conn->nopin_timer_lock held.
 */
void __iscsit_start_nopin_timer(struct iscsi_conn *conn)
{
	struct iscsi_session *sess = conn->sess;
	struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
	/*
	 * NOPIN timeout is disabled.
	 */
	if (!na->nopin_timeout)
		return;

	if (conn->nopin_timer_flags & ISCSI_TF_RUNNING)
		return;

	init_timer(&conn->nopin_timer);
	conn->nopin_timer.expires = (get_jiffies_64() + na->nopin_timeout * HZ);
	conn->nopin_timer.data = (unsigned long)conn;
	conn->nopin_timer.function = iscsit_handle_nopin_timeout;
	conn->nopin_timer_flags &= ~ISCSI_TF_STOP;
	conn->nopin_timer_flags |= ISCSI_TF_RUNNING;
	add_timer(&conn->nopin_timer);

	pr_debug("Started NOPIN Timer on CID: %d at %u second"
		" interval\n", conn->cid, na->nopin_timeout);
}

void iscsit_start_nopin_timer(struct iscsi_conn *conn)
{
	struct iscsi_session *sess = conn->sess;
	struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
	/*
	 * NOPIN timeout is disabled.
	 */
	if (!na->nopin_timeout)
		return;

	spin_lock_bh(&conn->nopin_timer_lock);
	if (conn->nopin_timer_flags & ISCSI_TF_RUNNING) {
		spin_unlock_bh(&conn->nopin_timer_lock);
		return;
	}

	init_timer(&conn->nopin_timer);
	conn->nopin_timer.expires = (get_jiffies_64() + na->nopin_timeout * HZ);
	conn->nopin_timer.data = (unsigned long)conn;
	conn->nopin_timer.function = iscsit_handle_nopin_timeout;
	conn->nopin_timer_flags &= ~ISCSI_TF_STOP;
	conn->nopin_timer_flags |= ISCSI_TF_RUNNING;
	add_timer(&conn->nopin_timer);

	pr_debug("Started NOPIN Timer on CID: %d at %u second"
		" interval\n", conn->cid, na->nopin_timeout);
	spin_unlock_bh(&conn->nopin_timer_lock);
}

void iscsit_stop_nopin_timer(struct iscsi_conn *conn)
{
	spin_lock_bh(&conn->nopin_timer_lock);
	if (!(conn->nopin_timer_flags & ISCSI_TF_RUNNING)) {
		spin_unlock_bh(&conn->nopin_timer_lock);
		return;
	}
	conn->nopin_timer_flags |= ISCSI_TF_STOP;
	spin_unlock_bh(&conn->nopin_timer_lock);

	del_timer_sync(&conn->nopin_timer);

	spin_lock_bh(&conn->nopin_timer_lock);
	conn->nopin_timer_flags &= ~ISCSI_TF_RUNNING;
	spin_unlock_bh(&conn->nopin_timer_lock);
}

int iscsit_send_tx_data(
	struct iscsi_cmd *cmd,
	struct iscsi_conn *conn,
	int use_misc)
{
	int tx_sent, tx_size;
	u32 iov_count;
	struct kvec *iov;

send_data:
	tx_size = cmd->tx_size;

	if (!use_misc) {
		iov = &cmd->iov_data[0];
		iov_count = cmd->iov_data_count;
	} else {
		iov = &cmd->iov_misc[0];
		iov_count = cmd->iov_misc_count;
	}

	tx_sent = tx_data(conn, &iov[0], iov_count, tx_size);
	if (tx_size != tx_sent) {
		if (tx_sent == -EAGAIN) {
			pr_err("tx_data() returned -EAGAIN\n");
			goto send_data;
		} else
			return -1;
	}
	cmd->tx_size = 0;

	return 0;
}

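/*
 * Transmit the PDU header via tx_data(), then send the data payload
 * directly from the command's scatterlist pages with sock->ops->sendpage(),
 * followed by any padding and the data digest from cmd->iov_data[].
 */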
int iscsit_fe_sendpage_sg(
	struct iscsi_cmd *cmd,
	struct iscsi_conn *conn)
{
	struct scatterlist *sg = cmd->first_data_sg;
	struct kvec iov;
	u32 tx_hdr_size, data_len;
	u32 offset = cmd->first_data_sg_off;
	int tx_sent, iov_off;

send_hdr:
	tx_hdr_size = ISCSI_HDR_LEN;
	if (conn->conn_ops->HeaderDigest)
		tx_hdr_size += ISCSI_CRC_LEN;

	iov.iov_base = cmd->pdu;
	iov.iov_len = tx_hdr_size;

	tx_sent = tx_data(conn, &iov, 1, tx_hdr_size);
	if (tx_hdr_size != tx_sent) {
		if (tx_sent == -EAGAIN) {
			pr_err("tx_data() returned -EAGAIN\n");
			goto send_hdr;
		}
		return -1;
	}

	data_len = cmd->tx_size - tx_hdr_size - cmd->padding;
	/*
	 * Set iov_off used by padding and data digest tx_data() calls below
	 * in order to determine proper offset into cmd->iov_data[]
	 */
	if (conn->conn_ops->DataDigest) {
		data_len -= ISCSI_CRC_LEN;
		if (cmd->padding)
			iov_off = (cmd->iov_data_count - 2);
		else
			iov_off = (cmd->iov_data_count - 1);
	} else {
		iov_off = (cmd->iov_data_count - 1);
	}
	/*
	 * Perform sendpage() for each page in the scatterlist
	 */
	while (data_len) {
		u32 space = (sg->length - offset);
		u32 sub_len = min_t(u32, data_len, space);
send_pg:
		tx_sent = conn->sock->ops->sendpage(conn->sock,
				sg_page(sg), sg->offset + offset, sub_len, 0);
		if (tx_sent != sub_len) {
			if (tx_sent == -EAGAIN) {
				pr_err("tcp_sendpage() returned"
					" -EAGAIN\n");
				goto send_pg;
			}

			pr_err("tcp_sendpage() failure: %d\n",
				tx_sent);
			return -1;
		}

		data_len -= sub_len;
		offset = 0;
		sg = sg_next(sg);
	}

send_padding:
	if (cmd->padding) {
		struct kvec *iov_p = &cmd->iov_data[iov_off++];

		tx_sent = tx_data(conn, iov_p, 1, cmd->padding);
		if (cmd->padding != tx_sent) {
			if (tx_sent == -EAGAIN) {
				pr_err("tx_data() returned -EAGAIN\n");
				goto send_padding;
			}
			return -1;
		}
	}

send_datacrc:
	if (conn->conn_ops->DataDigest) {
		struct kvec *iov_d = &cmd->iov_data[iov_off];

		tx_sent = tx_data(conn, iov_d, 1, ISCSI_CRC_LEN);
		if (ISCSI_CRC_LEN != tx_sent) {
			if (tx_sent == -EAGAIN) {
				pr_err("tx_data() returned -EAGAIN\n");
				goto send_datacrc;
			}
			return -1;
		}
	}

	return 0;
}

/*
 * This function is used mainly for sending an ISCSI_TARG_LOGIN_RSP PDU
 * back to the Initiator when an exception condition occurs with the
 * errors set in status_class and status_detail.
 *
 * Parameters: iSCSI Connection, Status Class, Status Detail.
 * Returns:    0 on success, -1 on error.
 */
int iscsit_tx_login_rsp(struct iscsi_conn *conn, u8 status_class, u8 status_detail)
{
	u8 iscsi_hdr[ISCSI_HDR_LEN];
	int err;
	struct kvec iov;
	struct iscsi_login_rsp *hdr;

	iscsit_collect_login_stats(conn, status_class, status_detail);

	memset(&iov, 0, sizeof(struct kvec));
	memset(&iscsi_hdr, 0x0, ISCSI_HDR_LEN);

	hdr = (struct iscsi_login_rsp *)&iscsi_hdr;
	hdr->opcode = ISCSI_OP_LOGIN_RSP;
	hdr->status_class = status_class;
	hdr->status_detail = status_detail;
	hdr->itt = cpu_to_be32(conn->login_itt);

	iov.iov_base = &iscsi_hdr;
	iov.iov_len = ISCSI_HDR_LEN;

	PRINT_BUFF(iscsi_hdr, ISCSI_HDR_LEN);

	err = tx_data(conn, &iov, 1, ISCSI_HDR_LEN);
	if (err != ISCSI_HDR_LEN) {
		pr_err("tx_data returned less than expected\n");
		return -1;
	}

	return 0;
}

void iscsit_print_session_params(struct iscsi_session *sess)
{
	struct iscsi_conn *conn;

	pr_debug("-----------------------------[Session Params for"
		" SID: %u]-----------------------------\n", sess->sid);
	spin_lock_bh(&sess->conn_lock);
	list_for_each_entry(conn, &sess->sess_conn_list, conn_list)
		iscsi_dump_conn_ops(conn->conn_ops);
	spin_unlock_bh(&sess->conn_lock);

	iscsi_dump_sess_ops(sess->sess_ops);
}

static int iscsit_do_rx_data(
	struct iscsi_conn *conn,
	struct iscsi_data_count *count)
{
	int data = count->data_length, rx_loop = 0, total_rx = 0, iov_len;
	struct kvec *iov_p;
	struct msghdr msg;

	if (!conn || !conn->sock || !conn->conn_ops)
		return -1;

	memset(&msg, 0, sizeof(struct msghdr));

	iov_p = count->iov;
	iov_len = count->iov_count;

	while (total_rx < data) {
		rx_loop = kernel_recvmsg(conn->sock, &msg, iov_p, iov_len,
					(data - total_rx), MSG_WAITALL);
		if (rx_loop <= 0) {
			pr_debug("rx_loop: %d total_rx: %d\n",
				rx_loop, total_rx);
			return rx_loop;
		}
		total_rx += rx_loop;
		pr_debug("rx_loop: %d, total_rx: %d, data: %d\n",
				rx_loop, total_rx, data);
	}

	return total_rx;
}

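/*
 * Push count->data_length bytes with kernel_sendmsg(), looping until
 * everything has been sent or an error occurs.
 */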
static int iscsit_do_tx_data(
	struct iscsi_conn *conn,
	struct iscsi_data_count *count)
{
	int data = count->data_length, total_tx = 0, tx_loop = 0, iov_len;
	struct kvec *iov_p;
	struct msghdr msg;

	if (!conn || !conn->sock || !conn->conn_ops)
		return -1;

	if (data <= 0) {
		pr_err("Data length is: %d\n", data);
		return -1;
	}

	memset(&msg, 0, sizeof(struct msghdr));

	iov_p = count->iov;
	iov_len = count->iov_count;

	while (total_tx < data) {
		tx_loop = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len,
					(data - total_tx));
		if (tx_loop <= 0) {
			pr_debug("tx_loop: %d total_tx %d\n",
				tx_loop, total_tx);
			return tx_loop;
		}
		total_tx += tx_loop;
		pr_debug("tx_loop: %d, total_tx: %d, data: %d\n",
				tx_loop, total_tx, data);
	}

	return total_tx;
}

int rx_data(
	struct iscsi_conn *conn,
	struct kvec *iov,
	int iov_count,
	int data)
{
	struct iscsi_data_count c;

	if (!conn || !conn->sock || !conn->conn_ops)
		return -1;

	memset(&c, 0, sizeof(struct iscsi_data_count));
	c.iov = iov;
	c.iov_count = iov_count;
	c.data_length = data;
	c.type = ISCSI_RX_DATA;

	return iscsit_do_rx_data(conn, &c);
}

int tx_data(
	struct iscsi_conn *conn,
	struct kvec *iov,
	int iov_count,
	int data)
{
	struct iscsi_data_count c;

	if (!conn || !conn->sock || !conn->conn_ops)
		return -1;

	memset(&c, 0, sizeof(struct iscsi_data_count));
	c.iov = iov;
	c.iov_count = iov_count;
	c.data_length = data;
	c.type = ISCSI_TX_DATA;

	return iscsit_do_tx_data(conn, &c);
}

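/*
 * Record the outcome of a login attempt in the owning tiqn's login
 * statistics, saving the initiator name, IP address and time on failure.
 */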
void iscsit_collect_login_stats(
	struct iscsi_conn *conn,
	u8 status_class,
	u8 status_detail)
{
	struct iscsi_param *intrname = NULL;
	struct iscsi_tiqn *tiqn;
	struct iscsi_login_stats *ls;

	tiqn = iscsit_snmp_get_tiqn(conn);
	if (!tiqn)
		return;

	ls = &tiqn->login_stats;

	spin_lock(&ls->lock);
	if (!strcmp(conn->login_ip, ls->last_intr_fail_ip_addr) &&
	    ((get_jiffies_64() - ls->last_fail_time) < 10)) {
		/* We already have the failure info for this login */
		spin_unlock(&ls->lock);
		return;
	}

	if (status_class == ISCSI_STATUS_CLS_SUCCESS)
		ls->accepts++;
	else if (status_class == ISCSI_STATUS_CLS_REDIRECT) {
		ls->redirects++;
		ls->last_fail_type = ISCSI_LOGIN_FAIL_REDIRECT;
	} else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR) &&
		 (status_detail == ISCSI_LOGIN_STATUS_AUTH_FAILED)) {
		ls->authenticate_fails++;
		ls->last_fail_type = ISCSI_LOGIN_FAIL_AUTHENTICATE;
	} else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR) &&
		 (status_detail == ISCSI_LOGIN_STATUS_TGT_FORBIDDEN)) {
		ls->authorize_fails++;
		ls->last_fail_type = ISCSI_LOGIN_FAIL_AUTHORIZE;
	} else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR) &&
		 (status_detail == ISCSI_LOGIN_STATUS_INIT_ERR)) {
		ls->negotiate_fails++;
		ls->last_fail_type = ISCSI_LOGIN_FAIL_NEGOTIATE;
	} else {
		ls->other_fails++;
		ls->last_fail_type = ISCSI_LOGIN_FAIL_OTHER;
	}

	/* Save initiator name, ip address and time, if it is a failed login */
	if (status_class != ISCSI_STATUS_CLS_SUCCESS) {
		if (conn->param_list)
			intrname = iscsi_find_param_from_key(INITIATORNAME,
							     conn->param_list);
		strcpy(ls->last_intr_fail_name,
		       (intrname ? intrname->value : "Unknown"));

		ls->last_intr_fail_ip_family = conn->sock->sk->sk_family;
		snprintf(ls->last_intr_fail_ip_addr, IPV6_ADDRESS_SPACE,
				"%s", conn->login_ip);
		ls->last_fail_time = get_jiffies_64();
	}

	spin_unlock(&ls->lock);
}

struct iscsi_tiqn *iscsit_snmp_get_tiqn(struct iscsi_conn *conn)
{
	struct iscsi_portal_group *tpg;

	if (!conn || !conn->sess)
		return NULL;

	tpg = conn->sess->tpg;
	if (!tpg)
		return NULL;

	if (!tpg->tpg_tiqn)
		return NULL;

	return tpg->tpg_tiqn;
}