1 /* 2 * iSCSI lib functions 3 * 4 * Copyright (C) 2006 Red Hat, Inc. All rights reserved. 5 * Copyright (C) 2004 - 2006 Mike Christie 6 * Copyright (C) 2004 - 2005 Dmitry Yusupov 7 * Copyright (C) 2004 - 2005 Alex Aizman 8 * maintained by open-iscsi@googlegroups.com 9 * 10 * This program is free software; you can redistribute it and/or modify 11 * it under the terms of the GNU General Public License as published by 12 * the Free Software Foundation; either version 2 of the License, or 13 * (at your option) any later version. 14 * 15 * This program is distributed in the hope that it will be useful, 16 * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 * GNU General Public License for more details. 19 * 20 * You should have received a copy of the GNU General Public License 21 * along with this program; if not, write to the Free Software 22 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 23 */ 24 #include <linux/types.h> 25 #include <linux/kfifo.h> 26 #include <linux/delay.h> 27 #include <linux/log2.h> 28 #include <asm/unaligned.h> 29 #include <net/tcp.h> 30 #include <scsi/scsi_cmnd.h> 31 #include <scsi/scsi_device.h> 32 #include <scsi/scsi_eh.h> 33 #include <scsi/scsi_tcq.h> 34 #include <scsi/scsi_host.h> 35 #include <scsi/scsi.h> 36 #include <scsi/iscsi_proto.h> 37 #include <scsi/scsi_transport.h> 38 #include <scsi/scsi_transport_iscsi.h> 39 #include <scsi/libiscsi.h> 40 41 /* Serial Number Arithmetic, 32 bits, less than, RFC1982 */ 42 #define SNA32_CHECK 2147483648UL 43 44 static int iscsi_sna_lt(u32 n1, u32 n2) 45 { 46 return n1 != n2 && ((n1 < n2 && (n2 - n1 < SNA32_CHECK)) || 47 (n1 > n2 && (n2 - n1 < SNA32_CHECK))); 48 } 49 50 /* Serial Number Arithmetic, 32 bits, less than, RFC1982 */ 51 static int iscsi_sna_lte(u32 n1, u32 n2) 52 { 53 return n1 == n2 || ((n1 < n2 && (n2 - n1 < SNA32_CHECK)) || 54 (n1 > n2 && (n2 - n1 < SNA32_CHECK))); 55 } 56 57 void 58 iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr) 59 { 60 uint32_t max_cmdsn = be32_to_cpu(hdr->max_cmdsn); 61 uint32_t exp_cmdsn = be32_to_cpu(hdr->exp_cmdsn); 62 63 /* 64 * standard specifies this check for when to update expected and 65 * max sequence numbers 66 */ 67 if (iscsi_sna_lt(max_cmdsn, exp_cmdsn - 1)) 68 return; 69 70 if (exp_cmdsn != session->exp_cmdsn && 71 !iscsi_sna_lt(exp_cmdsn, session->exp_cmdsn)) 72 session->exp_cmdsn = exp_cmdsn; 73 74 if (max_cmdsn != session->max_cmdsn && 75 !iscsi_sna_lt(max_cmdsn, session->max_cmdsn)) { 76 session->max_cmdsn = max_cmdsn; 77 /* 78 * if the window closed with IO queued, then kick the 79 * xmit thread 80 */ 81 if (!list_empty(&session->leadconn->xmitqueue) || 82 !list_empty(&session->leadconn->mgmtqueue)) { 83 if (!(session->tt->caps & CAP_DATA_PATH_OFFLOAD)) 84 scsi_queue_work(session->host, 85 &session->leadconn->xmitwork); 86 } 87 } 88 } 89 EXPORT_SYMBOL_GPL(iscsi_update_cmdsn); 90 91 void iscsi_prep_unsolicit_data_pdu(struct iscsi_task *task, 92 struct iscsi_data *hdr) 93 { 94 struct iscsi_conn *conn = task->conn; 95 96 memset(hdr, 0, sizeof(struct iscsi_data)); 97 hdr->ttt = cpu_to_be32(ISCSI_RESERVED_TAG); 98 hdr->datasn = cpu_to_be32(task->unsol_datasn); 99 task->unsol_datasn++; 100 hdr->opcode = ISCSI_OP_SCSI_DATA_OUT; 101 memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun)); 102 103 hdr->itt = task->hdr->itt; 104 hdr->exp_statsn = cpu_to_be32(conn->exp_statsn); 105 hdr->offset = cpu_to_be32(task->unsol_offset); 106 107 if 
(task->unsol_count > conn->max_xmit_dlength) { 108 hton24(hdr->dlength, conn->max_xmit_dlength); 109 task->data_count = conn->max_xmit_dlength; 110 task->unsol_offset += task->data_count; 111 hdr->flags = 0; 112 } else { 113 hton24(hdr->dlength, task->unsol_count); 114 task->data_count = task->unsol_count; 115 hdr->flags = ISCSI_FLAG_CMD_FINAL; 116 } 117 } 118 EXPORT_SYMBOL_GPL(iscsi_prep_unsolicit_data_pdu); 119 120 static int iscsi_add_hdr(struct iscsi_task *task, unsigned len) 121 { 122 unsigned exp_len = task->hdr_len + len; 123 124 if (exp_len > task->hdr_max) { 125 WARN_ON(1); 126 return -EINVAL; 127 } 128 129 WARN_ON(len & (ISCSI_PAD_LEN - 1)); /* caller must pad the AHS */ 130 task->hdr_len = exp_len; 131 return 0; 132 } 133 134 /* 135 * make an extended cdb AHS 136 */ 137 static int iscsi_prep_ecdb_ahs(struct iscsi_task *task) 138 { 139 struct scsi_cmnd *cmd = task->sc; 140 unsigned rlen, pad_len; 141 unsigned short ahslength; 142 struct iscsi_ecdb_ahdr *ecdb_ahdr; 143 int rc; 144 145 ecdb_ahdr = iscsi_next_hdr(task); 146 rlen = cmd->cmd_len - ISCSI_CDB_SIZE; 147 148 BUG_ON(rlen > sizeof(ecdb_ahdr->ecdb)); 149 ahslength = rlen + sizeof(ecdb_ahdr->reserved); 150 151 pad_len = iscsi_padding(rlen); 152 153 rc = iscsi_add_hdr(task, sizeof(ecdb_ahdr->ahslength) + 154 sizeof(ecdb_ahdr->ahstype) + ahslength + pad_len); 155 if (rc) 156 return rc; 157 158 if (pad_len) 159 memset(&ecdb_ahdr->ecdb[rlen], 0, pad_len); 160 161 ecdb_ahdr->ahslength = cpu_to_be16(ahslength); 162 ecdb_ahdr->ahstype = ISCSI_AHSTYPE_CDB; 163 ecdb_ahdr->reserved = 0; 164 memcpy(ecdb_ahdr->ecdb, cmd->cmnd + ISCSI_CDB_SIZE, rlen); 165 166 debug_scsi("iscsi_prep_ecdb_ahs: varlen_cdb_len %d " 167 "rlen %d pad_len %d ahs_length %d iscsi_headers_size %u\n", 168 cmd->cmd_len, rlen, pad_len, ahslength, task->hdr_len); 169 170 return 0; 171 } 172 173 static int iscsi_prep_bidi_ahs(struct iscsi_task *task) 174 { 175 struct scsi_cmnd *sc = task->sc; 176 struct iscsi_rlength_ahdr *rlen_ahdr; 177 int rc; 178 179 rlen_ahdr = iscsi_next_hdr(task); 180 rc = iscsi_add_hdr(task, sizeof(*rlen_ahdr)); 181 if (rc) 182 return rc; 183 184 rlen_ahdr->ahslength = 185 cpu_to_be16(sizeof(rlen_ahdr->read_length) + 186 sizeof(rlen_ahdr->reserved)); 187 rlen_ahdr->ahstype = ISCSI_AHSTYPE_RLENGTH; 188 rlen_ahdr->reserved = 0; 189 rlen_ahdr->read_length = cpu_to_be32(scsi_in(sc)->length); 190 191 debug_scsi("bidi-in rlen_ahdr->read_length(%d) " 192 "rlen_ahdr->ahslength(%d)\n", 193 be32_to_cpu(rlen_ahdr->read_length), 194 be16_to_cpu(rlen_ahdr->ahslength)); 195 return 0; 196 } 197 198 /** 199 * iscsi_prep_scsi_cmd_pdu - prep iscsi scsi cmd pdu 200 * @task: iscsi task 201 * 202 * Prep basic iSCSI PDU fields for a scsi cmd pdu. 
The LLD should set 203 * fields like dlength or final based on how much data it sends 204 */ 205 static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task) 206 { 207 struct iscsi_conn *conn = task->conn; 208 struct iscsi_session *session = conn->session; 209 struct iscsi_cmd *hdr = task->hdr; 210 struct scsi_cmnd *sc = task->sc; 211 unsigned hdrlength, cmd_len; 212 int rc; 213 214 task->hdr_len = 0; 215 rc = iscsi_add_hdr(task, sizeof(*hdr)); 216 if (rc) 217 return rc; 218 hdr->opcode = ISCSI_OP_SCSI_CMD; 219 hdr->flags = ISCSI_ATTR_SIMPLE; 220 int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun); 221 hdr->itt = build_itt(task->itt, session->age); 222 hdr->cmdsn = cpu_to_be32(session->cmdsn); 223 session->cmdsn++; 224 hdr->exp_statsn = cpu_to_be32(conn->exp_statsn); 225 cmd_len = sc->cmd_len; 226 if (cmd_len < ISCSI_CDB_SIZE) 227 memset(&hdr->cdb[cmd_len], 0, ISCSI_CDB_SIZE - cmd_len); 228 else if (cmd_len > ISCSI_CDB_SIZE) { 229 rc = iscsi_prep_ecdb_ahs(task); 230 if (rc) 231 return rc; 232 cmd_len = ISCSI_CDB_SIZE; 233 } 234 memcpy(hdr->cdb, sc->cmnd, cmd_len); 235 236 task->imm_count = 0; 237 if (scsi_bidi_cmnd(sc)) { 238 hdr->flags |= ISCSI_FLAG_CMD_READ; 239 rc = iscsi_prep_bidi_ahs(task); 240 if (rc) 241 return rc; 242 } 243 if (sc->sc_data_direction == DMA_TO_DEVICE) { 244 unsigned out_len = scsi_out(sc)->length; 245 hdr->data_length = cpu_to_be32(out_len); 246 hdr->flags |= ISCSI_FLAG_CMD_WRITE; 247 /* 248 * Write counters: 249 * 250 * imm_count bytes to be sent right after 251 * SCSI PDU Header 252 * 253 * unsol_count bytes(as Data-Out) to be sent 254 * without R2T ack right after 255 * immediate data 256 * 257 * r2t_data_count bytes to be sent via R2T ack's 258 * 259 * pad_count bytes to be sent as zero-padding 260 */ 261 task->unsol_count = 0; 262 task->unsol_offset = 0; 263 task->unsol_datasn = 0; 264 265 if (session->imm_data_en) { 266 if (out_len >= session->first_burst) 267 task->imm_count = min(session->first_burst, 268 conn->max_xmit_dlength); 269 else 270 task->imm_count = min(out_len, 271 conn->max_xmit_dlength); 272 hton24(hdr->dlength, task->imm_count); 273 } else 274 zero_data(hdr->dlength); 275 276 if (!session->initial_r2t_en) { 277 task->unsol_count = min(session->first_burst, out_len) 278 - task->imm_count; 279 task->unsol_offset = task->imm_count; 280 } 281 282 if (!task->unsol_count) 283 /* No unsolicit Data-Out's */ 284 hdr->flags |= ISCSI_FLAG_CMD_FINAL; 285 } else { 286 hdr->flags |= ISCSI_FLAG_CMD_FINAL; 287 zero_data(hdr->dlength); 288 hdr->data_length = cpu_to_be32(scsi_in(sc)->length); 289 290 if (sc->sc_data_direction == DMA_FROM_DEVICE) 291 hdr->flags |= ISCSI_FLAG_CMD_READ; 292 } 293 294 /* calculate size of additional header segments (AHSs) */ 295 hdrlength = task->hdr_len - sizeof(*hdr); 296 297 WARN_ON(hdrlength & (ISCSI_PAD_LEN-1)); 298 hdrlength /= ISCSI_PAD_LEN; 299 300 WARN_ON(hdrlength >= 256); 301 hdr->hlength = hdrlength & 0xFF; 302 303 if (conn->session->tt->init_task && 304 conn->session->tt->init_task(task)) 305 return -EIO; 306 307 task->state = ISCSI_TASK_RUNNING; 308 list_move_tail(&task->running, &conn->run_list); 309 310 conn->scsicmd_pdus_cnt++; 311 debug_scsi("iscsi prep [%s cid %d sc %p cdb 0x%x itt 0x%x len %d " 312 "bidi_len %d cmdsn %d win %d]\n", scsi_bidi_cmnd(sc) ? 313 "bidirectional" : sc->sc_data_direction == DMA_TO_DEVICE ? 314 "write" : "read", conn->id, sc, sc->cmnd[0], task->itt, 315 scsi_bufflen(sc), 316 scsi_bidi_cmnd(sc) ? 
scsi_in(sc)->length : 0, 317 session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1); 318 return 0; 319 } 320 321 /** 322 * iscsi_complete_command - finish a task 323 * @task: iscsi cmd task 324 * 325 * Must be called with session lock. 326 * This function returns the scsi command to scsi-ml or cleans 327 * up mgmt tasks then returns the task to the pool. 328 */ 329 static void iscsi_complete_command(struct iscsi_task *task) 330 { 331 struct iscsi_conn *conn = task->conn; 332 struct iscsi_session *session = conn->session; 333 struct scsi_cmnd *sc = task->sc; 334 335 list_del_init(&task->running); 336 task->state = ISCSI_TASK_COMPLETED; 337 task->sc = NULL; 338 339 if (conn->task == task) 340 conn->task = NULL; 341 /* 342 * login task is preallocated so do not free 343 */ 344 if (conn->login_task == task) 345 return; 346 347 __kfifo_put(session->cmdpool.queue, (void*)&task, sizeof(void*)); 348 349 if (conn->ping_task == task) 350 conn->ping_task = NULL; 351 352 if (sc) { 353 task->sc = NULL; 354 /* SCSI eh reuses commands to verify us */ 355 sc->SCp.ptr = NULL; 356 /* 357 * queue command may call this to free the task, but 358 * not have setup the sc callback 359 */ 360 if (sc->scsi_done) 361 sc->scsi_done(sc); 362 } 363 } 364 365 void __iscsi_get_task(struct iscsi_task *task) 366 { 367 atomic_inc(&task->refcount); 368 } 369 EXPORT_SYMBOL_GPL(__iscsi_get_task); 370 371 static void __iscsi_put_task(struct iscsi_task *task) 372 { 373 if (atomic_dec_and_test(&task->refcount)) 374 iscsi_complete_command(task); 375 } 376 377 void iscsi_put_task(struct iscsi_task *task) 378 { 379 struct iscsi_session *session = task->conn->session; 380 381 spin_lock_bh(&session->lock); 382 __iscsi_put_task(task); 383 spin_unlock_bh(&session->lock); 384 } 385 EXPORT_SYMBOL_GPL(iscsi_put_task); 386 387 /* 388 * session lock must be held 389 */ 390 static void fail_command(struct iscsi_conn *conn, struct iscsi_task *task, 391 int err) 392 { 393 struct scsi_cmnd *sc; 394 395 sc = task->sc; 396 if (!sc) 397 return; 398 399 if (task->state == ISCSI_TASK_PENDING) 400 /* 401 * cmd never made it to the xmit thread, so we should not count 402 * the cmd in the sequencing 403 */ 404 conn->session->queued_cmdsn--; 405 else 406 conn->session->tt->cleanup_task(conn, task); 407 408 sc->result = err; 409 if (!scsi_bidi_cmnd(sc)) 410 scsi_set_resid(sc, scsi_bufflen(sc)); 411 else { 412 scsi_out(sc)->resid = scsi_out(sc)->length; 413 scsi_in(sc)->resid = scsi_in(sc)->length; 414 } 415 416 if (conn->task == task) 417 conn->task = NULL; 418 /* release ref from queuecommand */ 419 __iscsi_put_task(task); 420 } 421 422 static int iscsi_prep_mgmt_task(struct iscsi_conn *conn, 423 struct iscsi_task *task) 424 { 425 struct iscsi_session *session = conn->session; 426 struct iscsi_hdr *hdr = (struct iscsi_hdr *)task->hdr; 427 struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr; 428 429 if (conn->session->state == ISCSI_STATE_LOGGING_OUT) 430 return -ENOTCONN; 431 432 if (hdr->opcode != (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) && 433 hdr->opcode != (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE)) 434 nop->exp_statsn = cpu_to_be32(conn->exp_statsn); 435 /* 436 * pre-format CmdSN for outgoing PDU. 437 */ 438 nop->cmdsn = cpu_to_be32(session->cmdsn); 439 if (hdr->itt != RESERVED_ITT) { 440 hdr->itt = build_itt(task->itt, session->age); 441 /* 442 * TODO: We always use immediate, so we never hit this. 443 * If we start to send tmfs or nops as non-immediate then 444 * we should start checking the cmdsn numbers for mgmt tasks. 
445 */ 446 if (conn->c_stage == ISCSI_CONN_STARTED && 447 !(hdr->opcode & ISCSI_OP_IMMEDIATE)) { 448 session->queued_cmdsn++; 449 session->cmdsn++; 450 } 451 } 452 453 if (session->tt->init_task) 454 session->tt->init_task(task); 455 456 if ((hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT) 457 session->state = ISCSI_STATE_LOGGING_OUT; 458 459 list_move_tail(&task->running, &conn->mgmt_run_list); 460 debug_scsi("mgmtpdu [op 0x%x hdr->itt 0x%x datalen %d]\n", 461 hdr->opcode & ISCSI_OPCODE_MASK, hdr->itt, 462 task->data_count); 463 return 0; 464 } 465 466 static struct iscsi_task * 467 __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, 468 char *data, uint32_t data_size) 469 { 470 struct iscsi_session *session = conn->session; 471 struct iscsi_task *task; 472 473 if (session->state == ISCSI_STATE_TERMINATE) 474 return NULL; 475 476 if (hdr->opcode == (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) || 477 hdr->opcode == (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE)) 478 /* 479 * Login and Text are sent serially, in 480 * request-followed-by-response sequence. 481 * Same task can be used. Same ITT must be used. 482 * Note that login_task is preallocated at conn_create(). 483 */ 484 task = conn->login_task; 485 else { 486 BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE); 487 BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED); 488 489 if (!__kfifo_get(session->cmdpool.queue, 490 (void*)&task, sizeof(void*))) 491 return NULL; 492 493 if ((hdr->opcode == (ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE)) && 494 hdr->ttt == RESERVED_ITT) { 495 conn->ping_task = task; 496 conn->last_ping = jiffies; 497 } 498 } 499 /* 500 * released in complete pdu for task we expect a response for, and 501 * released by the lld when it has transmitted the task for 502 * pdus we do not expect a response for. 503 */ 504 atomic_set(&task->refcount, 1); 505 task->conn = conn; 506 task->sc = NULL; 507 508 if (data_size) { 509 memcpy(task->data, data, data_size); 510 task->data_count = data_size; 511 } else 512 task->data_count = 0; 513 514 memcpy(task->hdr, hdr, sizeof(struct iscsi_hdr)); 515 INIT_LIST_HEAD(&task->running); 516 list_add_tail(&task->running, &conn->mgmtqueue); 517 518 if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) { 519 if (iscsi_prep_mgmt_task(conn, task)) { 520 __iscsi_put_task(task); 521 return NULL; 522 } 523 524 if (session->tt->xmit_task(task)) 525 task = NULL; 526 527 } else 528 scsi_queue_work(conn->session->host, &conn->xmitwork); 529 530 return task; 531 } 532 533 int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr, 534 char *data, uint32_t data_size) 535 { 536 struct iscsi_conn *conn = cls_conn->dd_data; 537 struct iscsi_session *session = conn->session; 538 int err = 0; 539 540 spin_lock_bh(&session->lock); 541 if (!__iscsi_conn_send_pdu(conn, hdr, data, data_size)) 542 err = -EPERM; 543 spin_unlock_bh(&session->lock); 544 return err; 545 } 546 EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu); 547 548 /** 549 * iscsi_cmd_rsp - SCSI Command Response processing 550 * @conn: iscsi connection 551 * @hdr: iscsi header 552 * @task: scsi command task 553 * @data: cmd data buffer 554 * @datalen: len of buffer 555 * 556 * iscsi_cmd_rsp sets up the scsi_cmnd fields based on the PDU and 557 * then completes the command and task. 
558 **/ 559 static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr, 560 struct iscsi_task *task, char *data, 561 int datalen) 562 { 563 struct iscsi_cmd_rsp *rhdr = (struct iscsi_cmd_rsp *)hdr; 564 struct iscsi_session *session = conn->session; 565 struct scsi_cmnd *sc = task->sc; 566 567 iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr); 568 conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1; 569 570 sc->result = (DID_OK << 16) | rhdr->cmd_status; 571 572 if (rhdr->response != ISCSI_STATUS_CMD_COMPLETED) { 573 sc->result = DID_ERROR << 16; 574 goto out; 575 } 576 577 if (rhdr->cmd_status == SAM_STAT_CHECK_CONDITION) { 578 uint16_t senselen; 579 580 if (datalen < 2) { 581 invalid_datalen: 582 iscsi_conn_printk(KERN_ERR, conn, 583 "Got CHECK_CONDITION but invalid data " 584 "buffer size of %d\n", datalen); 585 sc->result = DID_BAD_TARGET << 16; 586 goto out; 587 } 588 589 senselen = get_unaligned_be16(data); 590 if (datalen < senselen) 591 goto invalid_datalen; 592 593 memcpy(sc->sense_buffer, data + 2, 594 min_t(uint16_t, senselen, SCSI_SENSE_BUFFERSIZE)); 595 debug_scsi("copied %d bytes of sense\n", 596 min_t(uint16_t, senselen, SCSI_SENSE_BUFFERSIZE)); 597 } 598 599 if (rhdr->flags & (ISCSI_FLAG_CMD_BIDI_UNDERFLOW | 600 ISCSI_FLAG_CMD_BIDI_OVERFLOW)) { 601 int res_count = be32_to_cpu(rhdr->bi_residual_count); 602 603 if (scsi_bidi_cmnd(sc) && res_count > 0 && 604 (rhdr->flags & ISCSI_FLAG_CMD_BIDI_OVERFLOW || 605 res_count <= scsi_in(sc)->length)) 606 scsi_in(sc)->resid = res_count; 607 else 608 sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status; 609 } 610 611 if (rhdr->flags & (ISCSI_FLAG_CMD_UNDERFLOW | 612 ISCSI_FLAG_CMD_OVERFLOW)) { 613 int res_count = be32_to_cpu(rhdr->residual_count); 614 615 if (res_count > 0 && 616 (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW || 617 res_count <= scsi_bufflen(sc))) 618 /* write side for bidi or uni-io set_resid */ 619 scsi_set_resid(sc, res_count); 620 else 621 sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status; 622 } 623 out: 624 debug_scsi("done [sc %lx res %d itt 0x%x]\n", 625 (long)sc, sc->result, task->itt); 626 conn->scsirsp_pdus_cnt++; 627 628 __iscsi_put_task(task); 629 } 630 631 /** 632 * iscsi_data_in_rsp - SCSI Data-In Response processing 633 * @conn: iscsi connection 634 * @hdr: iscsi pdu 635 * @task: scsi command task 636 **/ 637 static void 638 iscsi_data_in_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr, 639 struct iscsi_task *task) 640 { 641 struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)hdr; 642 struct scsi_cmnd *sc = task->sc; 643 644 if (!(rhdr->flags & ISCSI_FLAG_DATA_STATUS)) 645 return; 646 647 sc->result = (DID_OK << 16) | rhdr->cmd_status; 648 conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1; 649 if (rhdr->flags & (ISCSI_FLAG_DATA_UNDERFLOW | 650 ISCSI_FLAG_DATA_OVERFLOW)) { 651 int res_count = be32_to_cpu(rhdr->residual_count); 652 653 if (res_count > 0 && 654 (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW || 655 res_count <= scsi_in(sc)->length)) 656 scsi_in(sc)->resid = res_count; 657 else 658 sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status; 659 } 660 661 conn->scsirsp_pdus_cnt++; 662 __iscsi_put_task(task); 663 } 664 665 static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr) 666 { 667 struct iscsi_tm_rsp *tmf = (struct iscsi_tm_rsp *)hdr; 668 669 conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1; 670 conn->tmfrsp_pdus_cnt++; 671 672 if (conn->tmf_state != TMF_QUEUED) 673 return; 674 675 if (tmf->response == ISCSI_TMF_RSP_COMPLETE) 676 conn->tmf_state = TMF_SUCCESS; 
677 else if (tmf->response == ISCSI_TMF_RSP_NO_TASK) 678 conn->tmf_state = TMF_NOT_FOUND; 679 else 680 conn->tmf_state = TMF_FAILED; 681 wake_up(&conn->ehwait); 682 } 683 684 static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr) 685 { 686 struct iscsi_nopout hdr; 687 struct iscsi_task *task; 688 689 if (!rhdr && conn->ping_task) 690 return; 691 692 memset(&hdr, 0, sizeof(struct iscsi_nopout)); 693 hdr.opcode = ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE; 694 hdr.flags = ISCSI_FLAG_CMD_FINAL; 695 696 if (rhdr) { 697 memcpy(hdr.lun, rhdr->lun, 8); 698 hdr.ttt = rhdr->ttt; 699 hdr.itt = RESERVED_ITT; 700 } else 701 hdr.ttt = RESERVED_ITT; 702 703 task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0); 704 if (!task) 705 iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n"); 706 } 707 708 static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr, 709 char *data, int datalen) 710 { 711 struct iscsi_reject *reject = (struct iscsi_reject *)hdr; 712 struct iscsi_hdr rejected_pdu; 713 uint32_t itt; 714 715 conn->exp_statsn = be32_to_cpu(reject->statsn) + 1; 716 717 if (reject->reason == ISCSI_REASON_DATA_DIGEST_ERROR) { 718 if (ntoh24(reject->dlength) > datalen) 719 return ISCSI_ERR_PROTO; 720 721 if (ntoh24(reject->dlength) >= sizeof(struct iscsi_hdr)) { 722 memcpy(&rejected_pdu, data, sizeof(struct iscsi_hdr)); 723 itt = get_itt(rejected_pdu.itt); 724 iscsi_conn_printk(KERN_ERR, conn, 725 "itt 0x%x had pdu (op 0x%x) rejected " 726 "due to DataDigest error.\n", itt, 727 rejected_pdu.opcode); 728 } 729 } 730 return 0; 731 } 732 733 /** 734 * iscsi_itt_to_task - look up task by itt 735 * @conn: iscsi connection 736 * @itt: itt 737 * 738 * This should be used for mgmt tasks like login and nops, or if 739 * the LDD's itt space does not include the session age. 740 * 741 * The session lock must be held. 742 */ 743 static struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt) 744 { 745 struct iscsi_session *session = conn->session; 746 uint32_t i; 747 748 if (itt == RESERVED_ITT) 749 return NULL; 750 751 i = get_itt(itt); 752 if (i >= session->cmds_max) 753 return NULL; 754 755 return session->cmds[i]; 756 } 757 758 /** 759 * __iscsi_complete_pdu - complete pdu 760 * @conn: iscsi conn 761 * @hdr: iscsi header 762 * @data: data buffer 763 * @datalen: len of data buffer 764 * 765 * Completes pdu processing by freeing any resources allocated at 766 * queuecommand or send generic. session lock must be held and verify 767 * itt must have been called. 
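 *
 * Returns 0 on success or an ISCSI_ERR_* code (bad itt, bad opcode,
 * protocol error) that the caller is expected to treat as a
 * connection-level failure.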
768 */ 769 int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, 770 char *data, int datalen) 771 { 772 struct iscsi_session *session = conn->session; 773 int opcode = hdr->opcode & ISCSI_OPCODE_MASK, rc = 0; 774 struct iscsi_task *task; 775 uint32_t itt; 776 777 conn->last_recv = jiffies; 778 rc = iscsi_verify_itt(conn, hdr->itt); 779 if (rc) 780 return rc; 781 782 if (hdr->itt != RESERVED_ITT) 783 itt = get_itt(hdr->itt); 784 else 785 itt = ~0U; 786 787 debug_scsi("[op 0x%x cid %d itt 0x%x len %d]\n", 788 opcode, conn->id, itt, datalen); 789 790 if (itt == ~0U) { 791 iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr); 792 793 switch(opcode) { 794 case ISCSI_OP_NOOP_IN: 795 if (datalen) { 796 rc = ISCSI_ERR_PROTO; 797 break; 798 } 799 800 if (hdr->ttt == cpu_to_be32(ISCSI_RESERVED_TAG)) 801 break; 802 803 iscsi_send_nopout(conn, (struct iscsi_nopin*)hdr); 804 break; 805 case ISCSI_OP_REJECT: 806 rc = iscsi_handle_reject(conn, hdr, data, datalen); 807 break; 808 case ISCSI_OP_ASYNC_EVENT: 809 conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1; 810 if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen)) 811 rc = ISCSI_ERR_CONN_FAILED; 812 break; 813 default: 814 rc = ISCSI_ERR_BAD_OPCODE; 815 break; 816 } 817 goto out; 818 } 819 820 switch(opcode) { 821 case ISCSI_OP_SCSI_CMD_RSP: 822 case ISCSI_OP_SCSI_DATA_IN: 823 task = iscsi_itt_to_ctask(conn, hdr->itt); 824 if (!task) 825 return ISCSI_ERR_BAD_ITT; 826 break; 827 case ISCSI_OP_R2T: 828 /* 829 * LLD handles R2Ts if they need to. 830 */ 831 return 0; 832 case ISCSI_OP_LOGOUT_RSP: 833 case ISCSI_OP_LOGIN_RSP: 834 case ISCSI_OP_TEXT_RSP: 835 case ISCSI_OP_SCSI_TMFUNC_RSP: 836 case ISCSI_OP_NOOP_IN: 837 task = iscsi_itt_to_task(conn, hdr->itt); 838 if (!task) 839 return ISCSI_ERR_BAD_ITT; 840 break; 841 default: 842 return ISCSI_ERR_BAD_OPCODE; 843 } 844 845 switch(opcode) { 846 case ISCSI_OP_SCSI_CMD_RSP: 847 iscsi_scsi_cmd_rsp(conn, hdr, task, data, datalen); 848 break; 849 case ISCSI_OP_SCSI_DATA_IN: 850 iscsi_data_in_rsp(conn, hdr, task); 851 break; 852 case ISCSI_OP_LOGOUT_RSP: 853 iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr); 854 if (datalen) { 855 rc = ISCSI_ERR_PROTO; 856 break; 857 } 858 conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1; 859 goto recv_pdu; 860 case ISCSI_OP_LOGIN_RSP: 861 case ISCSI_OP_TEXT_RSP: 862 iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr); 863 /* 864 * login related PDU's exp_statsn is handled in 865 * userspace 866 */ 867 goto recv_pdu; 868 case ISCSI_OP_SCSI_TMFUNC_RSP: 869 iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr); 870 if (datalen) { 871 rc = ISCSI_ERR_PROTO; 872 break; 873 } 874 875 iscsi_tmf_rsp(conn, hdr); 876 __iscsi_put_task(task); 877 break; 878 case ISCSI_OP_NOOP_IN: 879 iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr); 880 if (hdr->ttt != cpu_to_be32(ISCSI_RESERVED_TAG) || datalen) { 881 rc = ISCSI_ERR_PROTO; 882 break; 883 } 884 conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1; 885 886 if (conn->ping_task != task) 887 /* 888 * If this is not in response to one of our 889 * nops then it must be from userspace. 
890 */ 891 goto recv_pdu; 892 893 mod_timer(&conn->transport_timer, jiffies + conn->recv_timeout); 894 __iscsi_put_task(task); 895 break; 896 default: 897 rc = ISCSI_ERR_BAD_OPCODE; 898 break; 899 } 900 901 out: 902 return rc; 903 recv_pdu: 904 if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen)) 905 rc = ISCSI_ERR_CONN_FAILED; 906 __iscsi_put_task(task); 907 return rc; 908 } 909 EXPORT_SYMBOL_GPL(__iscsi_complete_pdu); 910 911 int iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, 912 char *data, int datalen) 913 { 914 int rc; 915 916 spin_lock(&conn->session->lock); 917 rc = __iscsi_complete_pdu(conn, hdr, data, datalen); 918 spin_unlock(&conn->session->lock); 919 return rc; 920 } 921 EXPORT_SYMBOL_GPL(iscsi_complete_pdu); 922 923 int iscsi_verify_itt(struct iscsi_conn *conn, itt_t itt) 924 { 925 struct iscsi_session *session = conn->session; 926 uint32_t i; 927 928 if (itt == RESERVED_ITT) 929 return 0; 930 931 if (((__force u32)itt & ISCSI_AGE_MASK) != 932 (session->age << ISCSI_AGE_SHIFT)) { 933 iscsi_conn_printk(KERN_ERR, conn, 934 "received itt %x expected session age (%x)\n", 935 (__force u32)itt, session->age); 936 return ISCSI_ERR_BAD_ITT; 937 } 938 939 i = get_itt(itt); 940 if (i >= session->cmds_max) { 941 iscsi_conn_printk(KERN_ERR, conn, 942 "received invalid itt index %u (max cmds " 943 "%u.\n", i, session->cmds_max); 944 return ISCSI_ERR_BAD_ITT; 945 } 946 return 0; 947 } 948 EXPORT_SYMBOL_GPL(iscsi_verify_itt); 949 950 /** 951 * iscsi_itt_to_ctask - look up ctask by itt 952 * @conn: iscsi connection 953 * @itt: itt 954 * 955 * This should be used for cmd tasks. 956 * 957 * The session lock must be held. 958 */ 959 struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *conn, itt_t itt) 960 { 961 struct iscsi_task *task; 962 963 if (iscsi_verify_itt(conn, itt)) 964 return NULL; 965 966 task = iscsi_itt_to_task(conn, itt); 967 if (!task || !task->sc) 968 return NULL; 969 970 if (task->sc->SCp.phase != conn->session->age) { 971 iscsi_session_printk(KERN_ERR, conn->session, 972 "task's session age %d, expected %d\n", 973 task->sc->SCp.phase, conn->session->age); 974 return NULL; 975 } 976 977 return task; 978 } 979 EXPORT_SYMBOL_GPL(iscsi_itt_to_ctask); 980 981 void iscsi_session_failure(struct iscsi_cls_session *cls_session, 982 enum iscsi_err err) 983 { 984 struct iscsi_session *session = cls_session->dd_data; 985 struct iscsi_conn *conn; 986 struct device *dev; 987 unsigned long flags; 988 989 spin_lock_irqsave(&session->lock, flags); 990 conn = session->leadconn; 991 if (session->state == ISCSI_STATE_TERMINATE || !conn) { 992 spin_unlock_irqrestore(&session->lock, flags); 993 return; 994 } 995 996 dev = get_device(&conn->cls_conn->dev); 997 spin_unlock_irqrestore(&session->lock, flags); 998 if (!dev) 999 return; 1000 /* 1001 * if the host is being removed bypass the connection 1002 * recovery initialization because we are going to kill 1003 * the session. 
1004 */ 1005 if (err == ISCSI_ERR_INVALID_HOST) 1006 iscsi_conn_error_event(conn->cls_conn, err); 1007 else 1008 iscsi_conn_failure(conn, err); 1009 put_device(dev); 1010 } 1011 EXPORT_SYMBOL_GPL(iscsi_session_failure); 1012 1013 void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err) 1014 { 1015 struct iscsi_session *session = conn->session; 1016 unsigned long flags; 1017 1018 spin_lock_irqsave(&session->lock, flags); 1019 if (session->state == ISCSI_STATE_FAILED) { 1020 spin_unlock_irqrestore(&session->lock, flags); 1021 return; 1022 } 1023 1024 if (conn->stop_stage == 0) 1025 session->state = ISCSI_STATE_FAILED; 1026 spin_unlock_irqrestore(&session->lock, flags); 1027 1028 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); 1029 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx); 1030 iscsi_conn_error_event(conn->cls_conn, err); 1031 } 1032 EXPORT_SYMBOL_GPL(iscsi_conn_failure); 1033 1034 static int iscsi_check_cmdsn_window_closed(struct iscsi_conn *conn) 1035 { 1036 struct iscsi_session *session = conn->session; 1037 1038 /* 1039 * Check for iSCSI window and take care of CmdSN wrap-around 1040 */ 1041 if (!iscsi_sna_lte(session->queued_cmdsn, session->max_cmdsn)) { 1042 debug_scsi("iSCSI CmdSN closed. ExpCmdSn %u MaxCmdSN %u " 1043 "CmdSN %u/%u\n", session->exp_cmdsn, 1044 session->max_cmdsn, session->cmdsn, 1045 session->queued_cmdsn); 1046 return -ENOSPC; 1047 } 1048 return 0; 1049 } 1050 1051 static int iscsi_xmit_task(struct iscsi_conn *conn) 1052 { 1053 struct iscsi_task *task = conn->task; 1054 int rc; 1055 1056 __iscsi_get_task(task); 1057 spin_unlock_bh(&conn->session->lock); 1058 rc = conn->session->tt->xmit_task(task); 1059 spin_lock_bh(&conn->session->lock); 1060 __iscsi_put_task(task); 1061 if (!rc) 1062 /* done with this task */ 1063 conn->task = NULL; 1064 return rc; 1065 } 1066 1067 /** 1068 * iscsi_requeue_task - requeue task to run from session workqueue 1069 * @task: task to requeue 1070 * 1071 * LLDs that need to run a task from the session workqueue should call 1072 * this. The session lock must be held. This should only be called 1073 * by software drivers. 1074 */ 1075 void iscsi_requeue_task(struct iscsi_task *task) 1076 { 1077 struct iscsi_conn *conn = task->conn; 1078 1079 list_move_tail(&task->running, &conn->requeue); 1080 scsi_queue_work(conn->session->host, &conn->xmitwork); 1081 } 1082 EXPORT_SYMBOL_GPL(iscsi_requeue_task); 1083 1084 /** 1085 * iscsi_data_xmit - xmit any command into the scheduled connection 1086 * @conn: iscsi connection 1087 * 1088 * Notes: 1089 * The function can return -EAGAIN in which case the caller must 1090 * re-schedule it again later or recover. '0' return code means 1091 * successful xmit. 
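 *
 *	Queues are serviced in order: any partially sent task first, then
 *	mgmt PDUs (nops/tmfs), then new SCSI commands, then requeued
 *	tasks, re-checking the mgmt queue in between to avoid starving
 *	pings.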
 **/
static int iscsi_data_xmit(struct iscsi_conn *conn)
{
	int rc = 0;

	spin_lock_bh(&conn->session->lock);
	if (unlikely(conn->suspend_tx)) {
		debug_scsi("conn %d Tx suspended!\n", conn->id);
		spin_unlock_bh(&conn->session->lock);
		return -ENODATA;
	}

	if (conn->task) {
		rc = iscsi_xmit_task(conn);
		if (rc)
			goto again;
	}

	/*
	 * process mgmt pdus like nops before commands since we should
	 * only have one nop-out as a ping from us and targets should not
	 * overflow us with nop-ins
	 */
check_mgmt:
	while (!list_empty(&conn->mgmtqueue)) {
		conn->task = list_entry(conn->mgmtqueue.next,
					struct iscsi_task, running);
		if (iscsi_prep_mgmt_task(conn, conn->task)) {
			__iscsi_put_task(conn->task);
			conn->task = NULL;
			continue;
		}
		rc = iscsi_xmit_task(conn);
		if (rc)
			goto again;
	}

	/* process pending command queue */
	while (!list_empty(&conn->xmitqueue)) {
		if (conn->tmf_state == TMF_QUEUED)
			break;

		conn->task = list_entry(conn->xmitqueue.next,
					struct iscsi_task, running);
		if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
			fail_command(conn, conn->task, DID_IMM_RETRY << 16);
			continue;
		}
		if (iscsi_prep_scsi_cmd_pdu(conn->task)) {
			fail_command(conn, conn->task, DID_ABORT << 16);
			continue;
		}
		rc = iscsi_xmit_task(conn);
		if (rc)
			goto again;
		/*
		 * we could continuously get new task requests so
		 * we need to check the mgmt queue for nops that need to
		 * be sent to avoid starvation
		 */
		if (!list_empty(&conn->mgmtqueue))
			goto check_mgmt;
	}

	while (!list_empty(&conn->requeue)) {
		if (conn->session->fast_abort && conn->tmf_state != TMF_INITIAL)
			break;

		/*
		 * we always do fastlogout - conn stop code will clean up.
		 */
		if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
			break;

		conn->task = list_entry(conn->requeue.next,
					struct iscsi_task, running);
		conn->task->state = ISCSI_TASK_RUNNING;
		list_move_tail(conn->requeue.next, &conn->run_list);
		rc = iscsi_xmit_task(conn);
		if (rc)
			goto again;
		if (!list_empty(&conn->mgmtqueue))
			goto check_mgmt;
	}
	spin_unlock_bh(&conn->session->lock);
	return -ENODATA;

again:
	if (unlikely(conn->suspend_tx))
		rc = -ENODATA;
	spin_unlock_bh(&conn->session->lock);
	return rc;
}

static void iscsi_xmitworker(struct work_struct *work)
{
	struct iscsi_conn *conn =
		container_of(work, struct iscsi_conn, xmitwork);
	int rc;
	/*
	 * serialize Xmit worker on a per-connection basis.
1193 */ 1194 do { 1195 rc = iscsi_data_xmit(conn); 1196 } while (rc >= 0 || rc == -EAGAIN); 1197 } 1198 1199 enum { 1200 FAILURE_BAD_HOST = 1, 1201 FAILURE_SESSION_FAILED, 1202 FAILURE_SESSION_FREED, 1203 FAILURE_WINDOW_CLOSED, 1204 FAILURE_OOM, 1205 FAILURE_SESSION_TERMINATE, 1206 FAILURE_SESSION_IN_RECOVERY, 1207 FAILURE_SESSION_RECOVERY_TIMEOUT, 1208 FAILURE_SESSION_LOGGING_OUT, 1209 FAILURE_SESSION_NOT_READY, 1210 }; 1211 1212 int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) 1213 { 1214 struct iscsi_cls_session *cls_session; 1215 struct Scsi_Host *host; 1216 int reason = 0; 1217 struct iscsi_session *session; 1218 struct iscsi_conn *conn; 1219 struct iscsi_task *task = NULL; 1220 1221 sc->scsi_done = done; 1222 sc->result = 0; 1223 sc->SCp.ptr = NULL; 1224 1225 host = sc->device->host; 1226 spin_unlock(host->host_lock); 1227 1228 cls_session = starget_to_session(scsi_target(sc->device)); 1229 session = cls_session->dd_data; 1230 spin_lock(&session->lock); 1231 1232 reason = iscsi_session_chkready(cls_session); 1233 if (reason) { 1234 sc->result = reason; 1235 goto fault; 1236 } 1237 1238 /* 1239 * ISCSI_STATE_FAILED is a temp. state. The recovery 1240 * code will decide what is best to do with command queued 1241 * during this time 1242 */ 1243 if (session->state != ISCSI_STATE_LOGGED_IN && 1244 session->state != ISCSI_STATE_FAILED) { 1245 /* 1246 * to handle the race between when we set the recovery state 1247 * and block the session we requeue here (commands could 1248 * be entering our queuecommand while a block is starting 1249 * up because the block code is not locked) 1250 */ 1251 switch (session->state) { 1252 case ISCSI_STATE_IN_RECOVERY: 1253 reason = FAILURE_SESSION_IN_RECOVERY; 1254 goto reject; 1255 case ISCSI_STATE_LOGGING_OUT: 1256 reason = FAILURE_SESSION_LOGGING_OUT; 1257 goto reject; 1258 case ISCSI_STATE_RECOVERY_FAILED: 1259 reason = FAILURE_SESSION_RECOVERY_TIMEOUT; 1260 sc->result = DID_TRANSPORT_FAILFAST << 16; 1261 break; 1262 case ISCSI_STATE_TERMINATE: 1263 reason = FAILURE_SESSION_TERMINATE; 1264 sc->result = DID_NO_CONNECT << 16; 1265 break; 1266 default: 1267 reason = FAILURE_SESSION_FREED; 1268 sc->result = DID_NO_CONNECT << 16; 1269 } 1270 goto fault; 1271 } 1272 1273 conn = session->leadconn; 1274 if (!conn) { 1275 reason = FAILURE_SESSION_FREED; 1276 sc->result = DID_NO_CONNECT << 16; 1277 goto fault; 1278 } 1279 1280 if (iscsi_check_cmdsn_window_closed(conn)) { 1281 reason = FAILURE_WINDOW_CLOSED; 1282 goto reject; 1283 } 1284 1285 if (!__kfifo_get(session->cmdpool.queue, (void*)&task, 1286 sizeof(void*))) { 1287 reason = FAILURE_OOM; 1288 goto reject; 1289 } 1290 sc->SCp.phase = session->age; 1291 sc->SCp.ptr = (char *)task; 1292 1293 atomic_set(&task->refcount, 1); 1294 task->state = ISCSI_TASK_PENDING; 1295 task->conn = conn; 1296 task->sc = sc; 1297 INIT_LIST_HEAD(&task->running); 1298 list_add_tail(&task->running, &conn->xmitqueue); 1299 1300 if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) { 1301 if (iscsi_prep_scsi_cmd_pdu(task)) { 1302 sc->result = DID_ABORT << 16; 1303 sc->scsi_done = NULL; 1304 iscsi_complete_command(task); 1305 goto fault; 1306 } 1307 if (session->tt->xmit_task(task)) { 1308 sc->scsi_done = NULL; 1309 iscsi_complete_command(task); 1310 reason = FAILURE_SESSION_NOT_READY; 1311 goto reject; 1312 } 1313 } else 1314 scsi_queue_work(session->host, &conn->xmitwork); 1315 1316 session->queued_cmdsn++; 1317 spin_unlock(&session->lock); 1318 spin_lock(host->host_lock); 1319 return 0; 1320 1321 reject: 1322 
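	/*
	 * Command cannot be issued right now (cmd window closed, no free
	 * task, or the session is changing state): return busy so scsi-ml
	 * retries it later.
	 */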
spin_unlock(&session->lock); 1323 debug_scsi("cmd 0x%x rejected (%d)\n", sc->cmnd[0], reason); 1324 spin_lock(host->host_lock); 1325 return SCSI_MLQUEUE_TARGET_BUSY; 1326 1327 fault: 1328 spin_unlock(&session->lock); 1329 debug_scsi("iscsi: cmd 0x%x is not queued (%d)\n", sc->cmnd[0], reason); 1330 if (!scsi_bidi_cmnd(sc)) 1331 scsi_set_resid(sc, scsi_bufflen(sc)); 1332 else { 1333 scsi_out(sc)->resid = scsi_out(sc)->length; 1334 scsi_in(sc)->resid = scsi_in(sc)->length; 1335 } 1336 done(sc); 1337 spin_lock(host->host_lock); 1338 return 0; 1339 } 1340 EXPORT_SYMBOL_GPL(iscsi_queuecommand); 1341 1342 int iscsi_change_queue_depth(struct scsi_device *sdev, int depth) 1343 { 1344 if (depth > ISCSI_MAX_CMD_PER_LUN) 1345 depth = ISCSI_MAX_CMD_PER_LUN; 1346 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth); 1347 return sdev->queue_depth; 1348 } 1349 EXPORT_SYMBOL_GPL(iscsi_change_queue_depth); 1350 1351 void iscsi_session_recovery_timedout(struct iscsi_cls_session *cls_session) 1352 { 1353 struct iscsi_session *session = cls_session->dd_data; 1354 1355 spin_lock_bh(&session->lock); 1356 if (session->state != ISCSI_STATE_LOGGED_IN) { 1357 session->state = ISCSI_STATE_RECOVERY_FAILED; 1358 if (session->leadconn) 1359 wake_up(&session->leadconn->ehwait); 1360 } 1361 spin_unlock_bh(&session->lock); 1362 } 1363 EXPORT_SYMBOL_GPL(iscsi_session_recovery_timedout); 1364 1365 int iscsi_eh_target_reset(struct scsi_cmnd *sc) 1366 { 1367 struct iscsi_cls_session *cls_session; 1368 struct iscsi_session *session; 1369 struct iscsi_conn *conn; 1370 1371 cls_session = starget_to_session(scsi_target(sc->device)); 1372 session = cls_session->dd_data; 1373 conn = session->leadconn; 1374 1375 mutex_lock(&session->eh_mutex); 1376 spin_lock_bh(&session->lock); 1377 if (session->state == ISCSI_STATE_TERMINATE) { 1378 failed: 1379 debug_scsi("failing target reset: session terminated " 1380 "[CID %d age %d]\n", conn->id, session->age); 1381 spin_unlock_bh(&session->lock); 1382 mutex_unlock(&session->eh_mutex); 1383 return FAILED; 1384 } 1385 1386 spin_unlock_bh(&session->lock); 1387 mutex_unlock(&session->eh_mutex); 1388 /* 1389 * we drop the lock here but the leadconn cannot be destoyed while 1390 * we are in the scsi eh 1391 */ 1392 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); 1393 1394 debug_scsi("iscsi_eh_target_reset wait for relogin\n"); 1395 wait_event_interruptible(conn->ehwait, 1396 session->state == ISCSI_STATE_TERMINATE || 1397 session->state == ISCSI_STATE_LOGGED_IN || 1398 session->state == ISCSI_STATE_RECOVERY_FAILED); 1399 if (signal_pending(current)) 1400 flush_signals(current); 1401 1402 mutex_lock(&session->eh_mutex); 1403 spin_lock_bh(&session->lock); 1404 if (session->state == ISCSI_STATE_LOGGED_IN) 1405 iscsi_session_printk(KERN_INFO, session, 1406 "target reset succeeded\n"); 1407 else 1408 goto failed; 1409 spin_unlock_bh(&session->lock); 1410 mutex_unlock(&session->eh_mutex); 1411 return SUCCESS; 1412 } 1413 EXPORT_SYMBOL_GPL(iscsi_eh_target_reset); 1414 1415 static void iscsi_tmf_timedout(unsigned long data) 1416 { 1417 struct iscsi_conn *conn = (struct iscsi_conn *)data; 1418 struct iscsi_session *session = conn->session; 1419 1420 spin_lock(&session->lock); 1421 if (conn->tmf_state == TMF_QUEUED) { 1422 conn->tmf_state = TMF_TIMEDOUT; 1423 debug_scsi("tmf timedout\n"); 1424 /* unblock eh_abort() */ 1425 wake_up(&conn->ehwait); 1426 } 1427 spin_unlock(&session->lock); 1428 } 1429 1430 static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn, 1431 struct iscsi_tm *hdr, int age, 
1432 int timeout) 1433 { 1434 struct iscsi_session *session = conn->session; 1435 struct iscsi_task *task; 1436 1437 task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr, 1438 NULL, 0); 1439 if (!task) { 1440 spin_unlock_bh(&session->lock); 1441 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); 1442 spin_lock_bh(&session->lock); 1443 debug_scsi("tmf exec failure\n"); 1444 return -EPERM; 1445 } 1446 conn->tmfcmd_pdus_cnt++; 1447 conn->tmf_timer.expires = timeout * HZ + jiffies; 1448 conn->tmf_timer.function = iscsi_tmf_timedout; 1449 conn->tmf_timer.data = (unsigned long)conn; 1450 add_timer(&conn->tmf_timer); 1451 debug_scsi("tmf set timeout\n"); 1452 1453 spin_unlock_bh(&session->lock); 1454 mutex_unlock(&session->eh_mutex); 1455 1456 /* 1457 * block eh thread until: 1458 * 1459 * 1) tmf response 1460 * 2) tmf timeout 1461 * 3) session is terminated or restarted or userspace has 1462 * given up on recovery 1463 */ 1464 wait_event_interruptible(conn->ehwait, age != session->age || 1465 session->state != ISCSI_STATE_LOGGED_IN || 1466 conn->tmf_state != TMF_QUEUED); 1467 if (signal_pending(current)) 1468 flush_signals(current); 1469 del_timer_sync(&conn->tmf_timer); 1470 1471 mutex_lock(&session->eh_mutex); 1472 spin_lock_bh(&session->lock); 1473 /* if the session drops it will clean up the task */ 1474 if (age != session->age || 1475 session->state != ISCSI_STATE_LOGGED_IN) 1476 return -ENOTCONN; 1477 return 0; 1478 } 1479 1480 /* 1481 * Fail commands. session lock held and recv side suspended and xmit 1482 * thread flushed 1483 */ 1484 static void fail_all_commands(struct iscsi_conn *conn, unsigned lun, 1485 int error) 1486 { 1487 struct iscsi_task *task, *tmp; 1488 1489 if (conn->task && (conn->task->sc->device->lun == lun || lun == -1)) 1490 conn->task = NULL; 1491 1492 /* flush pending */ 1493 list_for_each_entry_safe(task, tmp, &conn->xmitqueue, running) { 1494 if (lun == task->sc->device->lun || lun == -1) { 1495 debug_scsi("failing pending sc %p itt 0x%x\n", 1496 task->sc, task->itt); 1497 fail_command(conn, task, error << 16); 1498 } 1499 } 1500 1501 list_for_each_entry_safe(task, tmp, &conn->requeue, running) { 1502 if (lun == task->sc->device->lun || lun == -1) { 1503 debug_scsi("failing requeued sc %p itt 0x%x\n", 1504 task->sc, task->itt); 1505 fail_command(conn, task, error << 16); 1506 } 1507 } 1508 1509 /* fail all other running */ 1510 list_for_each_entry_safe(task, tmp, &conn->run_list, running) { 1511 if (lun == task->sc->device->lun || lun == -1) { 1512 debug_scsi("failing in progress sc %p itt 0x%x\n", 1513 task->sc, task->itt); 1514 fail_command(conn, task, error << 16); 1515 } 1516 } 1517 } 1518 1519 void iscsi_suspend_tx(struct iscsi_conn *conn) 1520 { 1521 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); 1522 if (!(conn->session->tt->caps & CAP_DATA_PATH_OFFLOAD)) 1523 scsi_flush_work(conn->session->host); 1524 } 1525 EXPORT_SYMBOL_GPL(iscsi_suspend_tx); 1526 1527 static void iscsi_start_tx(struct iscsi_conn *conn) 1528 { 1529 clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); 1530 if (!(conn->session->tt->caps & CAP_DATA_PATH_OFFLOAD)) 1531 scsi_queue_work(conn->session->host, &conn->xmitwork); 1532 } 1533 1534 static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd) 1535 { 1536 struct iscsi_cls_session *cls_session; 1537 struct iscsi_session *session; 1538 struct iscsi_conn *conn; 1539 enum blk_eh_timer_return rc = BLK_EH_NOT_HANDLED; 1540 1541 cls_session = starget_to_session(scsi_target(scmd->device)); 1542 session = cls_session->dd_data; 
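	/*
	 * Decide whether to give the command more time: while the session
	 * is recovering or being torn down, or while a nop-out ping is in
	 * flight or about to be sent, ask the block layer to reset the
	 * timer and let the transport timer/iscsi eh drive recovery.
	 */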

	debug_scsi("scsi cmd %p timedout\n", scmd);

	spin_lock(&session->lock);
	if (session->state != ISCSI_STATE_LOGGED_IN) {
		/*
		 * We are probably in the middle of iscsi recovery so let
		 * that complete and handle the error.
		 */
		rc = BLK_EH_RESET_TIMER;
		goto done;
	}

	conn = session->leadconn;
	if (!conn) {
		/* In the middle of shutting down */
		rc = BLK_EH_RESET_TIMER;
		goto done;
	}

	if (!conn->recv_timeout && !conn->ping_timeout)
		goto done;
	/*
	 * if the ping timed out then we are in the middle of cleaning up
	 * and can let the iscsi eh handle it
	 */
	if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) +
			   (conn->ping_timeout * HZ), jiffies))
		rc = BLK_EH_RESET_TIMER;
	/*
	 * if we are about to check the transport then give the command
	 * more time
	 */
	if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ),
			   jiffies))
		rc = BLK_EH_RESET_TIMER;
	/* if in the middle of checking the transport then give us more time */
	if (conn->ping_task)
		rc = BLK_EH_RESET_TIMER;
done:
	spin_unlock(&session->lock);
	debug_scsi("return %s\n", rc == BLK_EH_RESET_TIMER ?
		   "timer reset" : "nh");
	return rc;
}

static void iscsi_check_transport_timeouts(unsigned long data)
{
	struct iscsi_conn *conn = (struct iscsi_conn *)data;
	struct iscsi_session *session = conn->session;
	unsigned long recv_timeout, next_timeout = 0, last_recv;

	spin_lock(&session->lock);
	if (session->state != ISCSI_STATE_LOGGED_IN)
		goto done;

	recv_timeout = conn->recv_timeout;
	if (!recv_timeout)
		goto done;

	recv_timeout *= HZ;
	last_recv = conn->last_recv;
	if (conn->ping_task &&
	    time_before_eq(conn->last_ping + (conn->ping_timeout * HZ),
			   jiffies)) {
		iscsi_conn_printk(KERN_ERR, conn, "ping timeout of %d secs "
				  "expired, last rx %lu, last ping %lu, "
				  "now %lu\n", conn->ping_timeout, last_recv,
				  conn->last_ping, jiffies);
		spin_unlock(&session->lock);
		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
		return;
	}

	if (time_before_eq(last_recv + recv_timeout, jiffies)) {
		/* send a ping to try to provoke some traffic */
		debug_scsi("Sending nopout as ping on conn %p\n", conn);
		iscsi_send_nopout(conn, NULL);
		next_timeout = conn->last_ping + (conn->ping_timeout * HZ);
	} else
		next_timeout = last_recv + recv_timeout;

	debug_scsi("Setting next tmo %lu\n", next_timeout);
	mod_timer(&conn->transport_timer, next_timeout);
done:
	spin_unlock(&session->lock);
}

static void iscsi_prep_abort_task_pdu(struct iscsi_task *task,
				      struct iscsi_tm *hdr)
{
	memset(hdr, 0, sizeof(*hdr));
	hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
	hdr->flags = ISCSI_TM_FUNC_ABORT_TASK & ISCSI_FLAG_TM_FUNC_MASK;
	hdr->flags |= ISCSI_FLAG_CMD_FINAL;
	memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
	hdr->rtt = task->hdr->itt;
	hdr->refcmdsn = task->hdr->cmdsn;
}

int iscsi_eh_abort(struct scsi_cmnd *sc)
{
	struct iscsi_cls_session *cls_session;
	struct iscsi_session *session;
	struct iscsi_conn *conn;
	struct iscsi_task *task;
	struct iscsi_tm *hdr;
	int rc, age;

	cls_session = starget_to_session(scsi_target(sc->device));
	session = cls_session->dd_data;

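	/*
	 * Abort path: under eh_mutex and the session lock, verify that the
	 * command actually reached libiscsi and the session has not been
	 * restarted, then either fail a still-pending task locally or send
	 * an ABORT TASK tmf and act on the target's response.
	 */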
	mutex_lock(&session->eh_mutex);
	spin_lock_bh(&session->lock);
	/*
	 * if the session was ISCSI_STATE_IN_RECOVERY then we may not have
	 * got the command.
	 */
	if (!sc->SCp.ptr) {
		debug_scsi("sc never reached iscsi layer or it completed.\n");
		spin_unlock_bh(&session->lock);
		mutex_unlock(&session->eh_mutex);
		return SUCCESS;
	}

	/*
	 * If we are not logged in or we have started a new session
	 * then let the host reset code handle this
	 */
	if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN ||
	    sc->SCp.phase != session->age) {
		spin_unlock_bh(&session->lock);
		mutex_unlock(&session->eh_mutex);
		return FAILED;
	}

	conn = session->leadconn;
	conn->eh_abort_cnt++;
	age = session->age;

	task = (struct iscsi_task *)sc->SCp.ptr;
	debug_scsi("aborting [sc %p itt 0x%x]\n", sc, task->itt);

	/* task completed before time out */
	if (!task->sc) {
		debug_scsi("sc completed while abort in progress\n");
		goto success;
	}

	if (task->state == ISCSI_TASK_PENDING) {
		fail_command(conn, task, DID_ABORT << 16);
		goto success;
	}

	/* only have one tmf outstanding at a time */
	if (conn->tmf_state != TMF_INITIAL)
		goto failed;
	conn->tmf_state = TMF_QUEUED;

	hdr = &conn->tmhdr;
	iscsi_prep_abort_task_pdu(task, hdr);

	if (iscsi_exec_task_mgmt_fn(conn, hdr, age, session->abort_timeout)) {
		rc = FAILED;
		goto failed;
	}

	switch (conn->tmf_state) {
	case TMF_SUCCESS:
		spin_unlock_bh(&session->lock);
		/*
		 * stop the tx side in case the target had sent an abort rsp
		 * but the initiator was still writing out data.
		 */
		iscsi_suspend_tx(conn);
		/*
		 * we do not stop the recv side because targets have so far
		 * been well behaved and have never sent a successful tmf
		 * response followed by more data for the cmd.
		 */
		spin_lock(&session->lock);
		fail_command(conn, task, DID_ABORT << 16);
		conn->tmf_state = TMF_INITIAL;
		spin_unlock(&session->lock);
		iscsi_start_tx(conn);
		goto success_unlocked;
	case TMF_TIMEDOUT:
		spin_unlock_bh(&session->lock);
		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
		goto failed_unlocked;
	case TMF_NOT_FOUND:
		if (!sc->SCp.ptr) {
			conn->tmf_state = TMF_INITIAL;
			/* task completed before tmf abort response */
			debug_scsi("sc completed while abort in progress\n");
			goto success;
		}
		/* fall through */
	default:
		conn->tmf_state = TMF_INITIAL;
		goto failed;
	}

success:
	spin_unlock_bh(&session->lock);
success_unlocked:
	debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, task->itt);
	mutex_unlock(&session->eh_mutex);
	return SUCCESS;

failed:
	spin_unlock_bh(&session->lock);
failed_unlocked:
	debug_scsi("abort failed [sc %p itt 0x%x]\n", sc,
		   task ?
task->itt : 0); 1758 mutex_unlock(&session->eh_mutex); 1759 return FAILED; 1760 } 1761 EXPORT_SYMBOL_GPL(iscsi_eh_abort); 1762 1763 static void iscsi_prep_lun_reset_pdu(struct scsi_cmnd *sc, struct iscsi_tm *hdr) 1764 { 1765 memset(hdr, 0, sizeof(*hdr)); 1766 hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE; 1767 hdr->flags = ISCSI_TM_FUNC_LOGICAL_UNIT_RESET & ISCSI_FLAG_TM_FUNC_MASK; 1768 hdr->flags |= ISCSI_FLAG_CMD_FINAL; 1769 int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun); 1770 hdr->rtt = RESERVED_ITT; 1771 } 1772 1773 int iscsi_eh_device_reset(struct scsi_cmnd *sc) 1774 { 1775 struct iscsi_cls_session *cls_session; 1776 struct iscsi_session *session; 1777 struct iscsi_conn *conn; 1778 struct iscsi_tm *hdr; 1779 int rc = FAILED; 1780 1781 cls_session = starget_to_session(scsi_target(sc->device)); 1782 session = cls_session->dd_data; 1783 1784 debug_scsi("LU Reset [sc %p lun %u]\n", sc, sc->device->lun); 1785 1786 mutex_lock(&session->eh_mutex); 1787 spin_lock_bh(&session->lock); 1788 /* 1789 * Just check if we are not logged in. We cannot check for 1790 * the phase because the reset could come from a ioctl. 1791 */ 1792 if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN) 1793 goto unlock; 1794 conn = session->leadconn; 1795 1796 /* only have one tmf outstanding at a time */ 1797 if (conn->tmf_state != TMF_INITIAL) 1798 goto unlock; 1799 conn->tmf_state = TMF_QUEUED; 1800 1801 hdr = &conn->tmhdr; 1802 iscsi_prep_lun_reset_pdu(sc, hdr); 1803 1804 if (iscsi_exec_task_mgmt_fn(conn, hdr, session->age, 1805 session->lu_reset_timeout)) { 1806 rc = FAILED; 1807 goto unlock; 1808 } 1809 1810 switch (conn->tmf_state) { 1811 case TMF_SUCCESS: 1812 break; 1813 case TMF_TIMEDOUT: 1814 spin_unlock_bh(&session->lock); 1815 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); 1816 goto done; 1817 default: 1818 conn->tmf_state = TMF_INITIAL; 1819 goto unlock; 1820 } 1821 1822 rc = SUCCESS; 1823 spin_unlock_bh(&session->lock); 1824 1825 iscsi_suspend_tx(conn); 1826 1827 spin_lock_bh(&session->lock); 1828 fail_all_commands(conn, sc->device->lun, DID_ERROR); 1829 conn->tmf_state = TMF_INITIAL; 1830 spin_unlock_bh(&session->lock); 1831 1832 iscsi_start_tx(conn); 1833 goto done; 1834 1835 unlock: 1836 spin_unlock_bh(&session->lock); 1837 done: 1838 debug_scsi("iscsi_eh_device_reset %s\n", 1839 rc == SUCCESS ? "SUCCESS" : "FAILED"); 1840 mutex_unlock(&session->eh_mutex); 1841 return rc; 1842 } 1843 EXPORT_SYMBOL_GPL(iscsi_eh_device_reset); 1844 1845 /* 1846 * Pre-allocate a pool of @max items of @item_size. By default, the pool 1847 * should be accessed via kfifo_{get,put} on q->queue. 1848 * Optionally, the caller can obtain the array of object pointers 1849 * by passing in a non-NULL @items pointer 1850 */ 1851 int 1852 iscsi_pool_init(struct iscsi_pool *q, int max, void ***items, int item_size) 1853 { 1854 int i, num_arrays = 1; 1855 1856 memset(q, 0, sizeof(*q)); 1857 1858 q->max = max; 1859 1860 /* If the user passed an items pointer, he wants a copy of 1861 * the array. 
*/ 1862 if (items) 1863 num_arrays++; 1864 q->pool = kzalloc(num_arrays * max * sizeof(void*), GFP_KERNEL); 1865 if (q->pool == NULL) 1866 goto enomem; 1867 1868 q->queue = kfifo_init((void*)q->pool, max * sizeof(void*), 1869 GFP_KERNEL, NULL); 1870 if (q->queue == ERR_PTR(-ENOMEM)) 1871 goto enomem; 1872 1873 for (i = 0; i < max; i++) { 1874 q->pool[i] = kzalloc(item_size, GFP_KERNEL); 1875 if (q->pool[i] == NULL) { 1876 q->max = i; 1877 goto enomem; 1878 } 1879 __kfifo_put(q->queue, (void*)&q->pool[i], sizeof(void*)); 1880 } 1881 1882 if (items) { 1883 *items = q->pool + max; 1884 memcpy(*items, q->pool, max * sizeof(void *)); 1885 } 1886 1887 return 0; 1888 1889 enomem: 1890 iscsi_pool_free(q); 1891 return -ENOMEM; 1892 } 1893 EXPORT_SYMBOL_GPL(iscsi_pool_init); 1894 1895 void iscsi_pool_free(struct iscsi_pool *q) 1896 { 1897 int i; 1898 1899 for (i = 0; i < q->max; i++) 1900 kfree(q->pool[i]); 1901 if (q->pool) 1902 kfree(q->pool); 1903 } 1904 EXPORT_SYMBOL_GPL(iscsi_pool_free); 1905 1906 /** 1907 * iscsi_host_add - add host to system 1908 * @shost: scsi host 1909 * @pdev: parent device 1910 * 1911 * This should be called by partial offload and software iscsi drivers 1912 * to add a host to the system. 1913 */ 1914 int iscsi_host_add(struct Scsi_Host *shost, struct device *pdev) 1915 { 1916 if (!shost->can_queue) 1917 shost->can_queue = ISCSI_DEF_XMIT_CMDS_MAX; 1918 1919 return scsi_add_host(shost, pdev); 1920 } 1921 EXPORT_SYMBOL_GPL(iscsi_host_add); 1922 1923 /** 1924 * iscsi_host_alloc - allocate a host and driver data 1925 * @sht: scsi host template 1926 * @dd_data_size: driver host data size 1927 * @qdepth: default device queue depth 1928 * 1929 * This should be called by partial offload and software iscsi drivers. 1930 * To access the driver specific memory use the iscsi_host_priv() macro. 1931 */ 1932 struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht, 1933 int dd_data_size, uint16_t qdepth) 1934 { 1935 struct Scsi_Host *shost; 1936 struct iscsi_host *ihost; 1937 1938 shost = scsi_host_alloc(sht, sizeof(struct iscsi_host) + dd_data_size); 1939 if (!shost) 1940 return NULL; 1941 shost->transportt->eh_timed_out = iscsi_eh_cmd_timed_out; 1942 1943 if (qdepth > ISCSI_MAX_CMD_PER_LUN || qdepth < 1) { 1944 if (qdepth != 0) 1945 printk(KERN_ERR "iscsi: invalid queue depth of %d. " 1946 "Queue depth must be between 1 and %d.\n", 1947 qdepth, ISCSI_MAX_CMD_PER_LUN); 1948 qdepth = ISCSI_DEF_CMD_PER_LUN; 1949 } 1950 shost->cmd_per_lun = qdepth; 1951 1952 ihost = shost_priv(shost); 1953 spin_lock_init(&ihost->lock); 1954 ihost->state = ISCSI_HOST_SETUP; 1955 ihost->num_sessions = 0; 1956 init_waitqueue_head(&ihost->session_removal_wq); 1957 return shost; 1958 } 1959 EXPORT_SYMBOL_GPL(iscsi_host_alloc); 1960 1961 static void iscsi_notify_host_removed(struct iscsi_cls_session *cls_session) 1962 { 1963 iscsi_session_failure(cls_session, ISCSI_ERR_INVALID_HOST); 1964 } 1965 1966 /** 1967 * iscsi_host_remove - remove host and sessions 1968 * @shost: scsi host 1969 * 1970 * If there are any sessions left, this will initiate the removal and wait 1971 * for the completion. 
1972 */ 1973 void iscsi_host_remove(struct Scsi_Host *shost) 1974 { 1975 struct iscsi_host *ihost = shost_priv(shost); 1976 unsigned long flags; 1977 1978 spin_lock_irqsave(&ihost->lock, flags); 1979 ihost->state = ISCSI_HOST_REMOVED; 1980 spin_unlock_irqrestore(&ihost->lock, flags); 1981 1982 iscsi_host_for_each_session(shost, iscsi_notify_host_removed); 1983 wait_event_interruptible(ihost->session_removal_wq, 1984 ihost->num_sessions == 0); 1985 if (signal_pending(current)) 1986 flush_signals(current); 1987 1988 scsi_remove_host(shost); 1989 } 1990 EXPORT_SYMBOL_GPL(iscsi_host_remove); 1991 1992 void iscsi_host_free(struct Scsi_Host *shost) 1993 { 1994 struct iscsi_host *ihost = shost_priv(shost); 1995 1996 kfree(ihost->netdev); 1997 kfree(ihost->hwaddress); 1998 kfree(ihost->initiatorname); 1999 scsi_host_put(shost); 2000 } 2001 EXPORT_SYMBOL_GPL(iscsi_host_free); 2002 2003 static void iscsi_host_dec_session_cnt(struct Scsi_Host *shost) 2004 { 2005 struct iscsi_host *ihost = shost_priv(shost); 2006 unsigned long flags; 2007 2008 shost = scsi_host_get(shost); 2009 if (!shost) { 2010 printk(KERN_ERR "Invalid state. Cannot notify host removal " 2011 "of session teardown event because host already " 2012 "removed.\n"); 2013 return; 2014 } 2015 2016 spin_lock_irqsave(&ihost->lock, flags); 2017 ihost->num_sessions--; 2018 if (ihost->num_sessions == 0) 2019 wake_up(&ihost->session_removal_wq); 2020 spin_unlock_irqrestore(&ihost->lock, flags); 2021 scsi_host_put(shost); 2022 } 2023 2024 /** 2025 * iscsi_session_setup - create iscsi cls session and host and session 2026 * @iscsit: iscsi transport template 2027 * @shost: scsi host 2028 * @cmds_max: total number of tasks (mgmt + scsi) the session can queue 2029 * @cmd_task_size: LLD task private data size 2030 * @initial_cmdsn: initial CmdSN 2031 * 2032 * This can be used by software iscsi_transports that allocate 2033 * a session per scsi host. 2034 * 2035 * Callers should set cmds_max to the largest total number (mgmt + scsi) of 2036 * tasks they support. The iscsi layer reserves ISCSI_MGMT_CMDS_MAX tasks 2037 * for nop handling and login/logout requests. 2038 */ 2039 struct iscsi_cls_session * 2040 iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost, 2041 uint16_t cmds_max, int cmd_task_size, 2042 uint32_t initial_cmdsn, unsigned int id) 2043 { 2044 struct iscsi_host *ihost = shost_priv(shost); 2045 struct iscsi_session *session; 2046 struct iscsi_cls_session *cls_session; 2047 int cmd_i, scsi_cmds, total_cmds = cmds_max; 2048 unsigned long flags; 2049 2050 spin_lock_irqsave(&ihost->lock, flags); 2051 if (ihost->state == ISCSI_HOST_REMOVED) { 2052 spin_unlock_irqrestore(&ihost->lock, flags); 2053 return NULL; 2054 } 2055 ihost->num_sessions++; 2056 spin_unlock_irqrestore(&ihost->lock, flags); 2057 2058 if (!total_cmds) 2059 total_cmds = ISCSI_DEF_XMIT_CMDS_MAX; 2060 /* 2061 * The iscsi layer needs some tasks for nop handling and tmfs, 2062 * so cmds_max must be at least ISCSI_MGMT_CMDS_MAX 2063 * + 1 command for scsi IO. 2064 */ 2065 if (total_cmds < ISCSI_TOTAL_CMDS_MIN) { 2066 printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue " 2067 "must be a power of two that is at least %d.\n", 2068 total_cmds, ISCSI_TOTAL_CMDS_MIN); 2069 goto dec_session_count; 2070 } 2071 2072 if (total_cmds > ISCSI_TOTAL_CMDS_MAX) { 2073 printk(KERN_ERR "iscsi: invalid can_queue of %d. 
can_queue " 2074 "must be a power of 2 less than or equal to %d.\n", 2075 cmds_max, ISCSI_TOTAL_CMDS_MAX); 2076 total_cmds = ISCSI_TOTAL_CMDS_MAX; 2077 } 2078 2079 if (!is_power_of_2(total_cmds)) { 2080 printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue " 2081 "must be a power of 2.\n", total_cmds); 2082 total_cmds = rounddown_pow_of_two(total_cmds); 2083 if (total_cmds < ISCSI_TOTAL_CMDS_MIN) 2084 return NULL; 2085 printk(KERN_INFO "iscsi: Rounding can_queue to %d.\n", 2086 total_cmds); 2087 } 2088 scsi_cmds = total_cmds - ISCSI_MGMT_CMDS_MAX; 2089 2090 cls_session = iscsi_alloc_session(shost, iscsit, 2091 sizeof(struct iscsi_session)); 2092 if (!cls_session) 2093 goto dec_session_count; 2094 session = cls_session->dd_data; 2095 session->cls_session = cls_session; 2096 session->host = shost; 2097 session->state = ISCSI_STATE_FREE; 2098 session->fast_abort = 1; 2099 session->lu_reset_timeout = 15; 2100 session->abort_timeout = 10; 2101 session->scsi_cmds_max = scsi_cmds; 2102 session->cmds_max = total_cmds; 2103 session->queued_cmdsn = session->cmdsn = initial_cmdsn; 2104 session->exp_cmdsn = initial_cmdsn + 1; 2105 session->max_cmdsn = initial_cmdsn + 1; 2106 session->max_r2t = 1; 2107 session->tt = iscsit; 2108 mutex_init(&session->eh_mutex); 2109 spin_lock_init(&session->lock); 2110 2111 /* initialize SCSI PDU commands pool */ 2112 if (iscsi_pool_init(&session->cmdpool, session->cmds_max, 2113 (void***)&session->cmds, 2114 cmd_task_size + sizeof(struct iscsi_task))) 2115 goto cmdpool_alloc_fail; 2116 2117 /* pre-format cmds pool with ITT */ 2118 for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) { 2119 struct iscsi_task *task = session->cmds[cmd_i]; 2120 2121 if (cmd_task_size) 2122 task->dd_data = &task[1]; 2123 task->itt = cmd_i; 2124 INIT_LIST_HEAD(&task->running); 2125 } 2126 2127 if (!try_module_get(iscsit->owner)) 2128 goto module_get_fail; 2129 2130 if (iscsi_add_session(cls_session, id)) 2131 goto cls_session_fail; 2132 2133 return cls_session; 2134 2135 cls_session_fail: 2136 module_put(iscsit->owner); 2137 module_get_fail: 2138 iscsi_pool_free(&session->cmdpool); 2139 cmdpool_alloc_fail: 2140 iscsi_free_session(cls_session); 2141 dec_session_count: 2142 iscsi_host_dec_session_cnt(shost); 2143 return NULL; 2144 } 2145 EXPORT_SYMBOL_GPL(iscsi_session_setup); 2146 2147 /** 2148 * iscsi_session_teardown - destroy session, host, and cls_session 2149 * @cls_session: iscsi session 2150 * 2151 * The driver must have called iscsi_remove_session before 2152 * calling this. 
2153 */ 2154 void iscsi_session_teardown(struct iscsi_cls_session *cls_session) 2155 { 2156 struct iscsi_session *session = cls_session->dd_data; 2157 struct module *owner = cls_session->transport->owner; 2158 struct Scsi_Host *shost = session->host; 2159 2160 iscsi_pool_free(&session->cmdpool); 2161 2162 kfree(session->password); 2163 kfree(session->password_in); 2164 kfree(session->username); 2165 kfree(session->username_in); 2166 kfree(session->targetname); 2167 kfree(session->initiatorname); 2168 kfree(session->ifacename); 2169 2170 iscsi_destroy_session(cls_session); 2171 iscsi_host_dec_session_cnt(shost); 2172 module_put(owner); 2173 } 2174 EXPORT_SYMBOL_GPL(iscsi_session_teardown); 2175 2176 /** 2177 * iscsi_conn_setup - create iscsi_cls_conn and iscsi_conn 2178 * @cls_session: iscsi_cls_session 2179 * @dd_size: private driver data size 2180 * @conn_idx: cid 2181 */ 2182 struct iscsi_cls_conn * 2183 iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size, 2184 uint32_t conn_idx) 2185 { 2186 struct iscsi_session *session = cls_session->dd_data; 2187 struct iscsi_conn *conn; 2188 struct iscsi_cls_conn *cls_conn; 2189 char *data; 2190 2191 cls_conn = iscsi_create_conn(cls_session, sizeof(*conn) + dd_size, 2192 conn_idx); 2193 if (!cls_conn) 2194 return NULL; 2195 conn = cls_conn->dd_data; 2196 memset(conn, 0, sizeof(*conn) + dd_size); 2197 2198 conn->dd_data = cls_conn->dd_data + sizeof(*conn); 2199 conn->session = session; 2200 conn->cls_conn = cls_conn; 2201 conn->c_stage = ISCSI_CONN_INITIAL_STAGE; 2202 conn->id = conn_idx; 2203 conn->exp_statsn = 0; 2204 conn->tmf_state = TMF_INITIAL; 2205 2206 init_timer(&conn->transport_timer); 2207 conn->transport_timer.data = (unsigned long)conn; 2208 conn->transport_timer.function = iscsi_check_transport_timeouts; 2209 2210 INIT_LIST_HEAD(&conn->run_list); 2211 INIT_LIST_HEAD(&conn->mgmt_run_list); 2212 INIT_LIST_HEAD(&conn->mgmtqueue); 2213 INIT_LIST_HEAD(&conn->xmitqueue); 2214 INIT_LIST_HEAD(&conn->requeue); 2215 INIT_WORK(&conn->xmitwork, iscsi_xmitworker); 2216 2217 /* allocate login_task used for the login/text sequences */ 2218 spin_lock_bh(&session->lock); 2219 if (!__kfifo_get(session->cmdpool.queue, 2220 (void*)&conn->login_task, 2221 sizeof(void*))) { 2222 spin_unlock_bh(&session->lock); 2223 goto login_task_alloc_fail; 2224 } 2225 spin_unlock_bh(&session->lock); 2226 2227 data = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN, GFP_KERNEL); 2228 if (!data) 2229 goto login_task_data_alloc_fail; 2230 conn->login_task->data = conn->data = data; 2231 2232 init_timer(&conn->tmf_timer); 2233 init_waitqueue_head(&conn->ehwait); 2234 2235 return cls_conn; 2236 2237 login_task_data_alloc_fail: 2238 __kfifo_put(session->cmdpool.queue, (void*)&conn->login_task, 2239 sizeof(void*)); 2240 login_task_alloc_fail: 2241 iscsi_destroy_conn(cls_conn); 2242 return NULL; 2243 } 2244 EXPORT_SYMBOL_GPL(iscsi_conn_setup); 2245 2246 /** 2247 * iscsi_conn_teardown - teardown iscsi connection 2248 * @cls_conn: iscsi class connection 2249 * 2250 * TODO: we may need to make this into a two step process 2251 * like scsi-ml's remove + put host 2252 */ 2253 void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn) 2254 { 2255 struct iscsi_conn *conn = cls_conn->dd_data; 2256 struct iscsi_session *session = conn->session; 2257 unsigned long flags; 2258 2259 del_timer_sync(&conn->transport_timer); 2260 2261 spin_lock_bh(&session->lock); 2262 conn->c_stage = ISCSI_CONN_CLEANUP_WAIT; 2263 if (session->leadconn == conn) { 2264 /* 2265 * leading connection?
then give up on recovery. 2266 */ 2267 session->state = ISCSI_STATE_TERMINATE; 2268 wake_up(&conn->ehwait); 2269 } 2270 spin_unlock_bh(&session->lock); 2271 2272 /* 2273 * Block until all in-progress commands for this connection 2274 * time out or fail. 2275 */ 2276 for (;;) { 2277 spin_lock_irqsave(session->host->host_lock, flags); 2278 if (!session->host->host_busy) { /* OK for ERL == 0 */ 2279 spin_unlock_irqrestore(session->host->host_lock, flags); 2280 break; 2281 } 2282 spin_unlock_irqrestore(session->host->host_lock, flags); 2283 msleep_interruptible(500); 2284 iscsi_conn_printk(KERN_INFO, conn, "iscsi conn_destroy(): " 2285 "host_busy %d host_failed %d\n", 2286 session->host->host_busy, 2287 session->host->host_failed); 2288 /* 2289 * force eh_abort() to unblock 2290 */ 2291 wake_up(&conn->ehwait); 2292 } 2293 2294 /* flush queued up work because we free the connection below */ 2295 iscsi_suspend_tx(conn); 2296 2297 spin_lock_bh(&session->lock); 2298 kfree(conn->data); 2299 kfree(conn->persistent_address); 2300 __kfifo_put(session->cmdpool.queue, (void*)&conn->login_task, 2301 sizeof(void*)); 2302 if (session->leadconn == conn) 2303 session->leadconn = NULL; 2304 spin_unlock_bh(&session->lock); 2305 2306 iscsi_destroy_conn(cls_conn); 2307 } 2308 EXPORT_SYMBOL_GPL(iscsi_conn_teardown); 2309 2310 int iscsi_conn_start(struct iscsi_cls_conn *cls_conn) 2311 { 2312 struct iscsi_conn *conn = cls_conn->dd_data; 2313 struct iscsi_session *session = conn->session; 2314 2315 if (!session) { 2316 iscsi_conn_printk(KERN_ERR, conn, 2317 "can't start unbound connection\n"); 2318 return -EPERM; 2319 } 2320 2321 if ((session->imm_data_en || !session->initial_r2t_en) && 2322 session->first_burst > session->max_burst) { 2323 iscsi_conn_printk(KERN_INFO, conn, "invalid burst lengths: " 2324 "first_burst %d max_burst %d\n", 2325 session->first_burst, session->max_burst); 2326 return -EINVAL; 2327 } 2328 2329 if (conn->ping_timeout && !conn->recv_timeout) { 2330 iscsi_conn_printk(KERN_ERR, conn, "invalid recv timeout of " 2331 "zero. Using 5 seconds.\n"); 2332 conn->recv_timeout = 5; 2333 } 2334 2335 if (conn->recv_timeout && !conn->ping_timeout) { 2336 iscsi_conn_printk(KERN_ERR, conn, "invalid ping timeout of " 2337 "zero. Using 5 seconds.\n"); 2338 conn->ping_timeout = 5; 2339 } 2340 2341 spin_lock_bh(&session->lock); 2342 conn->c_stage = ISCSI_CONN_STARTED; 2343 session->state = ISCSI_STATE_LOGGED_IN; 2344 session->queued_cmdsn = session->cmdsn; 2345 2346 conn->last_recv = jiffies; 2347 conn->last_ping = jiffies; 2348 if (conn->recv_timeout && conn->ping_timeout) 2349 mod_timer(&conn->transport_timer, 2350 jiffies + (conn->recv_timeout * HZ)); 2351 2352 switch(conn->stop_stage) { 2353 case STOP_CONN_RECOVER: 2354 /* 2355 * unblock eh_abort() if it is blocked.
re-try all 2356 * commands after successful recovery 2357 */ 2358 conn->stop_stage = 0; 2359 conn->tmf_state = TMF_INITIAL; 2360 session->age++; 2361 if (session->age == 16) 2362 session->age = 0; 2363 break; 2364 case STOP_CONN_TERM: 2365 conn->stop_stage = 0; 2366 break; 2367 default: 2368 break; 2369 } 2370 spin_unlock_bh(&session->lock); 2371 2372 iscsi_unblock_session(session->cls_session); 2373 wake_up(&conn->ehwait); 2374 return 0; 2375 } 2376 EXPORT_SYMBOL_GPL(iscsi_conn_start); 2377 2378 static void 2379 flush_control_queues(struct iscsi_session *session, struct iscsi_conn *conn) 2380 { 2381 struct iscsi_task *task, *tmp; 2382 2383 /* handle pending */ 2384 list_for_each_entry_safe(task, tmp, &conn->mgmtqueue, running) { 2385 debug_scsi("flushing pending mgmt task itt 0x%x\n", task->itt); 2386 /* release ref from prep task */ 2387 __iscsi_put_task(task); 2388 } 2389 2390 /* handle running */ 2391 list_for_each_entry_safe(task, tmp, &conn->mgmt_run_list, running) { 2392 debug_scsi("flushing running mgmt task itt 0x%x\n", task->itt); 2393 /* release ref from prep task */ 2394 __iscsi_put_task(task); 2395 } 2396 2397 conn->task = NULL; 2398 } 2399 2400 static void iscsi_start_session_recovery(struct iscsi_session *session, 2401 struct iscsi_conn *conn, int flag) 2402 { 2403 int old_stop_stage; 2404 2405 del_timer_sync(&conn->transport_timer); 2406 2407 mutex_lock(&session->eh_mutex); 2408 spin_lock_bh(&session->lock); 2409 if (conn->stop_stage == STOP_CONN_TERM) { 2410 spin_unlock_bh(&session->lock); 2411 mutex_unlock(&session->eh_mutex); 2412 return; 2413 } 2414 2415 /* 2416 * When this is called for the in_login state, we only want to clean 2417 * up the login task and connection. We do not need to block and set 2418 * the recovery state again 2419 */ 2420 if (flag == STOP_CONN_TERM) 2421 session->state = ISCSI_STATE_TERMINATE; 2422 else if (conn->stop_stage != STOP_CONN_RECOVER) 2423 session->state = ISCSI_STATE_IN_RECOVERY; 2424 2425 old_stop_stage = conn->stop_stage; 2426 conn->stop_stage = flag; 2427 conn->c_stage = ISCSI_CONN_STOPPED; 2428 spin_unlock_bh(&session->lock); 2429 2430 iscsi_suspend_tx(conn); 2431 /* 2432 * for connection level recovery we should not calculate 2433 * header digest. conn->hdr_size used for optimization 2434 * in hdr_extract() and will be re-negotiated at 2435 * set_param() time. 2436 */ 2437 if (flag == STOP_CONN_RECOVER) { 2438 conn->hdrdgst_en = 0; 2439 conn->datadgst_en = 0; 2440 if (session->state == ISCSI_STATE_IN_RECOVERY && 2441 old_stop_stage != STOP_CONN_RECOVER) { 2442 debug_scsi("blocking session\n"); 2443 iscsi_block_session(session->cls_session); 2444 } 2445 } 2446 2447 /* 2448 * flush queues. 
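 * Queued and running SCSI commands are completed back to scsi-ml with
 * DID_TRANSPORT_DISRUPTED when the session is expected to recover, or with
 * DID_ERROR when the connection is being terminated; pending and running
 * mgmt tasks just drop the reference taken when they were prepped.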
2449 */ 2450 spin_lock_bh(&session->lock); 2451 if (flag == STOP_CONN_RECOVER) 2452 fail_all_commands(conn, -1, DID_TRANSPORT_DISRUPTED); 2453 else 2454 fail_all_commands(conn, -1, DID_ERROR); 2455 flush_control_queues(session, conn); 2456 spin_unlock_bh(&session->lock); 2457 mutex_unlock(&session->eh_mutex); 2458 } 2459 2460 void iscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag) 2461 { 2462 struct iscsi_conn *conn = cls_conn->dd_data; 2463 struct iscsi_session *session = conn->session; 2464 2465 switch (flag) { 2466 case STOP_CONN_RECOVER: 2467 case STOP_CONN_TERM: 2468 iscsi_start_session_recovery(session, conn, flag); 2469 break; 2470 default: 2471 iscsi_conn_printk(KERN_ERR, conn, 2472 "invalid stop flag %d\n", flag); 2473 } 2474 } 2475 EXPORT_SYMBOL_GPL(iscsi_conn_stop); 2476 2477 int iscsi_conn_bind(struct iscsi_cls_session *cls_session, 2478 struct iscsi_cls_conn *cls_conn, int is_leading) 2479 { 2480 struct iscsi_session *session = cls_session->dd_data; 2481 struct iscsi_conn *conn = cls_conn->dd_data; 2482 2483 spin_lock_bh(&session->lock); 2484 if (is_leading) 2485 session->leadconn = conn; 2486 spin_unlock_bh(&session->lock); 2487 2488 /* 2489 * Unblock xmitworker(), Login Phase will pass through. 2490 */ 2491 clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx); 2492 clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); 2493 return 0; 2494 } 2495 EXPORT_SYMBOL_GPL(iscsi_conn_bind); 2496 2497 2498 int iscsi_set_param(struct iscsi_cls_conn *cls_conn, 2499 enum iscsi_param param, char *buf, int buflen) 2500 { 2501 struct iscsi_conn *conn = cls_conn->dd_data; 2502 struct iscsi_session *session = conn->session; 2503 uint32_t value; 2504 2505 switch(param) { 2506 case ISCSI_PARAM_FAST_ABORT: 2507 sscanf(buf, "%d", &session->fast_abort); 2508 break; 2509 case ISCSI_PARAM_ABORT_TMO: 2510 sscanf(buf, "%d", &session->abort_timeout); 2511 break; 2512 case ISCSI_PARAM_LU_RESET_TMO: 2513 sscanf(buf, "%d", &session->lu_reset_timeout); 2514 break; 2515 case ISCSI_PARAM_PING_TMO: 2516 sscanf(buf, "%d", &conn->ping_timeout); 2517 break; 2518 case ISCSI_PARAM_RECV_TMO: 2519 sscanf(buf, "%d", &conn->recv_timeout); 2520 break; 2521 case ISCSI_PARAM_MAX_RECV_DLENGTH: 2522 sscanf(buf, "%d", &conn->max_recv_dlength); 2523 break; 2524 case ISCSI_PARAM_MAX_XMIT_DLENGTH: 2525 sscanf(buf, "%d", &conn->max_xmit_dlength); 2526 break; 2527 case ISCSI_PARAM_HDRDGST_EN: 2528 sscanf(buf, "%d", &conn->hdrdgst_en); 2529 break; 2530 case ISCSI_PARAM_DATADGST_EN: 2531 sscanf(buf, "%d", &conn->datadgst_en); 2532 break; 2533 case ISCSI_PARAM_INITIAL_R2T_EN: 2534 sscanf(buf, "%d", &session->initial_r2t_en); 2535 break; 2536 case ISCSI_PARAM_MAX_R2T: 2537 sscanf(buf, "%d", &session->max_r2t); 2538 break; 2539 case ISCSI_PARAM_IMM_DATA_EN: 2540 sscanf(buf, "%d", &session->imm_data_en); 2541 break; 2542 case ISCSI_PARAM_FIRST_BURST: 2543 sscanf(buf, "%d", &session->first_burst); 2544 break; 2545 case ISCSI_PARAM_MAX_BURST: 2546 sscanf(buf, "%d", &session->max_burst); 2547 break; 2548 case ISCSI_PARAM_PDU_INORDER_EN: 2549 sscanf(buf, "%d", &session->pdu_inorder_en); 2550 break; 2551 case ISCSI_PARAM_DATASEQ_INORDER_EN: 2552 sscanf(buf, "%d", &session->dataseq_inorder_en); 2553 break; 2554 case ISCSI_PARAM_ERL: 2555 sscanf(buf, "%d", &session->erl); 2556 break; 2557 case ISCSI_PARAM_IFMARKER_EN: 2558 sscanf(buf, "%d", &value); 2559 BUG_ON(value); 2560 break; 2561 case ISCSI_PARAM_OFMARKER_EN: 2562 sscanf(buf, "%d", &value); 2563 BUG_ON(value); 2564 break; 2565 case ISCSI_PARAM_EXP_STATSN: 2566 sscanf(buf, "%u", 
&conn->exp_statsn); 2567 break; 2568 case ISCSI_PARAM_USERNAME: 2569 kfree(session->username); 2570 session->username = kstrdup(buf, GFP_KERNEL); 2571 if (!session->username) 2572 return -ENOMEM; 2573 break; 2574 case ISCSI_PARAM_USERNAME_IN: 2575 kfree(session->username_in); 2576 session->username_in = kstrdup(buf, GFP_KERNEL); 2577 if (!session->username_in) 2578 return -ENOMEM; 2579 break; 2580 case ISCSI_PARAM_PASSWORD: 2581 kfree(session->password); 2582 session->password = kstrdup(buf, GFP_KERNEL); 2583 if (!session->password) 2584 return -ENOMEM; 2585 break; 2586 case ISCSI_PARAM_PASSWORD_IN: 2587 kfree(session->password_in); 2588 session->password_in = kstrdup(buf, GFP_KERNEL); 2589 if (!session->password_in) 2590 return -ENOMEM; 2591 break; 2592 case ISCSI_PARAM_TARGET_NAME: 2593 /* this should not change between logins */ 2594 if (session->targetname) 2595 break; 2596 2597 session->targetname = kstrdup(buf, GFP_KERNEL); 2598 if (!session->targetname) 2599 return -ENOMEM; 2600 break; 2601 case ISCSI_PARAM_TPGT: 2602 sscanf(buf, "%d", &session->tpgt); 2603 break; 2604 case ISCSI_PARAM_PERSISTENT_PORT: 2605 sscanf(buf, "%d", &conn->persistent_port); 2606 break; 2607 case ISCSI_PARAM_PERSISTENT_ADDRESS: 2608 /* 2609 * this is the address returned in discovery so it should 2610 * not change between logins. 2611 */ 2612 if (conn->persistent_address) 2613 break; 2614 2615 conn->persistent_address = kstrdup(buf, GFP_KERNEL); 2616 if (!conn->persistent_address) 2617 return -ENOMEM; 2618 break; 2619 case ISCSI_PARAM_IFACE_NAME: 2620 if (!session->ifacename) 2621 session->ifacename = kstrdup(buf, GFP_KERNEL); 2622 break; 2623 case ISCSI_PARAM_INITIATOR_NAME: 2624 if (!session->initiatorname) 2625 session->initiatorname = kstrdup(buf, GFP_KERNEL); 2626 break; 2627 default: 2628 return -ENOSYS; 2629 } 2630 2631 return 0; 2632 } 2633 EXPORT_SYMBOL_GPL(iscsi_set_param); 2634 2635 int iscsi_session_get_param(struct iscsi_cls_session *cls_session, 2636 enum iscsi_param param, char *buf) 2637 { 2638 struct iscsi_session *session = cls_session->dd_data; 2639 int len; 2640 2641 switch(param) { 2642 case ISCSI_PARAM_FAST_ABORT: 2643 len = sprintf(buf, "%d\n", session->fast_abort); 2644 break; 2645 case ISCSI_PARAM_ABORT_TMO: 2646 len = sprintf(buf, "%d\n", session->abort_timeout); 2647 break; 2648 case ISCSI_PARAM_LU_RESET_TMO: 2649 len = sprintf(buf, "%d\n", session->lu_reset_timeout); 2650 break; 2651 case ISCSI_PARAM_INITIAL_R2T_EN: 2652 len = sprintf(buf, "%d\n", session->initial_r2t_en); 2653 break; 2654 case ISCSI_PARAM_MAX_R2T: 2655 len = sprintf(buf, "%hu\n", session->max_r2t); 2656 break; 2657 case ISCSI_PARAM_IMM_DATA_EN: 2658 len = sprintf(buf, "%d\n", session->imm_data_en); 2659 break; 2660 case ISCSI_PARAM_FIRST_BURST: 2661 len = sprintf(buf, "%u\n", session->first_burst); 2662 break; 2663 case ISCSI_PARAM_MAX_BURST: 2664 len = sprintf(buf, "%u\n", session->max_burst); 2665 break; 2666 case ISCSI_PARAM_PDU_INORDER_EN: 2667 len = sprintf(buf, "%d\n", session->pdu_inorder_en); 2668 break; 2669 case ISCSI_PARAM_DATASEQ_INORDER_EN: 2670 len = sprintf(buf, "%d\n", session->dataseq_inorder_en); 2671 break; 2672 case ISCSI_PARAM_ERL: 2673 len = sprintf(buf, "%d\n", session->erl); 2674 break; 2675 case ISCSI_PARAM_TARGET_NAME: 2676 len = sprintf(buf, "%s\n", session->targetname); 2677 break; 2678 case ISCSI_PARAM_TPGT: 2679 len = sprintf(buf, "%d\n", session->tpgt); 2680 break; 2681 case ISCSI_PARAM_USERNAME: 2682 len = sprintf(buf, "%s\n", session->username); 2683 break; 2684 case 
ISCSI_PARAM_USERNAME_IN: 2685 len = sprintf(buf, "%s\n", session->username_in); 2686 break; 2687 case ISCSI_PARAM_PASSWORD: 2688 len = sprintf(buf, "%s\n", session->password); 2689 break; 2690 case ISCSI_PARAM_PASSWORD_IN: 2691 len = sprintf(buf, "%s\n", session->password_in); 2692 break; 2693 case ISCSI_PARAM_IFACE_NAME: 2694 len = sprintf(buf, "%s\n", session->ifacename); 2695 break; 2696 case ISCSI_PARAM_INITIATOR_NAME: 2697 if (!session->initiatorname) 2698 len = sprintf(buf, "%s\n", "unknown"); 2699 else 2700 len = sprintf(buf, "%s\n", session->initiatorname); 2701 break; 2702 default: 2703 return -ENOSYS; 2704 } 2705 2706 return len; 2707 } 2708 EXPORT_SYMBOL_GPL(iscsi_session_get_param); 2709 2710 int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn, 2711 enum iscsi_param param, char *buf) 2712 { 2713 struct iscsi_conn *conn = cls_conn->dd_data; 2714 int len; 2715 2716 switch(param) { 2717 case ISCSI_PARAM_PING_TMO: 2718 len = sprintf(buf, "%u\n", conn->ping_timeout); 2719 break; 2720 case ISCSI_PARAM_RECV_TMO: 2721 len = sprintf(buf, "%u\n", conn->recv_timeout); 2722 break; 2723 case ISCSI_PARAM_MAX_RECV_DLENGTH: 2724 len = sprintf(buf, "%u\n", conn->max_recv_dlength); 2725 break; 2726 case ISCSI_PARAM_MAX_XMIT_DLENGTH: 2727 len = sprintf(buf, "%u\n", conn->max_xmit_dlength); 2728 break; 2729 case ISCSI_PARAM_HDRDGST_EN: 2730 len = sprintf(buf, "%d\n", conn->hdrdgst_en); 2731 break; 2732 case ISCSI_PARAM_DATADGST_EN: 2733 len = sprintf(buf, "%d\n", conn->datadgst_en); 2734 break; 2735 case ISCSI_PARAM_IFMARKER_EN: 2736 len = sprintf(buf, "%d\n", conn->ifmarker_en); 2737 break; 2738 case ISCSI_PARAM_OFMARKER_EN: 2739 len = sprintf(buf, "%d\n", conn->ofmarker_en); 2740 break; 2741 case ISCSI_PARAM_EXP_STATSN: 2742 len = sprintf(buf, "%u\n", conn->exp_statsn); 2743 break; 2744 case ISCSI_PARAM_PERSISTENT_PORT: 2745 len = sprintf(buf, "%d\n", conn->persistent_port); 2746 break; 2747 case ISCSI_PARAM_PERSISTENT_ADDRESS: 2748 len = sprintf(buf, "%s\n", conn->persistent_address); 2749 break; 2750 default: 2751 return -ENOSYS; 2752 } 2753 2754 return len; 2755 } 2756 EXPORT_SYMBOL_GPL(iscsi_conn_get_param); 2757 2758 int iscsi_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param, 2759 char *buf) 2760 { 2761 struct iscsi_host *ihost = shost_priv(shost); 2762 int len; 2763 2764 switch (param) { 2765 case ISCSI_HOST_PARAM_NETDEV_NAME: 2766 if (!ihost->netdev) 2767 len = sprintf(buf, "%s\n", "default"); 2768 else 2769 len = sprintf(buf, "%s\n", ihost->netdev); 2770 break; 2771 case ISCSI_HOST_PARAM_HWADDRESS: 2772 if (!ihost->hwaddress) 2773 len = sprintf(buf, "%s\n", "default"); 2774 else 2775 len = sprintf(buf, "%s\n", ihost->hwaddress); 2776 break; 2777 case ISCSI_HOST_PARAM_INITIATOR_NAME: 2778 if (!ihost->initiatorname) 2779 len = sprintf(buf, "%s\n", "unknown"); 2780 else 2781 len = sprintf(buf, "%s\n", ihost->initiatorname); 2782 break; 2783 case ISCSI_HOST_PARAM_IPADDRESS: 2784 if (!strlen(ihost->local_address)) 2785 len = sprintf(buf, "%s\n", "unknown"); 2786 else 2787 len = sprintf(buf, "%s\n", 2788 ihost->local_address); 2789 break; 2790 default: 2791 return -ENOSYS; 2792 } 2793 2794 return len; 2795 } 2796 EXPORT_SYMBOL_GPL(iscsi_host_get_param); 2797 2798 int iscsi_host_set_param(struct Scsi_Host *shost, enum iscsi_host_param param, 2799 char *buf, int buflen) 2800 { 2801 struct iscsi_host *ihost = shost_priv(shost); 2802 2803 switch (param) { 2804 case ISCSI_HOST_PARAM_NETDEV_NAME: 2805 if (!ihost->netdev) 2806 ihost->netdev = kstrdup(buf, GFP_KERNEL); 2807 
break; 2808 case ISCSI_HOST_PARAM_HWADDRESS: 2809 if (!ihost->hwaddress) 2810 ihost->hwaddress = kstrdup(buf, GFP_KERNEL); 2811 break; 2812 case ISCSI_HOST_PARAM_INITIATOR_NAME: 2813 if (!ihost->initiatorname) 2814 ihost->initiatorname = kstrdup(buf, GFP_KERNEL); 2815 break; 2816 default: 2817 return -ENOSYS; 2818 } 2819 2820 return 0; 2821 } 2822 EXPORT_SYMBOL_GPL(iscsi_host_set_param); 2823 2824 MODULE_AUTHOR("Mike Christie"); 2825 MODULE_DESCRIPTION("iSCSI library functions"); 2826 MODULE_LICENSE("GPL"); 2827
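/*
 * Illustrative sketch only (names below are examples, not part of this
 * file): a software LLD typically wires the parameter helpers above
 * directly into its struct iscsi_transport template, e.g.
 *
 *	static struct iscsi_transport example_iscsi_transport = {
 *		.set_param		= iscsi_set_param,
 *		.get_conn_param		= iscsi_conn_get_param,
 *		.get_session_param	= iscsi_session_get_param,
 *		.get_host_param		= iscsi_host_get_param,
 *		.set_host_param		= iscsi_host_set_param,
 *	};
 *
 * See a software iscsi driver such as iscsi_tcp for a complete user of
 * these exports.
 */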