/*
 * iSCSI lib functions
 *
 * Copyright (C) 2006 Red Hat, Inc.  All rights reserved.
 * Copyright (C) 2004 - 2006 Mike Christie
 * Copyright (C) 2004 - 2005 Dmitry Yusupov
 * Copyright (C) 2004 - 2005 Alex Aizman
 * maintained by open-iscsi@googlegroups.com
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
#include <linux/types.h>
#include <linux/kfifo.h>
#include <linux/delay.h>
#include <linux/log2.h>
#include <asm/unaligned.h>
#include <net/tcp.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi.h>
#include <scsi/iscsi_proto.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_iscsi.h>
#include <scsi/libiscsi.h>

/* Serial Number Arithmetic, 32 bits, less than, RFC1982 */
#define SNA32_CHECK 2147483648UL

static int iscsi_sna_lt(u32 n1, u32 n2)
{
	return n1 != n2 && ((n1 < n2 && (n2 - n1 < SNA32_CHECK)) ||
			    (n1 > n2 && (n2 - n1 < SNA32_CHECK)));
}

/* Serial Number Arithmetic, 32 bits, less than or equal, RFC1982 */
static int iscsi_sna_lte(u32 n1, u32 n2)
{
	return n1 == n2 || ((n1 < n2 && (n2 - n1 < SNA32_CHECK)) ||
			    (n1 > n2 && (n2 - n1 < SNA32_CHECK)));
}

void
iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
{
	uint32_t max_cmdsn = be32_to_cpu(hdr->max_cmdsn);
	uint32_t exp_cmdsn = be32_to_cpu(hdr->exp_cmdsn);

	/*
	 * standard specifies this check for when to update expected and
	 * max sequence numbers
	 */
	if (iscsi_sna_lt(max_cmdsn, exp_cmdsn - 1))
		return;

	if (exp_cmdsn != session->exp_cmdsn &&
	    !iscsi_sna_lt(exp_cmdsn, session->exp_cmdsn))
		session->exp_cmdsn = exp_cmdsn;

	if (max_cmdsn != session->max_cmdsn &&
	    !iscsi_sna_lt(max_cmdsn, session->max_cmdsn)) {
		session->max_cmdsn = max_cmdsn;
		/*
		 * if the window closed with IO queued, then kick the
		 * xmit thread
		 */
		if (!list_empty(&session->leadconn->xmitqueue) ||
		    !list_empty(&session->leadconn->mgmtqueue)) {
			if (!(session->tt->caps & CAP_DATA_PATH_OFFLOAD))
				scsi_queue_work(session->host,
						&session->leadconn->xmitwork);
		}
	}
}
EXPORT_SYMBOL_GPL(iscsi_update_cmdsn);

void iscsi_prep_unsolicit_data_pdu(struct iscsi_task *task,
				   struct iscsi_data *hdr)
{
	struct iscsi_conn *conn = task->conn;

	memset(hdr, 0, sizeof(struct iscsi_data));
	hdr->ttt = cpu_to_be32(ISCSI_RESERVED_TAG);
	hdr->datasn = cpu_to_be32(task->unsol_datasn);
	task->unsol_datasn++;
	hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
	memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));

	hdr->itt = task->hdr->itt;
	hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
	hdr->offset = cpu_to_be32(task->unsol_offset);
	if (task->unsol_count > conn->max_xmit_dlength) {
		hton24(hdr->dlength, conn->max_xmit_dlength);
		task->data_count = conn->max_xmit_dlength;
		task->unsol_offset += task->data_count;
		hdr->flags = 0;
	} else {
		hton24(hdr->dlength, task->unsol_count);
		task->data_count = task->unsol_count;
		hdr->flags = ISCSI_FLAG_CMD_FINAL;
	}
}
EXPORT_SYMBOL_GPL(iscsi_prep_unsolicit_data_pdu);

static int iscsi_add_hdr(struct iscsi_task *task, unsigned len)
{
	unsigned exp_len = task->hdr_len + len;

	if (exp_len > task->hdr_max) {
		WARN_ON(1);
		return -EINVAL;
	}

	WARN_ON(len & (ISCSI_PAD_LEN - 1)); /* caller must pad the AHS */
	task->hdr_len = exp_len;
	return 0;
}

/*
 * make an extended cdb AHS
 */
static int iscsi_prep_ecdb_ahs(struct iscsi_task *task)
{
	struct scsi_cmnd *cmd = task->sc;
	unsigned rlen, pad_len;
	unsigned short ahslength;
	struct iscsi_ecdb_ahdr *ecdb_ahdr;
	int rc;

	ecdb_ahdr = iscsi_next_hdr(task);
	rlen = cmd->cmd_len - ISCSI_CDB_SIZE;

	BUG_ON(rlen > sizeof(ecdb_ahdr->ecdb));
	ahslength = rlen + sizeof(ecdb_ahdr->reserved);

	pad_len = iscsi_padding(rlen);

	rc = iscsi_add_hdr(task, sizeof(ecdb_ahdr->ahslength) +
			   sizeof(ecdb_ahdr->ahstype) + ahslength + pad_len);
	if (rc)
		return rc;

	if (pad_len)
		memset(&ecdb_ahdr->ecdb[rlen], 0, pad_len);

	ecdb_ahdr->ahslength = cpu_to_be16(ahslength);
	ecdb_ahdr->ahstype = ISCSI_AHSTYPE_CDB;
	ecdb_ahdr->reserved = 0;
	memcpy(ecdb_ahdr->ecdb, cmd->cmnd + ISCSI_CDB_SIZE, rlen);

	debug_scsi("iscsi_prep_ecdb_ahs: varlen_cdb_len %d "
		   "rlen %d pad_len %d ahs_length %d iscsi_headers_size %u\n",
		   cmd->cmd_len, rlen, pad_len, ahslength, task->hdr_len);

	return 0;
}

static int iscsi_prep_bidi_ahs(struct iscsi_task *task)
{
	struct scsi_cmnd *sc = task->sc;
	struct iscsi_rlength_ahdr *rlen_ahdr;
	int rc;

	rlen_ahdr = iscsi_next_hdr(task);
	rc = iscsi_add_hdr(task, sizeof(*rlen_ahdr));
	if (rc)
		return rc;

	rlen_ahdr->ahslength =
		cpu_to_be16(sizeof(rlen_ahdr->read_length) +
			    sizeof(rlen_ahdr->reserved));
	rlen_ahdr->ahstype = ISCSI_AHSTYPE_RLENGTH;
	rlen_ahdr->reserved = 0;
	rlen_ahdr->read_length = cpu_to_be32(scsi_in(sc)->length);

	debug_scsi("bidi-in rlen_ahdr->read_length(%d) "
		   "rlen_ahdr->ahslength(%d)\n",
		   be32_to_cpu(rlen_ahdr->read_length),
		   be16_to_cpu(rlen_ahdr->ahslength));
	return 0;
}

/**
 * iscsi_prep_scsi_cmd_pdu - prep iscsi scsi cmd pdu
 * @task: iscsi task
 *
 * Prep basic iSCSI PDU fields for a scsi cmd pdu. The LLD should set
 * fields like dlength or final based on how much data it sends
 */
static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
{
	struct iscsi_conn *conn = task->conn;
	struct iscsi_session *session = conn->session;
	struct iscsi_cmd *hdr = task->hdr;
	struct scsi_cmnd *sc = task->sc;
	unsigned hdrlength, cmd_len;
	int rc;

	task->hdr_len = 0;
	rc = iscsi_add_hdr(task, sizeof(*hdr));
	if (rc)
		return rc;
	hdr->opcode = ISCSI_OP_SCSI_CMD;
	hdr->flags = ISCSI_ATTR_SIMPLE;
	int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun);
	hdr->itt = build_itt(task->itt, session->age);
	hdr->cmdsn = cpu_to_be32(session->cmdsn);
	session->cmdsn++;
	hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
	cmd_len = sc->cmd_len;
	if (cmd_len < ISCSI_CDB_SIZE)
		memset(&hdr->cdb[cmd_len], 0, ISCSI_CDB_SIZE - cmd_len);
	else if (cmd_len > ISCSI_CDB_SIZE) {
		rc = iscsi_prep_ecdb_ahs(task);
		if (rc)
			return rc;
		cmd_len = ISCSI_CDB_SIZE;
	}
	memcpy(hdr->cdb, sc->cmnd, cmd_len);

	task->imm_count = 0;
	if (scsi_bidi_cmnd(sc)) {
		hdr->flags |= ISCSI_FLAG_CMD_READ;
		rc = iscsi_prep_bidi_ahs(task);
		if (rc)
			return rc;
	}
	if (sc->sc_data_direction == DMA_TO_DEVICE) {
		unsigned out_len = scsi_out(sc)->length;
		hdr->data_length = cpu_to_be32(out_len);
		hdr->flags |= ISCSI_FLAG_CMD_WRITE;
		/*
		 * Write counters:
		 *
		 *	imm_count	bytes to be sent right after
		 *			SCSI PDU Header
		 *
		 *	unsol_count	bytes (as Data-Out) to be sent
		 *			without R2T ack right after
		 *			immediate data
		 *
		 *	r2t_data_count	bytes to be sent via R2T ack's
		 *
		 *	pad_count	bytes to be sent as zero-padding
		 */
		task->unsol_count = 0;
		task->unsol_offset = 0;
		task->unsol_datasn = 0;

		if (session->imm_data_en) {
			if (out_len >= session->first_burst)
				task->imm_count = min(session->first_burst,
						      conn->max_xmit_dlength);
			else
				task->imm_count = min(out_len,
						      conn->max_xmit_dlength);
			hton24(hdr->dlength, task->imm_count);
		} else
			zero_data(hdr->dlength);

		if (!session->initial_r2t_en) {
			task->unsol_count = min(session->first_burst, out_len)
							     - task->imm_count;
			task->unsol_offset = task->imm_count;
		}

		if (!task->unsol_count)
			/* No unsolicited Data-Out's */
			hdr->flags |= ISCSI_FLAG_CMD_FINAL;
	} else {
		hdr->flags |= ISCSI_FLAG_CMD_FINAL;
		zero_data(hdr->dlength);
		hdr->data_length = cpu_to_be32(scsi_in(sc)->length);

		if (sc->sc_data_direction == DMA_FROM_DEVICE)
			hdr->flags |= ISCSI_FLAG_CMD_READ;
	}

	/* calculate size of additional header segments (AHSs) */
	hdrlength = task->hdr_len - sizeof(*hdr);

	WARN_ON(hdrlength & (ISCSI_PAD_LEN-1));
	hdrlength /= ISCSI_PAD_LEN;

	WARN_ON(hdrlength >= 256);
	hdr->hlength = hdrlength & 0xFF;

	if (conn->session->tt->init_task &&
	    conn->session->tt->init_task(task))
		return -EIO;

	task->state = ISCSI_TASK_RUNNING;
	list_move_tail(&task->running, &conn->run_list);

	conn->scsicmd_pdus_cnt++;
	debug_scsi("iscsi prep [%s cid %d sc %p cdb 0x%x itt 0x%x len %d "
		   "bidi_len %d cmdsn %d win %d]\n", scsi_bidi_cmnd(sc) ?
		   "bidirectional" : sc->sc_data_direction == DMA_TO_DEVICE ?
		   "write" : "read", conn->id, sc, sc->cmnd[0], task->itt,
		   scsi_bufflen(sc),
		   scsi_bidi_cmnd(sc) ? scsi_in(sc)->length : 0,
		   session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1);
	return 0;
}

/**
 * iscsi_complete_command - finish a task
 * @task: iscsi cmd task
 *
 * Must be called with session lock.
 * This function returns the scsi command to scsi-ml or cleans
 * up mgmt tasks then returns the task to the pool.
 */
static void iscsi_complete_command(struct iscsi_task *task)
{
	struct iscsi_conn *conn = task->conn;
	struct iscsi_session *session = conn->session;
	struct scsi_cmnd *sc = task->sc;

	list_del_init(&task->running);
	task->state = ISCSI_TASK_COMPLETED;
	task->sc = NULL;

	if (conn->task == task)
		conn->task = NULL;
	/*
	 * login task is preallocated so do not free
	 */
	if (conn->login_task == task)
		return;

	__kfifo_put(session->cmdpool.queue, (void*)&task, sizeof(void*));

	if (conn->ping_task == task)
		conn->ping_task = NULL;

	if (sc) {
		task->sc = NULL;
		/* SCSI eh reuses commands to verify us */
		sc->SCp.ptr = NULL;
		/*
		 * queue command may call this to free the task, but
		 * not have setup the sc callback
		 */
		if (sc->scsi_done)
			sc->scsi_done(sc);
	}
}

void __iscsi_get_task(struct iscsi_task *task)
{
	atomic_inc(&task->refcount);
}
EXPORT_SYMBOL_GPL(__iscsi_get_task);

static void __iscsi_put_task(struct iscsi_task *task)
{
	if (atomic_dec_and_test(&task->refcount))
		iscsi_complete_command(task);
}

void iscsi_put_task(struct iscsi_task *task)
{
	struct iscsi_session *session = task->conn->session;

	spin_lock_bh(&session->lock);
	__iscsi_put_task(task);
	spin_unlock_bh(&session->lock);
}
EXPORT_SYMBOL_GPL(iscsi_put_task);

/*
 * session lock must be held
 */
static void fail_command(struct iscsi_conn *conn, struct iscsi_task *task,
			 int err)
{
	struct scsi_cmnd *sc;

	sc = task->sc;
	if (!sc)
		return;

	if (task->state == ISCSI_TASK_PENDING)
		/*
		 * cmd never made it to the xmit thread, so we should not count
		 * the cmd in the sequencing
		 */
		conn->session->queued_cmdsn--;
	else
		conn->session->tt->cleanup_task(conn, task);

	sc->result = err;
	if (!scsi_bidi_cmnd(sc))
		scsi_set_resid(sc, scsi_bufflen(sc));
	else {
		scsi_out(sc)->resid = scsi_out(sc)->length;
		scsi_in(sc)->resid = scsi_in(sc)->length;
	}

	if (conn->task == task)
		conn->task = NULL;
	/* release ref from queuecommand */
	__iscsi_put_task(task);
}

static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
				struct iscsi_task *task)
{
	struct iscsi_session *session = conn->session;
	struct iscsi_hdr *hdr = (struct iscsi_hdr *)task->hdr;
	struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr;

	if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
		return -ENOTCONN;

	if (hdr->opcode != (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) &&
	    hdr->opcode != (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE))
		nop->exp_statsn = cpu_to_be32(conn->exp_statsn);
	/*
	 * pre-format CmdSN for outgoing PDU.
	 */
	nop->cmdsn = cpu_to_be32(session->cmdsn);
	if (hdr->itt != RESERVED_ITT) {
		hdr->itt = build_itt(task->itt, session->age);
		/*
		 * TODO: We always use immediate, so we never hit this.
		 * If we start to send tmfs or nops as non-immediate then
		 * we should start checking the cmdsn numbers for mgmt tasks.
		 */
		if (conn->c_stage == ISCSI_CONN_STARTED &&
		    !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
			session->queued_cmdsn++;
			session->cmdsn++;
		}
	}

	if (session->tt->init_task)
		session->tt->init_task(task);

	if ((hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
		session->state = ISCSI_STATE_LOGGING_OUT;

	list_move_tail(&task->running, &conn->mgmt_run_list);
	debug_scsi("mgmtpdu [op 0x%x hdr->itt 0x%x datalen %d]\n",
		   hdr->opcode & ISCSI_OPCODE_MASK, hdr->itt,
		   task->data_count);
	return 0;
}

static struct iscsi_task *
__iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
		      char *data, uint32_t data_size)
{
	struct iscsi_session *session = conn->session;
	struct iscsi_task *task;

	if (session->state == ISCSI_STATE_TERMINATE)
		return NULL;

	if (hdr->opcode == (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) ||
	    hdr->opcode == (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE))
		/*
		 * Login and Text are sent serially, in
		 * request-followed-by-response sequence.
		 * Same task can be used. Same ITT must be used.
		 * Note that login_task is preallocated at conn_create().
		 */
		task = conn->login_task;
	else {
		BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
		BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);

		if (!__kfifo_get(session->cmdpool.queue,
				 (void*)&task, sizeof(void*)))
			return NULL;
	}
	/*
	 * released in complete pdu for task we expect a response for, and
	 * released by the lld when it has transmitted the task for
	 * pdus we do not expect a response for.
	 */
	atomic_set(&task->refcount, 1);
	task->conn = conn;
	task->sc = NULL;

	if (data_size) {
		memcpy(task->data, data, data_size);
		task->data_count = data_size;
	} else
		task->data_count = 0;

	memcpy(task->hdr, hdr, sizeof(struct iscsi_hdr));
	INIT_LIST_HEAD(&task->running);
	list_add_tail(&task->running, &conn->mgmtqueue);

	if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) {
		if (iscsi_prep_mgmt_task(conn, task)) {
			__iscsi_put_task(task);
			return NULL;
		}

		if (session->tt->xmit_task(task))
			task = NULL;

	} else
		scsi_queue_work(conn->session->host, &conn->xmitwork);

	return task;
}

int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
			char *data, uint32_t data_size)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_session *session = conn->session;
	int err = 0;

	spin_lock_bh(&session->lock);
	if (!__iscsi_conn_send_pdu(conn, hdr, data, data_size))
		err = -EPERM;
	spin_unlock_bh(&session->lock);
	return err;
}
EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);

/**
 * iscsi_scsi_cmd_rsp - SCSI Command Response processing
 * @conn: iscsi connection
 * @hdr: iscsi header
 * @task: scsi command task
 * @data: cmd data buffer
 * @datalen: len of buffer
 *
 * iscsi_scsi_cmd_rsp sets up the scsi_cmnd fields based on the PDU and
 * then completes the command and task.
 **/
static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
			       struct iscsi_task *task, char *data,
			       int datalen)
{
	struct iscsi_cmd_rsp *rhdr = (struct iscsi_cmd_rsp *)hdr;
	struct iscsi_session *session = conn->session;
	struct scsi_cmnd *sc = task->sc;

	iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
	conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;

	sc->result = (DID_OK << 16) | rhdr->cmd_status;

	if (rhdr->response != ISCSI_STATUS_CMD_COMPLETED) {
		sc->result = DID_ERROR << 16;
		goto out;
	}

	if (rhdr->cmd_status == SAM_STAT_CHECK_CONDITION) {
		uint16_t senselen;

		if (datalen < 2) {
invalid_datalen:
			iscsi_conn_printk(KERN_ERR, conn,
					  "Got CHECK_CONDITION but invalid data "
					  "buffer size of %d\n", datalen);
			sc->result = DID_BAD_TARGET << 16;
			goto out;
		}

		senselen = get_unaligned_be16(data);
		if (datalen < senselen)
			goto invalid_datalen;

		memcpy(sc->sense_buffer, data + 2,
		       min_t(uint16_t, senselen, SCSI_SENSE_BUFFERSIZE));
		debug_scsi("copied %d bytes of sense\n",
			   min_t(uint16_t, senselen, SCSI_SENSE_BUFFERSIZE));
	}

	if (rhdr->flags & (ISCSI_FLAG_CMD_BIDI_UNDERFLOW |
			   ISCSI_FLAG_CMD_BIDI_OVERFLOW)) {
		int res_count = be32_to_cpu(rhdr->bi_residual_count);

		if (scsi_bidi_cmnd(sc) && res_count > 0 &&
		    (rhdr->flags & ISCSI_FLAG_CMD_BIDI_OVERFLOW ||
		     res_count <= scsi_in(sc)->length))
			scsi_in(sc)->resid = res_count;
		else
			sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
	}

	if (rhdr->flags & (ISCSI_FLAG_CMD_UNDERFLOW |
			   ISCSI_FLAG_CMD_OVERFLOW)) {
		int res_count = be32_to_cpu(rhdr->residual_count);

		if (res_count > 0 &&
		    (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW ||
		     res_count <= scsi_bufflen(sc)))
			/* write side for bidi or uni-io set_resid */
			scsi_set_resid(sc, res_count);
		else
			sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
	}
out:
	debug_scsi("done [sc %lx res %d itt 0x%x]\n",
		   (long)sc, sc->result, task->itt);
	conn->scsirsp_pdus_cnt++;

	__iscsi_put_task(task);
}

/**
 * iscsi_data_in_rsp - SCSI Data-In Response processing
 * @conn: iscsi connection
 * @hdr: iscsi pdu
 * @task: scsi command task
 **/
static void
iscsi_data_in_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
		  struct iscsi_task *task)
{
	struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)hdr;
	struct scsi_cmnd *sc = task->sc;

	if (!(rhdr->flags & ISCSI_FLAG_DATA_STATUS))
		return;

	sc->result = (DID_OK << 16) | rhdr->cmd_status;
	conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
	if (rhdr->flags & (ISCSI_FLAG_DATA_UNDERFLOW |
			   ISCSI_FLAG_DATA_OVERFLOW)) {
		int res_count = be32_to_cpu(rhdr->residual_count);

		if (res_count > 0 &&
		    (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW ||
		     res_count <= scsi_in(sc)->length))
			scsi_in(sc)->resid = res_count;
		else
			sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
	}

	conn->scsirsp_pdus_cnt++;
	__iscsi_put_task(task);
}

static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
{
	struct iscsi_tm_rsp *tmf = (struct iscsi_tm_rsp *)hdr;

	conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
	conn->tmfrsp_pdus_cnt++;

	if (conn->tmf_state != TMF_QUEUED)
		return;

	if (tmf->response == ISCSI_TMF_RSP_COMPLETE)
		conn->tmf_state = TMF_SUCCESS;
	else if (tmf->response == ISCSI_TMF_RSP_NO_TASK)
		conn->tmf_state = TMF_NOT_FOUND;
	else
		conn->tmf_state = TMF_FAILED;
	wake_up(&conn->ehwait);
}

static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
{
	struct iscsi_nopout hdr;
	struct iscsi_task *task;

	if (!rhdr && conn->ping_task)
		return;

	memset(&hdr, 0, sizeof(struct iscsi_nopout));
	hdr.opcode = ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE;
	hdr.flags = ISCSI_FLAG_CMD_FINAL;

	if (rhdr) {
		memcpy(hdr.lun, rhdr->lun, 8);
		hdr.ttt = rhdr->ttt;
		hdr.itt = RESERVED_ITT;
	} else
		hdr.ttt = RESERVED_ITT;

	task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
	if (!task)
		iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n");
	else if (!rhdr) {
		/* only track our nops */
		conn->ping_task = task;
		conn->last_ping = jiffies;
	}
}

static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
			       char *data, int datalen)
{
	struct iscsi_reject *reject = (struct iscsi_reject *)hdr;
	struct iscsi_hdr rejected_pdu;
	uint32_t itt;

	conn->exp_statsn = be32_to_cpu(reject->statsn) + 1;

	if (reject->reason == ISCSI_REASON_DATA_DIGEST_ERROR) {
		if (ntoh24(reject->dlength) > datalen)
			return ISCSI_ERR_PROTO;

		if (ntoh24(reject->dlength) >= sizeof(struct iscsi_hdr)) {
			memcpy(&rejected_pdu, data, sizeof(struct iscsi_hdr));
			itt = get_itt(rejected_pdu.itt);
			iscsi_conn_printk(KERN_ERR, conn,
					  "itt 0x%x had pdu (op 0x%x) rejected "
					  "due to DataDigest error.\n", itt,
					  rejected_pdu.opcode);
		}
	}
	return 0;
}

/**
 * iscsi_itt_to_task - look up task by itt
 * @conn: iscsi connection
 * @itt: itt
 *
 * This should be used for mgmt tasks like login and nops, or if
 * the LLD's itt space does not include the session age.
 *
 * The session lock must be held.
 */
static struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt)
{
	struct iscsi_session *session = conn->session;
	uint32_t i;

	if (itt == RESERVED_ITT)
		return NULL;

	i = get_itt(itt);
	if (i >= session->cmds_max)
		return NULL;

	return session->cmds[i];
}

/**
 * __iscsi_complete_pdu - complete pdu
 * @conn: iscsi conn
 * @hdr: iscsi header
 * @data: data buffer
 * @datalen: len of data buffer
 *
 * Completes pdu processing by freeing any resources allocated at
 * queuecommand or send generic. session lock must be held and verify
 * itt must have been called.
 */
int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
			 char *data, int datalen)
{
	struct iscsi_session *session = conn->session;
	int opcode = hdr->opcode & ISCSI_OPCODE_MASK, rc = 0;
	struct iscsi_task *task;
	uint32_t itt;

	conn->last_recv = jiffies;
	rc = iscsi_verify_itt(conn, hdr->itt);
	if (rc)
		return rc;

	if (hdr->itt != RESERVED_ITT)
		itt = get_itt(hdr->itt);
	else
		itt = ~0U;

	debug_scsi("[op 0x%x cid %d itt 0x%x len %d]\n",
		   opcode, conn->id, itt, datalen);

	if (itt == ~0U) {
		iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);

		switch(opcode) {
		case ISCSI_OP_NOOP_IN:
			if (datalen) {
				rc = ISCSI_ERR_PROTO;
				break;
			}

			if (hdr->ttt == cpu_to_be32(ISCSI_RESERVED_TAG))
				break;

			iscsi_send_nopout(conn, (struct iscsi_nopin*)hdr);
			break;
		case ISCSI_OP_REJECT:
			rc = iscsi_handle_reject(conn, hdr, data, datalen);
			break;
		case ISCSI_OP_ASYNC_EVENT:
			conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
			if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
				rc = ISCSI_ERR_CONN_FAILED;
			break;
		default:
			rc = ISCSI_ERR_BAD_OPCODE;
			break;
		}
		goto out;
	}

	switch(opcode) {
	case ISCSI_OP_SCSI_CMD_RSP:
	case ISCSI_OP_SCSI_DATA_IN:
		task = iscsi_itt_to_ctask(conn, hdr->itt);
		if (!task)
			return ISCSI_ERR_BAD_ITT;
		break;
	case ISCSI_OP_R2T:
		/*
		 * LLD handles R2Ts if they need to.
		 */
		return 0;
	case ISCSI_OP_LOGOUT_RSP:
	case ISCSI_OP_LOGIN_RSP:
	case ISCSI_OP_TEXT_RSP:
	case ISCSI_OP_SCSI_TMFUNC_RSP:
	case ISCSI_OP_NOOP_IN:
		task = iscsi_itt_to_task(conn, hdr->itt);
		if (!task)
			return ISCSI_ERR_BAD_ITT;
		break;
	default:
		return ISCSI_ERR_BAD_OPCODE;
	}

	switch(opcode) {
	case ISCSI_OP_SCSI_CMD_RSP:
		iscsi_scsi_cmd_rsp(conn, hdr, task, data, datalen);
		break;
	case ISCSI_OP_SCSI_DATA_IN:
		iscsi_data_in_rsp(conn, hdr, task);
		break;
	case ISCSI_OP_LOGOUT_RSP:
		iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
		if (datalen) {
			rc = ISCSI_ERR_PROTO;
			break;
		}
		conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
		goto recv_pdu;
	case ISCSI_OP_LOGIN_RSP:
	case ISCSI_OP_TEXT_RSP:
		iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
		/*
		 * login related PDU's exp_statsn is handled in
		 * userspace
		 */
		goto recv_pdu;
	case ISCSI_OP_SCSI_TMFUNC_RSP:
		iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
		if (datalen) {
			rc = ISCSI_ERR_PROTO;
			break;
		}

		iscsi_tmf_rsp(conn, hdr);
		__iscsi_put_task(task);
		break;
	case ISCSI_OP_NOOP_IN:
		iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
		if (hdr->ttt != cpu_to_be32(ISCSI_RESERVED_TAG) || datalen) {
			rc = ISCSI_ERR_PROTO;
			break;
		}
		conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;

		if (conn->ping_task != task)
			/*
			 * If this is not in response to one of our
			 * nops then it must be from userspace.
			 */
			goto recv_pdu;

		mod_timer(&conn->transport_timer, jiffies + conn->recv_timeout);
		__iscsi_put_task(task);
		break;
	default:
		rc = ISCSI_ERR_BAD_OPCODE;
		break;
	}

out:
	return rc;
recv_pdu:
	if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
		rc = ISCSI_ERR_CONN_FAILED;
	__iscsi_put_task(task);
	return rc;
}
EXPORT_SYMBOL_GPL(__iscsi_complete_pdu);

int iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
		       char *data, int datalen)
{
	int rc;

	spin_lock(&conn->session->lock);
	rc = __iscsi_complete_pdu(conn, hdr, data, datalen);
	spin_unlock(&conn->session->lock);
	return rc;
}
EXPORT_SYMBOL_GPL(iscsi_complete_pdu);

int iscsi_verify_itt(struct iscsi_conn *conn, itt_t itt)
{
	struct iscsi_session *session = conn->session;
	uint32_t i;

	if (itt == RESERVED_ITT)
		return 0;

	if (((__force u32)itt & ISCSI_AGE_MASK) !=
	    (session->age << ISCSI_AGE_SHIFT)) {
		iscsi_conn_printk(KERN_ERR, conn,
				  "received itt %x expected session age (%x)\n",
				  (__force u32)itt, session->age);
		return ISCSI_ERR_BAD_ITT;
	}

	i = get_itt(itt);
	if (i >= session->cmds_max) {
		iscsi_conn_printk(KERN_ERR, conn,
				  "received invalid itt index %u (max cmds "
				  "%u.\n", i, session->cmds_max);
		return ISCSI_ERR_BAD_ITT;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(iscsi_verify_itt);

/**
 * iscsi_itt_to_ctask - look up ctask by itt
 * @conn: iscsi connection
 * @itt: itt
 *
 * This should be used for cmd tasks.
 *
 * The session lock must be held.
 */
struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *conn, itt_t itt)
{
	struct iscsi_task *task;

	if (iscsi_verify_itt(conn, itt))
		return NULL;

	task = iscsi_itt_to_task(conn, itt);
	if (!task || !task->sc)
		return NULL;

	if (task->sc->SCp.phase != conn->session->age) {
		iscsi_session_printk(KERN_ERR, conn->session,
				     "task's session age %d, expected %d\n",
				     task->sc->SCp.phase, conn->session->age);
		return NULL;
	}

	return task;
}
EXPORT_SYMBOL_GPL(iscsi_itt_to_ctask);

void iscsi_session_failure(struct iscsi_cls_session *cls_session,
			   enum iscsi_err err)
{
	struct iscsi_session *session = cls_session->dd_data;
	struct iscsi_conn *conn;
	struct device *dev;
	unsigned long flags;

	spin_lock_irqsave(&session->lock, flags);
	conn = session->leadconn;
	if (session->state == ISCSI_STATE_TERMINATE || !conn) {
		spin_unlock_irqrestore(&session->lock, flags);
		return;
	}

	dev = get_device(&conn->cls_conn->dev);
	spin_unlock_irqrestore(&session->lock, flags);
	if (!dev)
		return;
	/*
	 * if the host is being removed bypass the connection
	 * recovery initialization because we are going to kill
	 * the session.
	 */
	if (err == ISCSI_ERR_INVALID_HOST)
		iscsi_conn_error_event(conn->cls_conn, err);
	else
		iscsi_conn_failure(conn, err);
	put_device(dev);
}
EXPORT_SYMBOL_GPL(iscsi_session_failure);

void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
{
	struct iscsi_session *session = conn->session;
	unsigned long flags;

	spin_lock_irqsave(&session->lock, flags);
	if (session->state == ISCSI_STATE_FAILED) {
		spin_unlock_irqrestore(&session->lock, flags);
		return;
	}

	if (conn->stop_stage == 0)
		session->state = ISCSI_STATE_FAILED;
	spin_unlock_irqrestore(&session->lock, flags);

	set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
	set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
	iscsi_conn_error_event(conn->cls_conn, err);
}
EXPORT_SYMBOL_GPL(iscsi_conn_failure);

static int iscsi_check_cmdsn_window_closed(struct iscsi_conn *conn)
{
	struct iscsi_session *session = conn->session;

	/*
	 * Check for iSCSI window and take care of CmdSN wrap-around
	 */
	if (!iscsi_sna_lte(session->queued_cmdsn, session->max_cmdsn)) {
		debug_scsi("iSCSI CmdSN closed. ExpCmdSn %u MaxCmdSN %u "
			   "CmdSN %u/%u\n", session->exp_cmdsn,
			   session->max_cmdsn, session->cmdsn,
			   session->queued_cmdsn);
		return -ENOSPC;
	}
	return 0;
}

static int iscsi_xmit_task(struct iscsi_conn *conn)
{
	struct iscsi_task *task = conn->task;
	int rc;

	__iscsi_get_task(task);
	spin_unlock_bh(&conn->session->lock);
	rc = conn->session->tt->xmit_task(task);
	spin_lock_bh(&conn->session->lock);
	__iscsi_put_task(task);
	if (!rc)
		/* done with this task */
		conn->task = NULL;
	return rc;
}

/**
 * iscsi_requeue_task - requeue task to run from session workqueue
 * @task: task to requeue
 *
 * LLDs that need to run a task from the session workqueue should call
 * this. The session lock must be held. This should only be called
 * by software drivers.
 */
void iscsi_requeue_task(struct iscsi_task *task)
{
	struct iscsi_conn *conn = task->conn;

	list_move_tail(&task->running, &conn->requeue);
	scsi_queue_work(conn->session->host, &conn->xmitwork);
}
EXPORT_SYMBOL_GPL(iscsi_requeue_task);

/**
 * iscsi_data_xmit - xmit any command into the scheduled connection
 * @conn: iscsi connection
 *
 * Notes:
 *	The function can return -EAGAIN in which case the caller must
 *	re-schedule it again later or recover. '0' return code means
 *	successful xmit.
 **/
static int iscsi_data_xmit(struct iscsi_conn *conn)
{
	int rc = 0;

	spin_lock_bh(&conn->session->lock);
	if (unlikely(conn->suspend_tx)) {
		debug_scsi("conn %d Tx suspended!\n", conn->id);
		spin_unlock_bh(&conn->session->lock);
		return -ENODATA;
	}

	if (conn->task) {
		rc = iscsi_xmit_task(conn);
		if (rc)
			goto again;
	}

	/*
	 * process mgmt pdus like nops before commands since we should
	 * only have one nop-out as a ping from us and targets should not
	 * overflow us with nop-ins
	 */
check_mgmt:
	while (!list_empty(&conn->mgmtqueue)) {
		conn->task = list_entry(conn->mgmtqueue.next,
					struct iscsi_task, running);
		if (iscsi_prep_mgmt_task(conn, conn->task)) {
			__iscsi_put_task(conn->task);
			conn->task = NULL;
			continue;
		}
		rc = iscsi_xmit_task(conn);
		if (rc)
			goto again;
	}

	/* process pending command queue */
	while (!list_empty(&conn->xmitqueue)) {
		if (conn->tmf_state == TMF_QUEUED)
			break;

		conn->task = list_entry(conn->xmitqueue.next,
					struct iscsi_task, running);
		if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
			fail_command(conn, conn->task, DID_IMM_RETRY << 16);
			continue;
		}
		if (iscsi_prep_scsi_cmd_pdu(conn->task)) {
			fail_command(conn, conn->task, DID_ABORT << 16);
			continue;
		}
		rc = iscsi_xmit_task(conn);
		if (rc)
			goto again;
		/*
		 * we could continuously get new task requests so
		 * we need to check the mgmt queue for nops that need to
		 * be sent to avoid starvation
		 */
		if (!list_empty(&conn->mgmtqueue))
			goto check_mgmt;
	}

	while (!list_empty(&conn->requeue)) {
		if (conn->session->fast_abort && conn->tmf_state != TMF_INITIAL)
			break;

		/*
		 * we always do fastlogout - conn stop code will clean up.
		 */
		if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
			break;

		conn->task = list_entry(conn->requeue.next,
					struct iscsi_task, running);
		conn->task->state = ISCSI_TASK_RUNNING;
		list_move_tail(conn->requeue.next, &conn->run_list);
		rc = iscsi_xmit_task(conn);
		if (rc)
			goto again;
		if (!list_empty(&conn->mgmtqueue))
			goto check_mgmt;
	}
	spin_unlock_bh(&conn->session->lock);
	return -ENODATA;

again:
	if (unlikely(conn->suspend_tx))
		rc = -ENODATA;
	spin_unlock_bh(&conn->session->lock);
	return rc;
}

static void iscsi_xmitworker(struct work_struct *work)
{
	struct iscsi_conn *conn =
		container_of(work, struct iscsi_conn, xmitwork);
	int rc;
	/*
	 * serialize Xmit worker on a per-connection basis.
	 */
	do {
		rc = iscsi_data_xmit(conn);
	} while (rc >= 0 || rc == -EAGAIN);
}

enum {
	FAILURE_BAD_HOST = 1,
	FAILURE_SESSION_FAILED,
	FAILURE_SESSION_FREED,
	FAILURE_WINDOW_CLOSED,
	FAILURE_OOM,
	FAILURE_SESSION_TERMINATE,
	FAILURE_SESSION_IN_RECOVERY,
	FAILURE_SESSION_RECOVERY_TIMEOUT,
	FAILURE_SESSION_LOGGING_OUT,
	FAILURE_SESSION_NOT_READY,
};

int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
{
	struct iscsi_cls_session *cls_session;
	struct Scsi_Host *host;
	int reason = 0;
	struct iscsi_session *session;
	struct iscsi_conn *conn;
	struct iscsi_task *task = NULL;

	sc->scsi_done = done;
	sc->result = 0;
	sc->SCp.ptr = NULL;

	host = sc->device->host;
	spin_unlock(host->host_lock);

	cls_session = starget_to_session(scsi_target(sc->device));
	session = cls_session->dd_data;
	spin_lock(&session->lock);

	reason = iscsi_session_chkready(cls_session);
	if (reason) {
		sc->result = reason;
		goto fault;
	}

	/*
	 * ISCSI_STATE_FAILED is a temp. state. The recovery
	 * code will decide what is best to do with command queued
	 * during this time
	 */
	if (session->state != ISCSI_STATE_LOGGED_IN &&
	    session->state != ISCSI_STATE_FAILED) {
		/*
		 * to handle the race between when we set the recovery state
		 * and block the session we requeue here (commands could
		 * be entering our queuecommand while a block is starting
		 * up because the block code is not locked)
		 */
		switch (session->state) {
		case ISCSI_STATE_IN_RECOVERY:
			reason = FAILURE_SESSION_IN_RECOVERY;
			goto reject;
		case ISCSI_STATE_LOGGING_OUT:
			reason = FAILURE_SESSION_LOGGING_OUT;
			goto reject;
		case ISCSI_STATE_RECOVERY_FAILED:
			reason = FAILURE_SESSION_RECOVERY_TIMEOUT;
			sc->result = DID_TRANSPORT_FAILFAST << 16;
			break;
		case ISCSI_STATE_TERMINATE:
			reason = FAILURE_SESSION_TERMINATE;
			sc->result = DID_NO_CONNECT << 16;
			break;
		default:
			reason = FAILURE_SESSION_FREED;
			sc->result = DID_NO_CONNECT << 16;
		}
		goto fault;
	}

	conn = session->leadconn;
	if (!conn) {
		reason = FAILURE_SESSION_FREED;
		sc->result = DID_NO_CONNECT << 16;
		goto fault;
	}

	if (iscsi_check_cmdsn_window_closed(conn)) {
		reason = FAILURE_WINDOW_CLOSED;
		goto reject;
	}

	if (!__kfifo_get(session->cmdpool.queue, (void*)&task,
			 sizeof(void*))) {
		reason = FAILURE_OOM;
		goto reject;
	}
	sc->SCp.phase = session->age;
	sc->SCp.ptr = (char *)task;

	atomic_set(&task->refcount, 1);
	task->state = ISCSI_TASK_PENDING;
	task->conn = conn;
	task->sc = sc;
	INIT_LIST_HEAD(&task->running);
	list_add_tail(&task->running, &conn->xmitqueue);

	if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) {
		if (iscsi_prep_scsi_cmd_pdu(task)) {
			sc->result = DID_ABORT << 16;
			sc->scsi_done = NULL;
			iscsi_complete_command(task);
			goto fault;
		}
		if (session->tt->xmit_task(task)) {
			sc->scsi_done = NULL;
			iscsi_complete_command(task);
			reason = FAILURE_SESSION_NOT_READY;
			goto reject;
		}
	} else
		scsi_queue_work(session->host, &conn->xmitwork);

	session->queued_cmdsn++;
	spin_unlock(&session->lock);
	spin_lock(host->host_lock);
	return 0;

reject:
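	/*
	 * Command was not queued; returning SCSI_MLQUEUE_TARGET_BUSY below
	 * asks the SCSI midlayer to retry it later.
	 */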
	spin_unlock(&session->lock);
	debug_scsi("cmd 0x%x rejected (%d)\n", sc->cmnd[0], reason);
	spin_lock(host->host_lock);
	return SCSI_MLQUEUE_TARGET_BUSY;

fault:
	spin_unlock(&session->lock);
	debug_scsi("iscsi: cmd 0x%x is not queued (%d)\n", sc->cmnd[0], reason);
	if (!scsi_bidi_cmnd(sc))
		scsi_set_resid(sc, scsi_bufflen(sc));
	else {
		scsi_out(sc)->resid = scsi_out(sc)->length;
		scsi_in(sc)->resid = scsi_in(sc)->length;
	}
	done(sc);
	spin_lock(host->host_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(iscsi_queuecommand);

int iscsi_change_queue_depth(struct scsi_device *sdev, int depth)
{
	if (depth > ISCSI_MAX_CMD_PER_LUN)
		depth = ISCSI_MAX_CMD_PER_LUN;
	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
	return sdev->queue_depth;
}
EXPORT_SYMBOL_GPL(iscsi_change_queue_depth);

void iscsi_session_recovery_timedout(struct iscsi_cls_session *cls_session)
{
	struct iscsi_session *session = cls_session->dd_data;

	spin_lock_bh(&session->lock);
	if (session->state != ISCSI_STATE_LOGGED_IN) {
		session->state = ISCSI_STATE_RECOVERY_FAILED;
		if (session->leadconn)
			wake_up(&session->leadconn->ehwait);
	}
	spin_unlock_bh(&session->lock);
}
EXPORT_SYMBOL_GPL(iscsi_session_recovery_timedout);

int iscsi_eh_target_reset(struct scsi_cmnd *sc)
{
	struct iscsi_cls_session *cls_session;
	struct iscsi_session *session;
	struct iscsi_conn *conn;

	cls_session = starget_to_session(scsi_target(sc->device));
	session = cls_session->dd_data;
	conn = session->leadconn;

	mutex_lock(&session->eh_mutex);
	spin_lock_bh(&session->lock);
	if (session->state == ISCSI_STATE_TERMINATE) {
failed:
		debug_scsi("failing target reset: session terminated "
			   "[CID %d age %d]\n", conn->id, session->age);
		spin_unlock_bh(&session->lock);
		mutex_unlock(&session->eh_mutex);
		return FAILED;
	}

	spin_unlock_bh(&session->lock);
	mutex_unlock(&session->eh_mutex);
	/*
	 * we drop the lock here but the leadconn cannot be destroyed while
	 * we are in the scsi eh
	 */
	iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);

	debug_scsi("iscsi_eh_target_reset wait for relogin\n");
	wait_event_interruptible(conn->ehwait,
				 session->state == ISCSI_STATE_TERMINATE ||
				 session->state == ISCSI_STATE_LOGGED_IN ||
				 session->state == ISCSI_STATE_RECOVERY_FAILED);
	if (signal_pending(current))
		flush_signals(current);

	mutex_lock(&session->eh_mutex);
	spin_lock_bh(&session->lock);
	if (session->state == ISCSI_STATE_LOGGED_IN)
		iscsi_session_printk(KERN_INFO, session,
				     "target reset succeeded\n");
	else
		goto failed;
	spin_unlock_bh(&session->lock);
	mutex_unlock(&session->eh_mutex);
	return SUCCESS;
}
EXPORT_SYMBOL_GPL(iscsi_eh_target_reset);

static void iscsi_tmf_timedout(unsigned long data)
{
	struct iscsi_conn *conn = (struct iscsi_conn *)data;
	struct iscsi_session *session = conn->session;

	spin_lock(&session->lock);
	if (conn->tmf_state == TMF_QUEUED) {
		conn->tmf_state = TMF_TIMEDOUT;
		debug_scsi("tmf timedout\n");
		/* unblock eh_abort() */
		wake_up(&conn->ehwait);
	}
	spin_unlock(&session->lock);
}

static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
				   struct iscsi_tm *hdr, int age,
				   int timeout)
{
	struct iscsi_session *session = conn->session;
	struct iscsi_task *task;

	task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr,
				     NULL, 0);
	if (!task) {
		spin_unlock_bh(&session->lock);
		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
		spin_lock_bh(&session->lock);
		debug_scsi("tmf exec failure\n");
		return -EPERM;
	}
	conn->tmfcmd_pdus_cnt++;
	conn->tmf_timer.expires = timeout * HZ + jiffies;
	conn->tmf_timer.function = iscsi_tmf_timedout;
	conn->tmf_timer.data = (unsigned long)conn;
	add_timer(&conn->tmf_timer);
	debug_scsi("tmf set timeout\n");

	spin_unlock_bh(&session->lock);
	mutex_unlock(&session->eh_mutex);

	/*
	 * block eh thread until:
	 *
	 * 1) tmf response
	 * 2) tmf timeout
	 * 3) session is terminated or restarted or userspace has
	 * given up on recovery
	 */
	wait_event_interruptible(conn->ehwait, age != session->age ||
				 session->state != ISCSI_STATE_LOGGED_IN ||
				 conn->tmf_state != TMF_QUEUED);
	if (signal_pending(current))
		flush_signals(current);
	del_timer_sync(&conn->tmf_timer);

	mutex_lock(&session->eh_mutex);
	spin_lock_bh(&session->lock);
	/* if the session drops it will clean up the task */
	if (age != session->age ||
	    session->state != ISCSI_STATE_LOGGED_IN)
		return -ENOTCONN;
	return 0;
}

/*
 * Fail commands. session lock held and recv side suspended and xmit
 * thread flushed
 */
static void fail_all_commands(struct iscsi_conn *conn, unsigned lun,
			      int error)
{
	struct iscsi_task *task, *tmp;

	if (conn->task && (conn->task->sc->device->lun == lun || lun == -1))
		conn->task = NULL;

	/* flush pending */
	list_for_each_entry_safe(task, tmp, &conn->xmitqueue, running) {
		if (lun == task->sc->device->lun || lun == -1) {
			debug_scsi("failing pending sc %p itt 0x%x\n",
				   task->sc, task->itt);
			fail_command(conn, task, error << 16);
		}
	}

	list_for_each_entry_safe(task, tmp, &conn->requeue, running) {
		if (lun == task->sc->device->lun || lun == -1) {
			debug_scsi("failing requeued sc %p itt 0x%x\n",
				   task->sc, task->itt);
			fail_command(conn, task, error << 16);
		}
	}

	/* fail all other running */
	list_for_each_entry_safe(task, tmp, &conn->run_list, running) {
		if (lun == task->sc->device->lun || lun == -1) {
			debug_scsi("failing in progress sc %p itt 0x%x\n",
				   task->sc, task->itt);
			fail_command(conn, task, error << 16);
		}
	}
}

void iscsi_suspend_tx(struct iscsi_conn *conn)
{
	set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
	if (!(conn->session->tt->caps & CAP_DATA_PATH_OFFLOAD))
		scsi_flush_work(conn->session->host);
}
EXPORT_SYMBOL_GPL(iscsi_suspend_tx);

static void iscsi_start_tx(struct iscsi_conn *conn)
{
	clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
	if (!(conn->session->tt->caps & CAP_DATA_PATH_OFFLOAD))
		scsi_queue_work(conn->session->host, &conn->xmitwork);
}

static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
{
	struct iscsi_cls_session *cls_session;
	struct iscsi_session *session;
	struct iscsi_conn *conn;
	enum blk_eh_timer_return rc = BLK_EH_NOT_HANDLED;

	cls_session = starget_to_session(scsi_target(scmd->device));
	session = cls_session->dd_data;
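	/*
	 * Ask the block layer for more time (BLK_EH_RESET_TIMER) while iscsi
	 * recovery is in progress or while the nop/recv timers show the
	 * transport is still being checked; otherwise let the scsi eh run.
	 */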
	debug_scsi("scsi cmd %p timedout\n", scmd);

	spin_lock(&session->lock);
	if (session->state != ISCSI_STATE_LOGGED_IN) {
		/*
		 * We are probably in the middle of iscsi recovery so let
		 * that complete and handle the error.
		 */
		rc = BLK_EH_RESET_TIMER;
		goto done;
	}

	conn = session->leadconn;
	if (!conn) {
		/* In the middle of shutting down */
		rc = BLK_EH_RESET_TIMER;
		goto done;
	}

	if (!conn->recv_timeout && !conn->ping_timeout)
		goto done;
	/*
	 * if the ping timedout then we are in the middle of cleaning up
	 * and can let the iscsi eh handle it
	 */
	if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) +
			   (conn->ping_timeout * HZ), jiffies))
		rc = BLK_EH_RESET_TIMER;
	/*
	 * if we are about to check the transport then give the command
	 * more time
	 */
	if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ),
			   jiffies))
		rc = BLK_EH_RESET_TIMER;
	/* if in the middle of checking the transport then give us more time */
	if (conn->ping_task)
		rc = BLK_EH_RESET_TIMER;
done:
	spin_unlock(&session->lock);
	debug_scsi("return %s\n", rc == BLK_EH_RESET_TIMER ?
					"timer reset" : "nh");
	return rc;
}

static void iscsi_check_transport_timeouts(unsigned long data)
{
	struct iscsi_conn *conn = (struct iscsi_conn *)data;
	struct iscsi_session *session = conn->session;
	unsigned long recv_timeout, next_timeout = 0, last_recv;

	spin_lock(&session->lock);
	if (session->state != ISCSI_STATE_LOGGED_IN)
		goto done;

	recv_timeout = conn->recv_timeout;
	if (!recv_timeout)
		goto done;

	recv_timeout *= HZ;
	last_recv = conn->last_recv;
	if (conn->ping_task &&
	    time_before_eq(conn->last_ping + (conn->ping_timeout * HZ),
			   jiffies)) {
		iscsi_conn_printk(KERN_ERR, conn, "ping timeout of %d secs "
				  "expired, last rx %lu, last ping %lu, "
				  "now %lu\n", conn->ping_timeout, last_recv,
				  conn->last_ping, jiffies);
		spin_unlock(&session->lock);
		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
		return;
	}

	if (time_before_eq(last_recv + recv_timeout, jiffies)) {
		/* send a ping to try to provoke some traffic */
		debug_scsi("Sending nopout as ping on conn %p\n", conn);
		iscsi_send_nopout(conn, NULL);
		next_timeout = conn->last_ping + (conn->ping_timeout * HZ);
	} else
		next_timeout = last_recv + recv_timeout;

	debug_scsi("Setting next tmo %lu\n", next_timeout);
	mod_timer(&conn->transport_timer, next_timeout);
done:
	spin_unlock(&session->lock);
}

static void iscsi_prep_abort_task_pdu(struct iscsi_task *task,
				      struct iscsi_tm *hdr)
{
	memset(hdr, 0, sizeof(*hdr));
	hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
	hdr->flags = ISCSI_TM_FUNC_ABORT_TASK & ISCSI_FLAG_TM_FUNC_MASK;
	hdr->flags |= ISCSI_FLAG_CMD_FINAL;
	memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
	hdr->rtt = task->hdr->itt;
	hdr->refcmdsn = task->hdr->cmdsn;
}

int iscsi_eh_abort(struct scsi_cmnd *sc)
{
	struct iscsi_cls_session *cls_session;
	struct iscsi_session *session;
	struct iscsi_conn *conn;
	struct iscsi_task *task;
	struct iscsi_tm *hdr;
	int rc, age;

	cls_session = starget_to_session(scsi_target(sc->device));
	session = cls_session->dd_data;
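	/*
	 * eh_mutex serializes this handler with the other iscsi error
	 * handlers (device/target reset) on this session.
	 */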
	mutex_lock(&session->eh_mutex);
	spin_lock_bh(&session->lock);
	/*
	 * if session was ISCSI_STATE_IN_RECOVERY then we may not have
	 * got the command.
	 */
	if (!sc->SCp.ptr) {
		debug_scsi("sc never reached iscsi layer or it completed.\n");
		spin_unlock_bh(&session->lock);
		mutex_unlock(&session->eh_mutex);
		return SUCCESS;
	}

	/*
	 * If we are not logged in or we have started a new session
	 * then let the host reset code handle this
	 */
	if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN ||
	    sc->SCp.phase != session->age) {
		spin_unlock_bh(&session->lock);
		mutex_unlock(&session->eh_mutex);
		return FAILED;
	}

	conn = session->leadconn;
	conn->eh_abort_cnt++;
	age = session->age;

	task = (struct iscsi_task *)sc->SCp.ptr;
	debug_scsi("aborting [sc %p itt 0x%x]\n", sc, task->itt);

	/* task completed before time out */
	if (!task->sc) {
		debug_scsi("sc completed while abort in progress\n");
		goto success;
	}

	if (task->state == ISCSI_TASK_PENDING) {
		fail_command(conn, task, DID_ABORT << 16);
		goto success;
	}

	/* only have one tmf outstanding at a time */
	if (conn->tmf_state != TMF_INITIAL)
		goto failed;
	conn->tmf_state = TMF_QUEUED;

	hdr = &conn->tmhdr;
	iscsi_prep_abort_task_pdu(task, hdr);

	if (iscsi_exec_task_mgmt_fn(conn, hdr, age, session->abort_timeout)) {
		rc = FAILED;
		goto failed;
	}

	switch (conn->tmf_state) {
	case TMF_SUCCESS:
		spin_unlock_bh(&session->lock);
		/*
		 * stop tx side in case the target had sent an abort rsp but
		 * the initiator was still writing out data.
		 */
		iscsi_suspend_tx(conn);
		/*
		 * we do not stop the recv side because targets have been
		 * good and have never sent us a successful tmf response
		 * and then sent more data for the cmd.
		 */
		spin_lock(&session->lock);
		fail_command(conn, task, DID_ABORT << 16);
		conn->tmf_state = TMF_INITIAL;
		spin_unlock(&session->lock);
		iscsi_start_tx(conn);
		goto success_unlocked;
	case TMF_TIMEDOUT:
		spin_unlock_bh(&session->lock);
		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
		goto failed_unlocked;
	case TMF_NOT_FOUND:
		if (!sc->SCp.ptr) {
			conn->tmf_state = TMF_INITIAL;
			/* task completed before tmf abort response */
			debug_scsi("sc completed while abort in progress\n");
			goto success;
		}
		/* fall through */
	default:
		conn->tmf_state = TMF_INITIAL;
		goto failed;
	}

success:
	spin_unlock_bh(&session->lock);
success_unlocked:
	debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, task->itt);
	mutex_unlock(&session->eh_mutex);
	return SUCCESS;

failed:
	spin_unlock_bh(&session->lock);
failed_unlocked:
	debug_scsi("abort failed [sc %p itt 0x%x]\n", sc,
		   task ? task->itt : 0);
	mutex_unlock(&session->eh_mutex);
	return FAILED;
}
EXPORT_SYMBOL_GPL(iscsi_eh_abort);

static void iscsi_prep_lun_reset_pdu(struct scsi_cmnd *sc, struct iscsi_tm *hdr)
{
	memset(hdr, 0, sizeof(*hdr));
	hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
	hdr->flags = ISCSI_TM_FUNC_LOGICAL_UNIT_RESET & ISCSI_FLAG_TM_FUNC_MASK;
	hdr->flags |= ISCSI_FLAG_CMD_FINAL;
	int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun);
	hdr->rtt = RESERVED_ITT;
}

int iscsi_eh_device_reset(struct scsi_cmnd *sc)
{
	struct iscsi_cls_session *cls_session;
	struct iscsi_session *session;
	struct iscsi_conn *conn;
	struct iscsi_tm *hdr;
	int rc = FAILED;

	cls_session = starget_to_session(scsi_target(sc->device));
	session = cls_session->dd_data;

	debug_scsi("LU Reset [sc %p lun %u]\n", sc, sc->device->lun);

	mutex_lock(&session->eh_mutex);
	spin_lock_bh(&session->lock);
	/*
	 * Just check if we are not logged in. We cannot check for
	 * the phase because the reset could come from an ioctl.
	 */
	if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN)
		goto unlock;
	conn = session->leadconn;

	/* only have one tmf outstanding at a time */
	if (conn->tmf_state != TMF_INITIAL)
		goto unlock;
	conn->tmf_state = TMF_QUEUED;

	hdr = &conn->tmhdr;
	iscsi_prep_lun_reset_pdu(sc, hdr);

	if (iscsi_exec_task_mgmt_fn(conn, hdr, session->age,
				    session->lu_reset_timeout)) {
		rc = FAILED;
		goto unlock;
	}

	switch (conn->tmf_state) {
	case TMF_SUCCESS:
		break;
	case TMF_TIMEDOUT:
		spin_unlock_bh(&session->lock);
		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
		goto done;
	default:
		conn->tmf_state = TMF_INITIAL;
		goto unlock;
	}

	rc = SUCCESS;
	spin_unlock_bh(&session->lock);

	iscsi_suspend_tx(conn);

	spin_lock_bh(&session->lock);
	fail_all_commands(conn, sc->device->lun, DID_ERROR);
	conn->tmf_state = TMF_INITIAL;
	spin_unlock_bh(&session->lock);

	iscsi_start_tx(conn);
	goto done;

unlock:
	spin_unlock_bh(&session->lock);
done:
	debug_scsi("iscsi_eh_device_reset %s\n",
		   rc == SUCCESS ? "SUCCESS" : "FAILED");
	mutex_unlock(&session->eh_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(iscsi_eh_device_reset);

/*
 * Pre-allocate a pool of @max items of @item_size. By default, the pool
 * should be accessed via kfifo_{get,put} on q->queue.
 * Optionally, the caller can obtain the array of object pointers
 * by passing in a non-NULL @items pointer
 */
int
iscsi_pool_init(struct iscsi_pool *q, int max, void ***items, int item_size)
{
	int i, num_arrays = 1;

	memset(q, 0, sizeof(*q));

	q->max = max;

	/* If the user passed an items pointer, he wants a copy of
	 * the array. */
	if (items)
		num_arrays++;
	q->pool = kzalloc(num_arrays * max * sizeof(void*), GFP_KERNEL);
	if (q->pool == NULL)
		goto enomem;

	q->queue = kfifo_init((void*)q->pool, max * sizeof(void*),
			      GFP_KERNEL, NULL);
	if (q->queue == ERR_PTR(-ENOMEM))
		goto enomem;

	for (i = 0; i < max; i++) {
		q->pool[i] = kzalloc(item_size, GFP_KERNEL);
		if (q->pool[i] == NULL) {
			q->max = i;
			goto enomem;
		}
		__kfifo_put(q->queue, (void*)&q->pool[i], sizeof(void*));
	}

	if (items) {
		*items = q->pool + max;
		memcpy(*items, q->pool, max * sizeof(void *));
	}

	return 0;

enomem:
	iscsi_pool_free(q);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(iscsi_pool_init);

void iscsi_pool_free(struct iscsi_pool *q)
{
	int i;

	for (i = 0; i < q->max; i++)
		kfree(q->pool[i]);
	if (q->pool)
		kfree(q->pool);
}
EXPORT_SYMBOL_GPL(iscsi_pool_free);

/**
 * iscsi_host_add - add host to system
 * @shost: scsi host
 * @pdev: parent device
 *
 * This should be called by partial offload and software iscsi drivers
 * to add a host to the system.
 */
int iscsi_host_add(struct Scsi_Host *shost, struct device *pdev)
{
	if (!shost->can_queue)
		shost->can_queue = ISCSI_DEF_XMIT_CMDS_MAX;

	return scsi_add_host(shost, pdev);
}
EXPORT_SYMBOL_GPL(iscsi_host_add);

/**
 * iscsi_host_alloc - allocate a host and driver data
 * @sht: scsi host template
 * @dd_data_size: driver host data size
 * @qdepth: default device queue depth
 *
 * This should be called by partial offload and software iscsi drivers.
 * To access the driver specific memory use the iscsi_host_priv() macro.
 */
struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht,
				   int dd_data_size, uint16_t qdepth)
{
	struct Scsi_Host *shost;
	struct iscsi_host *ihost;

	shost = scsi_host_alloc(sht, sizeof(struct iscsi_host) + dd_data_size);
	if (!shost)
		return NULL;
	shost->transportt->eh_timed_out = iscsi_eh_cmd_timed_out;

	if (qdepth > ISCSI_MAX_CMD_PER_LUN || qdepth < 1) {
		if (qdepth != 0)
			printk(KERN_ERR "iscsi: invalid queue depth of %d. "
			       "Queue depth must be between 1 and %d.\n",
			       qdepth, ISCSI_MAX_CMD_PER_LUN);
		qdepth = ISCSI_DEF_CMD_PER_LUN;
	}
	shost->cmd_per_lun = qdepth;

	ihost = shost_priv(shost);
	spin_lock_init(&ihost->lock);
	ihost->state = ISCSI_HOST_SETUP;
	ihost->num_sessions = 0;
	init_waitqueue_head(&ihost->session_removal_wq);
	return shost;
}
EXPORT_SYMBOL_GPL(iscsi_host_alloc);

static void iscsi_notify_host_removed(struct iscsi_cls_session *cls_session)
{
	iscsi_session_failure(cls_session, ISCSI_ERR_INVALID_HOST);
}

/**
 * iscsi_host_remove - remove host and sessions
 * @shost: scsi host
 *
 * If there are any sessions left, this will initiate the removal and wait
 * for the completion.
1971 */ 1972 void iscsi_host_remove(struct Scsi_Host *shost) 1973 { 1974 struct iscsi_host *ihost = shost_priv(shost); 1975 unsigned long flags; 1976 1977 spin_lock_irqsave(&ihost->lock, flags); 1978 ihost->state = ISCSI_HOST_REMOVED; 1979 spin_unlock_irqrestore(&ihost->lock, flags); 1980 1981 iscsi_host_for_each_session(shost, iscsi_notify_host_removed); 1982 wait_event_interruptible(ihost->session_removal_wq, 1983 ihost->num_sessions == 0); 1984 if (signal_pending(current)) 1985 flush_signals(current); 1986 1987 scsi_remove_host(shost); 1988 } 1989 EXPORT_SYMBOL_GPL(iscsi_host_remove); 1990 1991 void iscsi_host_free(struct Scsi_Host *shost) 1992 { 1993 struct iscsi_host *ihost = shost_priv(shost); 1994 1995 kfree(ihost->netdev); 1996 kfree(ihost->hwaddress); 1997 kfree(ihost->initiatorname); 1998 scsi_host_put(shost); 1999 } 2000 EXPORT_SYMBOL_GPL(iscsi_host_free); 2001 2002 static void iscsi_host_dec_session_cnt(struct Scsi_Host *shost) 2003 { 2004 struct iscsi_host *ihost = shost_priv(shost); 2005 unsigned long flags; 2006 2007 shost = scsi_host_get(shost); 2008 if (!shost) { 2009 printk(KERN_ERR "Invalid state. Cannot notify host removal " 2010 "of session teardown event because host already " 2011 "removed.\n"); 2012 return; 2013 } 2014 2015 spin_lock_irqsave(&ihost->lock, flags); 2016 ihost->num_sessions--; 2017 if (ihost->num_sessions == 0) 2018 wake_up(&ihost->session_removal_wq); 2019 spin_unlock_irqrestore(&ihost->lock, flags); 2020 scsi_host_put(shost); 2021 } 2022 2023 /** 2024 * iscsi_session_setup - create iscsi cls session and iscsi session 2025 * @iscsit: iscsi transport template 2026 * @shost: scsi host 2027 * @cmds_max: total number of commands the session can queue 2028 * @cmd_task_size: LLD task private data size 2029 * @initial_cmdsn: initial CmdSN 2030 * 2031 * This can be used by software iscsi_transports that allocate 2032 * a session per scsi host. 2033 * 2034 * Callers should set cmds_max to the largest total number (mgmt + scsi) of 2035 * tasks they support. The iscsi layer reserves ISCSI_MGMT_CMDS_MAX tasks 2036 * for nop handling and login/logout requests. 2037 */ 2038 struct iscsi_cls_session * 2039 iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost, 2040 uint16_t cmds_max, int cmd_task_size, 2041 uint32_t initial_cmdsn, unsigned int id) 2042 { 2043 struct iscsi_host *ihost = shost_priv(shost); 2044 struct iscsi_session *session; 2045 struct iscsi_cls_session *cls_session; 2046 int cmd_i, scsi_cmds, total_cmds = cmds_max; 2047 unsigned long flags; 2048 2049 spin_lock_irqsave(&ihost->lock, flags); 2050 if (ihost->state == ISCSI_HOST_REMOVED) { 2051 spin_unlock_irqrestore(&ihost->lock, flags); 2052 return NULL; 2053 } 2054 ihost->num_sessions++; 2055 spin_unlock_irqrestore(&ihost->lock, flags); 2056 2057 if (!total_cmds) 2058 total_cmds = ISCSI_DEF_XMIT_CMDS_MAX; 2059 /* 2060 * The iscsi layer needs some tasks for nop handling and tmfs, 2061 * so the cmds_max must at least be greater than ISCSI_MGMT_CMDS_MAX 2062 * + 1 command for scsi IO. 2063 */ 2064 if (total_cmds < ISCSI_TOTAL_CMDS_MIN) { 2065 printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue " 2066 "must be a power of two that is at least %d.\n", 2067 total_cmds, ISCSI_TOTAL_CMDS_MIN); 2068 goto dec_session_count; 2069 } 2070 2071 if (total_cmds > ISCSI_TOTAL_CMDS_MAX) { 2072 printk(KERN_ERR "iscsi: invalid can_queue of %d.
can_queue " 2073 "must be a power of 2 less than or equal to %d.\n", 2074 cmds_max, ISCSI_TOTAL_CMDS_MAX); 2075 total_cmds = ISCSI_TOTAL_CMDS_MAX; 2076 } 2077 2078 if (!is_power_of_2(total_cmds)) { 2079 printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue " 2080 "must be a power of 2.\n", total_cmds); 2081 total_cmds = rounddown_pow_of_two(total_cmds); 2082 if (total_cmds < ISCSI_TOTAL_CMDS_MIN) 2083 return NULL; 2084 printk(KERN_INFO "iscsi: Rounding can_queue to %d.\n", 2085 total_cmds); 2086 } 2087 scsi_cmds = total_cmds - ISCSI_MGMT_CMDS_MAX; 2088 2089 cls_session = iscsi_alloc_session(shost, iscsit, 2090 sizeof(struct iscsi_session)); 2091 if (!cls_session) 2092 goto dec_session_count; 2093 session = cls_session->dd_data; 2094 session->cls_session = cls_session; 2095 session->host = shost; 2096 session->state = ISCSI_STATE_FREE; 2097 session->fast_abort = 1; 2098 session->lu_reset_timeout = 15; 2099 session->abort_timeout = 10; 2100 session->scsi_cmds_max = scsi_cmds; 2101 session->cmds_max = total_cmds; 2102 session->queued_cmdsn = session->cmdsn = initial_cmdsn; 2103 session->exp_cmdsn = initial_cmdsn + 1; 2104 session->max_cmdsn = initial_cmdsn + 1; 2105 session->max_r2t = 1; 2106 session->tt = iscsit; 2107 mutex_init(&session->eh_mutex); 2108 spin_lock_init(&session->lock); 2109 2110 /* initialize SCSI PDU commands pool */ 2111 if (iscsi_pool_init(&session->cmdpool, session->cmds_max, 2112 (void***)&session->cmds, 2113 cmd_task_size + sizeof(struct iscsi_task))) 2114 goto cmdpool_alloc_fail; 2115 2116 /* pre-format cmds pool with ITT */ 2117 for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) { 2118 struct iscsi_task *task = session->cmds[cmd_i]; 2119 2120 if (cmd_task_size) 2121 task->dd_data = &task[1]; 2122 task->itt = cmd_i; 2123 INIT_LIST_HEAD(&task->running); 2124 } 2125 2126 if (!try_module_get(iscsit->owner)) 2127 goto module_get_fail; 2128 2129 if (iscsi_add_session(cls_session, id)) 2130 goto cls_session_fail; 2131 2132 return cls_session; 2133 2134 cls_session_fail: 2135 module_put(iscsit->owner); 2136 module_get_fail: 2137 iscsi_pool_free(&session->cmdpool); 2138 cmdpool_alloc_fail: 2139 iscsi_free_session(cls_session); 2140 dec_session_count: 2141 iscsi_host_dec_session_cnt(shost); 2142 return NULL; 2143 } 2144 EXPORT_SYMBOL_GPL(iscsi_session_setup); 2145 2146 /** 2147 * iscsi_session_teardown - destroy session, host, and cls_session 2148 * @cls_session: iscsi session 2149 * 2150 * The driver must have called iscsi_remove_session before 2151 * calling this. 
2152 */ 2153 void iscsi_session_teardown(struct iscsi_cls_session *cls_session) 2154 { 2155 struct iscsi_session *session = cls_session->dd_data; 2156 struct module *owner = cls_session->transport->owner; 2157 struct Scsi_Host *shost = session->host; 2158 2159 iscsi_pool_free(&session->cmdpool); 2160 2161 kfree(session->password); 2162 kfree(session->password_in); 2163 kfree(session->username); 2164 kfree(session->username_in); 2165 kfree(session->targetname); 2166 kfree(session->initiatorname); 2167 kfree(session->ifacename); 2168 2169 iscsi_destroy_session(cls_session); 2170 iscsi_host_dec_session_cnt(shost); 2171 module_put(owner); 2172 } 2173 EXPORT_SYMBOL_GPL(iscsi_session_teardown); 2174 2175 /** 2176 * iscsi_conn_setup - create iscsi_cls_conn and iscsi_conn 2177 * @cls_session: iscsi_cls_session 2178 * @dd_size: private driver data size 2179 * @conn_idx: cid 2180 */ 2181 struct iscsi_cls_conn * 2182 iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size, 2183 uint32_t conn_idx) 2184 { 2185 struct iscsi_session *session = cls_session->dd_data; 2186 struct iscsi_conn *conn; 2187 struct iscsi_cls_conn *cls_conn; 2188 char *data; 2189 2190 cls_conn = iscsi_create_conn(cls_session, sizeof(*conn) + dd_size, 2191 conn_idx); 2192 if (!cls_conn) 2193 return NULL; 2194 conn = cls_conn->dd_data; 2195 memset(conn, 0, sizeof(*conn) + dd_size); 2196 2197 conn->dd_data = cls_conn->dd_data + sizeof(*conn); 2198 conn->session = session; 2199 conn->cls_conn = cls_conn; 2200 conn->c_stage = ISCSI_CONN_INITIAL_STAGE; 2201 conn->id = conn_idx; 2202 conn->exp_statsn = 0; 2203 conn->tmf_state = TMF_INITIAL; 2204 2205 init_timer(&conn->transport_timer); 2206 conn->transport_timer.data = (unsigned long)conn; 2207 conn->transport_timer.function = iscsi_check_transport_timeouts; 2208 2209 INIT_LIST_HEAD(&conn->run_list); 2210 INIT_LIST_HEAD(&conn->mgmt_run_list); 2211 INIT_LIST_HEAD(&conn->mgmtqueue); 2212 INIT_LIST_HEAD(&conn->xmitqueue); 2213 INIT_LIST_HEAD(&conn->requeue); 2214 INIT_WORK(&conn->xmitwork, iscsi_xmitworker); 2215 2216 /* allocate login_task used for the login/text sequences */ 2217 spin_lock_bh(&session->lock); 2218 if (!__kfifo_get(session->cmdpool.queue, 2219 (void*)&conn->login_task, 2220 sizeof(void*))) { 2221 spin_unlock_bh(&session->lock); 2222 goto login_task_alloc_fail; 2223 } 2224 spin_unlock_bh(&session->lock); 2225 2226 data = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN, GFP_KERNEL); 2227 if (!data) 2228 goto login_task_data_alloc_fail; 2229 conn->login_task->data = conn->data = data; 2230 2231 init_timer(&conn->tmf_timer); 2232 init_waitqueue_head(&conn->ehwait); 2233 2234 return cls_conn; 2235 2236 login_task_data_alloc_fail: 2237 __kfifo_put(session->cmdpool.queue, (void*)&conn->login_task, 2238 sizeof(void*)); 2239 login_task_alloc_fail: 2240 iscsi_destroy_conn(cls_conn); 2241 return NULL; 2242 } 2243 EXPORT_SYMBOL_GPL(iscsi_conn_setup); 2244 2245 /** 2246 * iscsi_conn_teardown - teardown iscsi connection 2247 * cls_conn: iscsi class connection 2248 * 2249 * TODO: we may need to make this into a two step process 2250 * like scsi-mls remove + put host 2251 */ 2252 void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn) 2253 { 2254 struct iscsi_conn *conn = cls_conn->dd_data; 2255 struct iscsi_session *session = conn->session; 2256 unsigned long flags; 2257 2258 del_timer_sync(&conn->transport_timer); 2259 2260 spin_lock_bh(&session->lock); 2261 conn->c_stage = ISCSI_CONN_CLEANUP_WAIT; 2262 if (session->leadconn == conn) { 2263 /* 2264 * leading connection? 
then give up on recovery. 2265 */ 2266 session->state = ISCSI_STATE_TERMINATE; 2267 wake_up(&conn->ehwait); 2268 } 2269 spin_unlock_bh(&session->lock); 2270 2271 /* 2272 * Block until all in-progress commands for this connection 2273 * time out or fail. 2274 */ 2275 for (;;) { 2276 spin_lock_irqsave(session->host->host_lock, flags); 2277 if (!session->host->host_busy) { /* OK for ERL == 0 */ 2278 spin_unlock_irqrestore(session->host->host_lock, flags); 2279 break; 2280 } 2281 spin_unlock_irqrestore(session->host->host_lock, flags); 2282 msleep_interruptible(500); 2283 iscsi_conn_printk(KERN_INFO, conn, "iscsi conn_destroy(): " 2284 "host_busy %d host_failed %d\n", 2285 session->host->host_busy, 2286 session->host->host_failed); 2287 /* 2288 * force eh_abort() to unblock 2289 */ 2290 wake_up(&conn->ehwait); 2291 } 2292 2293 /* flush queued up work because we free the connection below */ 2294 iscsi_suspend_tx(conn); 2295 2296 spin_lock_bh(&session->lock); 2297 kfree(conn->data); 2298 kfree(conn->persistent_address); 2299 __kfifo_put(session->cmdpool.queue, (void*)&conn->login_task, 2300 sizeof(void*)); 2301 if (session->leadconn == conn) 2302 session->leadconn = NULL; 2303 spin_unlock_bh(&session->lock); 2304 2305 iscsi_destroy_conn(cls_conn); 2306 } 2307 EXPORT_SYMBOL_GPL(iscsi_conn_teardown); 2308 2309 int iscsi_conn_start(struct iscsi_cls_conn *cls_conn) 2310 { 2311 struct iscsi_conn *conn = cls_conn->dd_data; 2312 struct iscsi_session *session = conn->session; 2313 2314 if (!session) { 2315 iscsi_conn_printk(KERN_ERR, conn, 2316 "can't start unbound connection\n"); 2317 return -EPERM; 2318 } 2319 2320 if ((session->imm_data_en || !session->initial_r2t_en) && 2321 session->first_burst > session->max_burst) { 2322 iscsi_conn_printk(KERN_INFO, conn, "invalid burst lengths: " 2323 "first_burst %d max_burst %d\n", 2324 session->first_burst, session->max_burst); 2325 return -EINVAL; 2326 } 2327 2328 if (conn->ping_timeout && !conn->recv_timeout) { 2329 iscsi_conn_printk(KERN_ERR, conn, "invalid recv timeout of " 2330 "zero. Using 5 seconds.\n"); 2331 conn->recv_timeout = 5; 2332 } 2333 2334 if (conn->recv_timeout && !conn->ping_timeout) { 2335 iscsi_conn_printk(KERN_ERR, conn, "invalid ping timeout of " 2336 "zero. Using 5 seconds.\n"); 2337 conn->ping_timeout = 5; 2338 } 2339 2340 spin_lock_bh(&session->lock); 2341 conn->c_stage = ISCSI_CONN_STARTED; 2342 session->state = ISCSI_STATE_LOGGED_IN; 2343 session->queued_cmdsn = session->cmdsn; 2344 2345 conn->last_recv = jiffies; 2346 conn->last_ping = jiffies; 2347 if (conn->recv_timeout && conn->ping_timeout) 2348 mod_timer(&conn->transport_timer, 2349 jiffies + (conn->recv_timeout * HZ)); 2350 2351 switch(conn->stop_stage) { 2352 case STOP_CONN_RECOVER: 2353 /* 2354 * unblock eh_abort() if it is blocked.
re-try all 2355 * commands after successful recovery 2356 */ 2357 conn->stop_stage = 0; 2358 conn->tmf_state = TMF_INITIAL; 2359 session->age++; 2360 if (session->age == 16) 2361 session->age = 0; 2362 break; 2363 case STOP_CONN_TERM: 2364 conn->stop_stage = 0; 2365 break; 2366 default: 2367 break; 2368 } 2369 spin_unlock_bh(&session->lock); 2370 2371 iscsi_unblock_session(session->cls_session); 2372 wake_up(&conn->ehwait); 2373 return 0; 2374 } 2375 EXPORT_SYMBOL_GPL(iscsi_conn_start); 2376 2377 static void 2378 flush_control_queues(struct iscsi_session *session, struct iscsi_conn *conn) 2379 { 2380 struct iscsi_task *task, *tmp; 2381 2382 /* handle pending */ 2383 list_for_each_entry_safe(task, tmp, &conn->mgmtqueue, running) { 2384 debug_scsi("flushing pending mgmt task itt 0x%x\n", task->itt); 2385 /* release ref from prep task */ 2386 __iscsi_put_task(task); 2387 } 2388 2389 /* handle running */ 2390 list_for_each_entry_safe(task, tmp, &conn->mgmt_run_list, running) { 2391 debug_scsi("flushing running mgmt task itt 0x%x\n", task->itt); 2392 /* release ref from prep task */ 2393 __iscsi_put_task(task); 2394 } 2395 2396 conn->task = NULL; 2397 } 2398 2399 static void iscsi_start_session_recovery(struct iscsi_session *session, 2400 struct iscsi_conn *conn, int flag) 2401 { 2402 int old_stop_stage; 2403 2404 del_timer_sync(&conn->transport_timer); 2405 2406 mutex_lock(&session->eh_mutex); 2407 spin_lock_bh(&session->lock); 2408 if (conn->stop_stage == STOP_CONN_TERM) { 2409 spin_unlock_bh(&session->lock); 2410 mutex_unlock(&session->eh_mutex); 2411 return; 2412 } 2413 2414 /* 2415 * When this is called for the in_login state, we only want to clean 2416 * up the login task and connection. We do not need to block and set 2417 * the recovery state again 2418 */ 2419 if (flag == STOP_CONN_TERM) 2420 session->state = ISCSI_STATE_TERMINATE; 2421 else if (conn->stop_stage != STOP_CONN_RECOVER) 2422 session->state = ISCSI_STATE_IN_RECOVERY; 2423 2424 old_stop_stage = conn->stop_stage; 2425 conn->stop_stage = flag; 2426 conn->c_stage = ISCSI_CONN_STOPPED; 2427 spin_unlock_bh(&session->lock); 2428 2429 iscsi_suspend_tx(conn); 2430 /* 2431 * for connection level recovery we should not calculate 2432 * header digest. conn->hdr_size used for optimization 2433 * in hdr_extract() and will be re-negotiated at 2434 * set_param() time. 2435 */ 2436 if (flag == STOP_CONN_RECOVER) { 2437 conn->hdrdgst_en = 0; 2438 conn->datadgst_en = 0; 2439 if (session->state == ISCSI_STATE_IN_RECOVERY && 2440 old_stop_stage != STOP_CONN_RECOVER) { 2441 debug_scsi("blocking session\n"); 2442 iscsi_block_session(session->cls_session); 2443 } 2444 } 2445 2446 /* 2447 * flush queues. 
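 * Outstanding SCSI commands are failed with DID_TRANSPORT_DISRUPTED
 * (recovery, so they can be retried) or DID_ERROR (termination), and
 * any pending or running mgmt tasks are released.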
2448 */ 2449 spin_lock_bh(&session->lock); 2450 if (flag == STOP_CONN_RECOVER) 2451 fail_all_commands(conn, -1, DID_TRANSPORT_DISRUPTED); 2452 else 2453 fail_all_commands(conn, -1, DID_ERROR); 2454 flush_control_queues(session, conn); 2455 spin_unlock_bh(&session->lock); 2456 mutex_unlock(&session->eh_mutex); 2457 } 2458 2459 void iscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag) 2460 { 2461 struct iscsi_conn *conn = cls_conn->dd_data; 2462 struct iscsi_session *session = conn->session; 2463 2464 switch (flag) { 2465 case STOP_CONN_RECOVER: 2466 case STOP_CONN_TERM: 2467 iscsi_start_session_recovery(session, conn, flag); 2468 break; 2469 default: 2470 iscsi_conn_printk(KERN_ERR, conn, 2471 "invalid stop flag %d\n", flag); 2472 } 2473 } 2474 EXPORT_SYMBOL_GPL(iscsi_conn_stop); 2475 2476 int iscsi_conn_bind(struct iscsi_cls_session *cls_session, 2477 struct iscsi_cls_conn *cls_conn, int is_leading) 2478 { 2479 struct iscsi_session *session = cls_session->dd_data; 2480 struct iscsi_conn *conn = cls_conn->dd_data; 2481 2482 spin_lock_bh(&session->lock); 2483 if (is_leading) 2484 session->leadconn = conn; 2485 spin_unlock_bh(&session->lock); 2486 2487 /* 2488 * Unblock xmitworker(), Login Phase will pass through. 2489 */ 2490 clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx); 2491 clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); 2492 return 0; 2493 } 2494 EXPORT_SYMBOL_GPL(iscsi_conn_bind); 2495 2496 2497 int iscsi_set_param(struct iscsi_cls_conn *cls_conn, 2498 enum iscsi_param param, char *buf, int buflen) 2499 { 2500 struct iscsi_conn *conn = cls_conn->dd_data; 2501 struct iscsi_session *session = conn->session; 2502 uint32_t value; 2503 2504 switch(param) { 2505 case ISCSI_PARAM_FAST_ABORT: 2506 sscanf(buf, "%d", &session->fast_abort); 2507 break; 2508 case ISCSI_PARAM_ABORT_TMO: 2509 sscanf(buf, "%d", &session->abort_timeout); 2510 break; 2511 case ISCSI_PARAM_LU_RESET_TMO: 2512 sscanf(buf, "%d", &session->lu_reset_timeout); 2513 break; 2514 case ISCSI_PARAM_PING_TMO: 2515 sscanf(buf, "%d", &conn->ping_timeout); 2516 break; 2517 case ISCSI_PARAM_RECV_TMO: 2518 sscanf(buf, "%d", &conn->recv_timeout); 2519 break; 2520 case ISCSI_PARAM_MAX_RECV_DLENGTH: 2521 sscanf(buf, "%d", &conn->max_recv_dlength); 2522 break; 2523 case ISCSI_PARAM_MAX_XMIT_DLENGTH: 2524 sscanf(buf, "%d", &conn->max_xmit_dlength); 2525 break; 2526 case ISCSI_PARAM_HDRDGST_EN: 2527 sscanf(buf, "%d", &conn->hdrdgst_en); 2528 break; 2529 case ISCSI_PARAM_DATADGST_EN: 2530 sscanf(buf, "%d", &conn->datadgst_en); 2531 break; 2532 case ISCSI_PARAM_INITIAL_R2T_EN: 2533 sscanf(buf, "%d", &session->initial_r2t_en); 2534 break; 2535 case ISCSI_PARAM_MAX_R2T: 2536 sscanf(buf, "%d", &session->max_r2t); 2537 break; 2538 case ISCSI_PARAM_IMM_DATA_EN: 2539 sscanf(buf, "%d", &session->imm_data_en); 2540 break; 2541 case ISCSI_PARAM_FIRST_BURST: 2542 sscanf(buf, "%d", &session->first_burst); 2543 break; 2544 case ISCSI_PARAM_MAX_BURST: 2545 sscanf(buf, "%d", &session->max_burst); 2546 break; 2547 case ISCSI_PARAM_PDU_INORDER_EN: 2548 sscanf(buf, "%d", &session->pdu_inorder_en); 2549 break; 2550 case ISCSI_PARAM_DATASEQ_INORDER_EN: 2551 sscanf(buf, "%d", &session->dataseq_inorder_en); 2552 break; 2553 case ISCSI_PARAM_ERL: 2554 sscanf(buf, "%d", &session->erl); 2555 break; 2556 case ISCSI_PARAM_IFMARKER_EN: 2557 sscanf(buf, "%d", &value); 2558 BUG_ON(value); 2559 break; 2560 case ISCSI_PARAM_OFMARKER_EN: 2561 sscanf(buf, "%d", &value); 2562 BUG_ON(value); 2563 break; 2564 case ISCSI_PARAM_EXP_STATSN: 2565 sscanf(buf, "%u", 
&conn->exp_statsn); 2566 break; 2567 case ISCSI_PARAM_USERNAME: 2568 kfree(session->username); 2569 session->username = kstrdup(buf, GFP_KERNEL); 2570 if (!session->username) 2571 return -ENOMEM; 2572 break; 2573 case ISCSI_PARAM_USERNAME_IN: 2574 kfree(session->username_in); 2575 session->username_in = kstrdup(buf, GFP_KERNEL); 2576 if (!session->username_in) 2577 return -ENOMEM; 2578 break; 2579 case ISCSI_PARAM_PASSWORD: 2580 kfree(session->password); 2581 session->password = kstrdup(buf, GFP_KERNEL); 2582 if (!session->password) 2583 return -ENOMEM; 2584 break; 2585 case ISCSI_PARAM_PASSWORD_IN: 2586 kfree(session->password_in); 2587 session->password_in = kstrdup(buf, GFP_KERNEL); 2588 if (!session->password_in) 2589 return -ENOMEM; 2590 break; 2591 case ISCSI_PARAM_TARGET_NAME: 2592 /* this should not change between logins */ 2593 if (session->targetname) 2594 break; 2595 2596 session->targetname = kstrdup(buf, GFP_KERNEL); 2597 if (!session->targetname) 2598 return -ENOMEM; 2599 break; 2600 case ISCSI_PARAM_TPGT: 2601 sscanf(buf, "%d", &session->tpgt); 2602 break; 2603 case ISCSI_PARAM_PERSISTENT_PORT: 2604 sscanf(buf, "%d", &conn->persistent_port); 2605 break; 2606 case ISCSI_PARAM_PERSISTENT_ADDRESS: 2607 /* 2608 * this is the address returned in discovery so it should 2609 * not change between logins. 2610 */ 2611 if (conn->persistent_address) 2612 break; 2613 2614 conn->persistent_address = kstrdup(buf, GFP_KERNEL); 2615 if (!conn->persistent_address) 2616 return -ENOMEM; 2617 break; 2618 case ISCSI_PARAM_IFACE_NAME: 2619 if (!session->ifacename) 2620 session->ifacename = kstrdup(buf, GFP_KERNEL); 2621 break; 2622 case ISCSI_PARAM_INITIATOR_NAME: 2623 if (!session->initiatorname) 2624 session->initiatorname = kstrdup(buf, GFP_KERNEL); 2625 break; 2626 default: 2627 return -ENOSYS; 2628 } 2629 2630 return 0; 2631 } 2632 EXPORT_SYMBOL_GPL(iscsi_set_param); 2633 2634 int iscsi_session_get_param(struct iscsi_cls_session *cls_session, 2635 enum iscsi_param param, char *buf) 2636 { 2637 struct iscsi_session *session = cls_session->dd_data; 2638 int len; 2639 2640 switch(param) { 2641 case ISCSI_PARAM_FAST_ABORT: 2642 len = sprintf(buf, "%d\n", session->fast_abort); 2643 break; 2644 case ISCSI_PARAM_ABORT_TMO: 2645 len = sprintf(buf, "%d\n", session->abort_timeout); 2646 break; 2647 case ISCSI_PARAM_LU_RESET_TMO: 2648 len = sprintf(buf, "%d\n", session->lu_reset_timeout); 2649 break; 2650 case ISCSI_PARAM_INITIAL_R2T_EN: 2651 len = sprintf(buf, "%d\n", session->initial_r2t_en); 2652 break; 2653 case ISCSI_PARAM_MAX_R2T: 2654 len = sprintf(buf, "%hu\n", session->max_r2t); 2655 break; 2656 case ISCSI_PARAM_IMM_DATA_EN: 2657 len = sprintf(buf, "%d\n", session->imm_data_en); 2658 break; 2659 case ISCSI_PARAM_FIRST_BURST: 2660 len = sprintf(buf, "%u\n", session->first_burst); 2661 break; 2662 case ISCSI_PARAM_MAX_BURST: 2663 len = sprintf(buf, "%u\n", session->max_burst); 2664 break; 2665 case ISCSI_PARAM_PDU_INORDER_EN: 2666 len = sprintf(buf, "%d\n", session->pdu_inorder_en); 2667 break; 2668 case ISCSI_PARAM_DATASEQ_INORDER_EN: 2669 len = sprintf(buf, "%d\n", session->dataseq_inorder_en); 2670 break; 2671 case ISCSI_PARAM_ERL: 2672 len = sprintf(buf, "%d\n", session->erl); 2673 break; 2674 case ISCSI_PARAM_TARGET_NAME: 2675 len = sprintf(buf, "%s\n", session->targetname); 2676 break; 2677 case ISCSI_PARAM_TPGT: 2678 len = sprintf(buf, "%d\n", session->tpgt); 2679 break; 2680 case ISCSI_PARAM_USERNAME: 2681 len = sprintf(buf, "%s\n", session->username); 2682 break; 2683 case 
ISCSI_PARAM_USERNAME_IN: 2684 len = sprintf(buf, "%s\n", session->username_in); 2685 break; 2686 case ISCSI_PARAM_PASSWORD: 2687 len = sprintf(buf, "%s\n", session->password); 2688 break; 2689 case ISCSI_PARAM_PASSWORD_IN: 2690 len = sprintf(buf, "%s\n", session->password_in); 2691 break; 2692 case ISCSI_PARAM_IFACE_NAME: 2693 len = sprintf(buf, "%s\n", session->ifacename); 2694 break; 2695 case ISCSI_PARAM_INITIATOR_NAME: 2696 if (!session->initiatorname) 2697 len = sprintf(buf, "%s\n", "unknown"); 2698 else 2699 len = sprintf(buf, "%s\n", session->initiatorname); 2700 break; 2701 default: 2702 return -ENOSYS; 2703 } 2704 2705 return len; 2706 } 2707 EXPORT_SYMBOL_GPL(iscsi_session_get_param); 2708 2709 int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn, 2710 enum iscsi_param param, char *buf) 2711 { 2712 struct iscsi_conn *conn = cls_conn->dd_data; 2713 int len; 2714 2715 switch(param) { 2716 case ISCSI_PARAM_PING_TMO: 2717 len = sprintf(buf, "%u\n", conn->ping_timeout); 2718 break; 2719 case ISCSI_PARAM_RECV_TMO: 2720 len = sprintf(buf, "%u\n", conn->recv_timeout); 2721 break; 2722 case ISCSI_PARAM_MAX_RECV_DLENGTH: 2723 len = sprintf(buf, "%u\n", conn->max_recv_dlength); 2724 break; 2725 case ISCSI_PARAM_MAX_XMIT_DLENGTH: 2726 len = sprintf(buf, "%u\n", conn->max_xmit_dlength); 2727 break; 2728 case ISCSI_PARAM_HDRDGST_EN: 2729 len = sprintf(buf, "%d\n", conn->hdrdgst_en); 2730 break; 2731 case ISCSI_PARAM_DATADGST_EN: 2732 len = sprintf(buf, "%d\n", conn->datadgst_en); 2733 break; 2734 case ISCSI_PARAM_IFMARKER_EN: 2735 len = sprintf(buf, "%d\n", conn->ifmarker_en); 2736 break; 2737 case ISCSI_PARAM_OFMARKER_EN: 2738 len = sprintf(buf, "%d\n", conn->ofmarker_en); 2739 break; 2740 case ISCSI_PARAM_EXP_STATSN: 2741 len = sprintf(buf, "%u\n", conn->exp_statsn); 2742 break; 2743 case ISCSI_PARAM_PERSISTENT_PORT: 2744 len = sprintf(buf, "%d\n", conn->persistent_port); 2745 break; 2746 case ISCSI_PARAM_PERSISTENT_ADDRESS: 2747 len = sprintf(buf, "%s\n", conn->persistent_address); 2748 break; 2749 default: 2750 return -ENOSYS; 2751 } 2752 2753 return len; 2754 } 2755 EXPORT_SYMBOL_GPL(iscsi_conn_get_param); 2756 2757 int iscsi_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param, 2758 char *buf) 2759 { 2760 struct iscsi_host *ihost = shost_priv(shost); 2761 int len; 2762 2763 switch (param) { 2764 case ISCSI_HOST_PARAM_NETDEV_NAME: 2765 if (!ihost->netdev) 2766 len = sprintf(buf, "%s\n", "default"); 2767 else 2768 len = sprintf(buf, "%s\n", ihost->netdev); 2769 break; 2770 case ISCSI_HOST_PARAM_HWADDRESS: 2771 if (!ihost->hwaddress) 2772 len = sprintf(buf, "%s\n", "default"); 2773 else 2774 len = sprintf(buf, "%s\n", ihost->hwaddress); 2775 break; 2776 case ISCSI_HOST_PARAM_INITIATOR_NAME: 2777 if (!ihost->initiatorname) 2778 len = sprintf(buf, "%s\n", "unknown"); 2779 else 2780 len = sprintf(buf, "%s\n", ihost->initiatorname); 2781 break; 2782 case ISCSI_HOST_PARAM_IPADDRESS: 2783 if (!strlen(ihost->local_address)) 2784 len = sprintf(buf, "%s\n", "unknown"); 2785 else 2786 len = sprintf(buf, "%s\n", 2787 ihost->local_address); 2788 break; 2789 default: 2790 return -ENOSYS; 2791 } 2792 2793 return len; 2794 } 2795 EXPORT_SYMBOL_GPL(iscsi_host_get_param); 2796 2797 int iscsi_host_set_param(struct Scsi_Host *shost, enum iscsi_host_param param, 2798 char *buf, int buflen) 2799 { 2800 struct iscsi_host *ihost = shost_priv(shost); 2801 2802 switch (param) { 2803 case ISCSI_HOST_PARAM_NETDEV_NAME: 2804 if (!ihost->netdev) 2805 ihost->netdev = kstrdup(buf, GFP_KERNEL); 2806 
break; 2807 case ISCSI_HOST_PARAM_HWADDRESS: 2808 if (!ihost->hwaddress) 2809 ihost->hwaddress = kstrdup(buf, GFP_KERNEL); 2810 break; 2811 case ISCSI_HOST_PARAM_INITIATOR_NAME: 2812 if (!ihost->initiatorname) 2813 ihost->initiatorname = kstrdup(buf, GFP_KERNEL); 2814 break; 2815 default: 2816 return -ENOSYS; 2817 } 2818 2819 return 0; 2820 } 2821 EXPORT_SYMBOL_GPL(iscsi_host_set_param); 2822 2823 MODULE_AUTHOR("Mike Christie"); 2824 MODULE_DESCRIPTION("iSCSI library functions"); 2825 MODULE_LICENSE("GPL"); 2826
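
/*
 * Editorial sketch (not part of the original source, kept under #if 0 so
 * it is never compiled): the rough order in which a software iscsi
 * transport driver would call the helpers exported above to bring a
 * host, session and leading connection up, and to unwind on failure.
 * "example_sht", "example_transport", "example_scsi_transport" and the
 * numeric arguments are placeholder assumptions; in a real driver these
 * steps are spread across the iscsi_transport callbacks (session/conn
 * create, bind, start) driven by the iscsi daemon rather than done in
 * a single function.
 */
#if 0
static struct scsi_host_template example_sht;			/* placeholder */
static struct iscsi_transport example_transport;		/* placeholder */
static struct scsi_transport_template *example_scsi_transport;	/* e.g. from iscsi_register_transport() */

static int example_bringup(struct device *pdev)
{
	struct Scsi_Host *shost;
	struct iscsi_cls_session *cls_session;
	struct iscsi_cls_conn *cls_conn;
	int err;

	/* allocate the host (no extra driver data here) and register it */
	shost = iscsi_host_alloc(&example_sht, 0, ISCSI_DEF_CMD_PER_LUN);
	if (!shost)
		return -ENOMEM;
	shost->transportt = example_scsi_transport;
	err = iscsi_host_add(shost, pdev);
	if (err)
		goto free_host;

	/* a cmds_max of 0 selects ISCSI_DEF_XMIT_CMDS_MAX */
	cls_session = iscsi_session_setup(&example_transport, shost,
					  0 /* cmds_max */,
					  0 /* cmd_task_size */,
					  1 /* initial_cmdsn */,
					  0 /* target id */);
	if (!cls_session) {
		err = -ENOMEM;
		goto remove_host;
	}

	/* create and bind the leading connection, then start it */
	cls_conn = iscsi_conn_setup(cls_session, 0, 0 /* cid */);
	if (!cls_conn) {
		err = -ENOMEM;
		goto teardown_session;
	}
	err = iscsi_conn_bind(cls_session, cls_conn, 1 /* is_leading */);
	if (err)
		goto teardown_conn;

	/* ... login negotiation / iscsi_set_param() calls go here ... */

	err = iscsi_conn_start(cls_conn);
	if (err)
		goto teardown_conn;
	return 0;

teardown_conn:
	iscsi_conn_teardown(cls_conn);
teardown_session:
	iscsi_session_teardown(cls_session);
remove_host:
	iscsi_host_remove(shost);
free_host:
	iscsi_host_free(shost);
	return err;
}
#endif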