// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * iSCSI lib functions
 *
 * Copyright (C) 2006 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2004 - 2006 Mike Christie
 * Copyright (C) 2004 - 2005 Dmitry Yusupov
 * Copyright (C) 2004 - 2005 Alex Aizman
 * maintained by open-iscsi@googlegroups.com
 */
#include <linux/types.h>
#include <linux/kfifo.h>
#include <linux/delay.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/module.h>
#include <asm/unaligned.h>
#include <net/tcp.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi.h>
#include <scsi/iscsi_proto.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_iscsi.h>
#include <scsi/libiscsi.h>
#include <trace/events/iscsi.h>

static int iscsi_dbg_lib_conn;
module_param_named(debug_libiscsi_conn, iscsi_dbg_lib_conn, int,
		   S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug_libiscsi_conn,
		 "Turn on debugging for connections in libiscsi module. "
		 "Set to 1 to turn on, and zero to turn off. Default is off.");

static int iscsi_dbg_lib_session;
module_param_named(debug_libiscsi_session, iscsi_dbg_lib_session, int,
		   S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug_libiscsi_session,
		 "Turn on debugging for sessions in libiscsi module. "
		 "Set to 1 to turn on, and zero to turn off. Default is off.");

static int iscsi_dbg_lib_eh;
module_param_named(debug_libiscsi_eh, iscsi_dbg_lib_eh, int,
		   S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug_libiscsi_eh,
		 "Turn on debugging for error handling in libiscsi module. "
		 "Set to 1 to turn on, and zero to turn off. Default is off.");

#define ISCSI_DBG_CONN(_conn, dbg_fmt, arg...)			\
	do {							\
		if (iscsi_dbg_lib_conn)				\
			iscsi_conn_printk(KERN_INFO, _conn,	\
					  "%s " dbg_fmt,	\
					  __func__, ##arg);	\
		iscsi_dbg_trace(trace_iscsi_dbg_conn,		\
				&(_conn)->cls_conn->dev,	\
				"%s " dbg_fmt, __func__, ##arg);\
	} while (0);

#define ISCSI_DBG_SESSION(_session, dbg_fmt, arg...)			\
	do {								\
		if (iscsi_dbg_lib_session)				\
			iscsi_session_printk(KERN_INFO, _session,	\
					     "%s " dbg_fmt,		\
					     __func__, ##arg);		\
		iscsi_dbg_trace(trace_iscsi_dbg_session,		\
				&(_session)->cls_session->dev,		\
				"%s " dbg_fmt, __func__, ##arg);	\
	} while (0);

#define ISCSI_DBG_EH(_session, dbg_fmt, arg...)				\
	do {								\
		if (iscsi_dbg_lib_eh)					\
			iscsi_session_printk(KERN_INFO, _session,	\
					     "%s " dbg_fmt,		\
					     __func__, ##arg);		\
		iscsi_dbg_trace(trace_iscsi_dbg_eh,			\
				&(_session)->cls_session->dev,		\
				"%s " dbg_fmt, __func__, ##arg);	\
	} while (0);

#define ISCSI_CMD_COMPL_WAIT 5

inline void iscsi_conn_queue_xmit(struct iscsi_conn *conn)
{
	struct Scsi_Host *shost = conn->session->host;
	struct iscsi_host *ihost = shost_priv(shost);

	if (ihost->workq)
		queue_work(ihost->workq, &conn->xmitwork);
}
EXPORT_SYMBOL_GPL(iscsi_conn_queue_xmit);

inline void iscsi_conn_queue_recv(struct iscsi_conn *conn)
{
	struct Scsi_Host *shost = conn->session->host;
	struct iscsi_host *ihost = shost_priv(shost);

	if (ihost->workq && !test_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags))
		queue_work(ihost->workq, &conn->recvwork);
}
EXPORT_SYMBOL_GPL(iscsi_conn_queue_recv);

static void __iscsi_update_cmdsn(struct iscsi_session *session,
				 uint32_t exp_cmdsn, uint32_t max_cmdsn)
{
	/*
	 * standard specifies this check for when to update expected and
	 * max sequence numbers
	 */
	if (iscsi_sna_lt(max_cmdsn, exp_cmdsn - 1))
		return;

	if (exp_cmdsn != session->exp_cmdsn &&
	    !iscsi_sna_lt(exp_cmdsn, session->exp_cmdsn))
		session->exp_cmdsn = exp_cmdsn;

	if (max_cmdsn != session->max_cmdsn &&
	    !iscsi_sna_lt(max_cmdsn, session->max_cmdsn))
		session->max_cmdsn = max_cmdsn;
}

void iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
{
	__iscsi_update_cmdsn(session, be32_to_cpu(hdr->exp_cmdsn),
			     be32_to_cpu(hdr->max_cmdsn));
}
EXPORT_SYMBOL_GPL(iscsi_update_cmdsn);

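/*
 * The comparisons above use serial number arithmetic (RFC 1982), so the
 * CmdSN window keeps advancing across 32-bit wrap. As an illustrative
 * example (not taken from a real trace): once the sequence numbers wrap,
 * iscsi_sna_lt(0xfffffffeU, 0x00000002U) is true, so a MaxCmdSN of 2
 * received while session->max_cmdsn is 0xfffffffe still moves the window
 * forward instead of being discarded as "smaller".
 */
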
/**
 * iscsi_prep_data_out_pdu - initialize Data-Out
 * @task: scsi command task
 * @r2t: R2T info
 * @hdr: iscsi data out pdu
 *
 * Notes:
 *	Initialize Data-Out within this R2T sequence and find the
 *	proper data_offset within this SCSI command.
 *
 *	This function is called with connection lock taken.
 **/
void iscsi_prep_data_out_pdu(struct iscsi_task *task, struct iscsi_r2t_info *r2t,
			     struct iscsi_data *hdr)
{
	struct iscsi_conn *conn = task->conn;
	unsigned int left = r2t->data_length - r2t->sent;

	task->hdr_len = sizeof(struct iscsi_data);

	memset(hdr, 0, sizeof(struct iscsi_data));
	hdr->ttt = r2t->ttt;
	hdr->datasn = cpu_to_be32(r2t->datasn);
	r2t->datasn++;
	hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
	hdr->lun = task->lun;
	hdr->itt = task->hdr_itt;
	hdr->exp_statsn = r2t->exp_statsn;
	hdr->offset = cpu_to_be32(r2t->data_offset + r2t->sent);
	if (left > conn->max_xmit_dlength) {
		hton24(hdr->dlength, conn->max_xmit_dlength);
		r2t->data_count = conn->max_xmit_dlength;
		hdr->flags = 0;
	} else {
		hton24(hdr->dlength, left);
		r2t->data_count = left;
		hdr->flags = ISCSI_FLAG_CMD_FINAL;
	}
	conn->dataout_pdus_cnt++;
}
EXPORT_SYMBOL_GPL(iscsi_prep_data_out_pdu);

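/*
 * An LLD typically walks an R2T with this helper, one Data-Out per
 * iteration, until r2t->sent reaches r2t->data_length. A rough sketch
 * (illustrative only; "send_pdu" stands in for the driver's own transmit
 * routine):
 *
 *	struct iscsi_data hdr;
 *
 *	while (r2t->sent < r2t->data_length) {
 *		iscsi_prep_data_out_pdu(task, r2t, &hdr);
 *		send_pdu(conn, &hdr, sg, r2t->data_offset + r2t->sent,
 *			 r2t->data_count);
 *		r2t->sent += r2t->data_count;
 *	}
 *
 * Each call fills in dlength/offset/DataSN and sets the F bit on the last
 * PDU of the sequence, as done above.
 */
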
static int iscsi_add_hdr(struct iscsi_task *task, unsigned len)
{
	unsigned exp_len = task->hdr_len + len;

	if (exp_len > task->hdr_max) {
		WARN_ON(1);
		return -EINVAL;
	}

	WARN_ON(len & (ISCSI_PAD_LEN - 1)); /* caller must pad the AHS */
	task->hdr_len = exp_len;
	return 0;
}

/*
 * make an extended cdb AHS
 */
static int iscsi_prep_ecdb_ahs(struct iscsi_task *task)
{
	struct scsi_cmnd *cmd = task->sc;
	unsigned rlen, pad_len;
	unsigned short ahslength;
	struct iscsi_ecdb_ahdr *ecdb_ahdr;
	int rc;

	ecdb_ahdr = iscsi_next_hdr(task);
	rlen = cmd->cmd_len - ISCSI_CDB_SIZE;

	BUG_ON(rlen > sizeof(ecdb_ahdr->ecdb));
	ahslength = rlen + sizeof(ecdb_ahdr->reserved);

	pad_len = iscsi_padding(rlen);

	rc = iscsi_add_hdr(task, sizeof(ecdb_ahdr->ahslength) +
			   sizeof(ecdb_ahdr->ahstype) + ahslength + pad_len);
	if (rc)
		return rc;

	if (pad_len)
		memset(&ecdb_ahdr->ecdb[rlen], 0, pad_len);

	ecdb_ahdr->ahslength = cpu_to_be16(ahslength);
	ecdb_ahdr->ahstype = ISCSI_AHSTYPE_CDB;
	ecdb_ahdr->reserved = 0;
	memcpy(ecdb_ahdr->ecdb, cmd->cmnd + ISCSI_CDB_SIZE, rlen);

	ISCSI_DBG_SESSION(task->conn->session,
			  "iscsi_prep_ecdb_ahs: varlen_cdb_len %d "
			  "rlen %d pad_len %d ahs_length %d iscsi_headers_size "
			  "%u\n", cmd->cmd_len, rlen, pad_len, ahslength,
			  task->hdr_len);
	return 0;
}

/**
 * iscsi_check_tmf_restrictions - check if a task is affected by TMF
 * @task: iscsi task
 * @opcode: opcode to check for
 *
 * During a TMF, each task has to be checked to see whether it is affected.
 * All unrelated I/O can be passed through, but I/O to the
 * affected LUN should be restricted.
 * If 'fast_abort' is set we won't be sending any I/O to the
 * affected LUN.
 * Otherwise the target is waiting for all TTTs to be completed,
 * so we have to send all outstanding Data-Out PDUs to the target.
 */
static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode)
{
	struct iscsi_session *session = task->conn->session;
	struct iscsi_tm *tmf = &session->tmhdr;
	u64 hdr_lun;

	if (session->tmf_state == TMF_INITIAL)
		return 0;

	if ((tmf->opcode & ISCSI_OPCODE_MASK) != ISCSI_OP_SCSI_TMFUNC)
		return 0;

	switch (ISCSI_TM_FUNC_VALUE(tmf)) {
	case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET:
		/*
		 * Allow PDUs for unrelated LUNs
		 */
		hdr_lun = scsilun_to_int(&tmf->lun);
		if (hdr_lun != task->sc->device->lun)
			return 0;
		fallthrough;
	case ISCSI_TM_FUNC_TARGET_WARM_RESET:
		/*
		 * Fail all SCSI cmd PDUs
		 */
		if (opcode != ISCSI_OP_SCSI_DATA_OUT) {
			iscsi_session_printk(KERN_INFO, session,
					     "task [op %x itt 0x%x/0x%x] rejected.\n",
					     opcode, task->itt, task->hdr_itt);
			return -EACCES;
		}
		/*
		 * And also all data-out PDUs in response to R2T
		 * if fast_abort is set.
		 */
		if (session->fast_abort) {
			iscsi_session_printk(KERN_INFO, session,
					     "task [op %x itt 0x%x/0x%x] fast abort.\n",
					     opcode, task->itt, task->hdr_itt);
			return -EACCES;
		}
		break;
	case ISCSI_TM_FUNC_ABORT_TASK:
		/*
		 * the caller has already checked if the task
		 * they want to abort was in the pending queue so if
		 * we are here the cmd pdu has gone out already, and
		 * we will only hit this for data-outs
		 */
		if (opcode == ISCSI_OP_SCSI_DATA_OUT &&
		    task->hdr_itt == tmf->rtt) {
			ISCSI_DBG_SESSION(session,
					  "Preventing task %x/%x from sending "
					  "data-out due to abort task in "
					  "progress\n", task->itt,
					  task->hdr_itt);
			return -EACCES;
		}
		break;
	}

	return 0;
}

/**
 * iscsi_prep_scsi_cmd_pdu - prep iscsi scsi cmd pdu
 * @task: iscsi task
 *
 * Prep basic iSCSI PDU fields for a scsi cmd pdu. The LLD should set
 * fields like dlength or final based on how much data it sends
 */
static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
{
	struct iscsi_conn *conn = task->conn;
	struct iscsi_session *session = conn->session;
	struct scsi_cmnd *sc = task->sc;
	struct iscsi_scsi_req *hdr;
	unsigned hdrlength, cmd_len, transfer_length;
	itt_t itt;
	int rc;

	rc = iscsi_check_tmf_restrictions(task, ISCSI_OP_SCSI_CMD);
	if (rc)
		return rc;

	if (conn->session->tt->alloc_pdu) {
		rc = conn->session->tt->alloc_pdu(task, ISCSI_OP_SCSI_CMD);
		if (rc)
			return rc;
	}
	hdr = (struct iscsi_scsi_req *)task->hdr;
	itt = hdr->itt;
	memset(hdr, 0, sizeof(*hdr));

	if (session->tt->parse_pdu_itt)
		hdr->itt = task->hdr_itt = itt;
	else
		hdr->itt = task->hdr_itt = build_itt(task->itt,
						     task->conn->session->age);
	task->hdr_len = 0;
	rc = iscsi_add_hdr(task, sizeof(*hdr));
	if (rc)
		return rc;
	hdr->opcode = ISCSI_OP_SCSI_CMD;
	hdr->flags = ISCSI_ATTR_SIMPLE;
	int_to_scsilun(sc->device->lun, &hdr->lun);
	task->lun = hdr->lun;
	hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
	cmd_len = sc->cmd_len;
	if (cmd_len < ISCSI_CDB_SIZE)
		memset(&hdr->cdb[cmd_len], 0, ISCSI_CDB_SIZE - cmd_len);
	else if (cmd_len > ISCSI_CDB_SIZE) {
		rc = iscsi_prep_ecdb_ahs(task);
		if (rc)
			return rc;
		cmd_len = ISCSI_CDB_SIZE;
	}
	memcpy(hdr->cdb, sc->cmnd, cmd_len);

	task->imm_count = 0;
	if (scsi_get_prot_op(sc) != SCSI_PROT_NORMAL)
		task->protected = true;

	transfer_length = scsi_transfer_length(sc);
	hdr->data_length = cpu_to_be32(transfer_length);
	if (sc->sc_data_direction == DMA_TO_DEVICE) {
		struct iscsi_r2t_info *r2t = &task->unsol_r2t;

		hdr->flags |= ISCSI_FLAG_CMD_WRITE;
		/*
		 * Write counters:
		 *
		 *	imm_count	bytes to be sent right after
		 *			SCSI PDU Header
		 *
		 *	unsol_count	bytes (as Data-Out) to be sent
		 *			without R2T ack right after
		 *			immediate data
		 *
		 *	r2t data_length	bytes to be sent via R2T ack's
		 *
		 *	pad_count	bytes to be sent as zero-padding
		 */
		memset(r2t, 0, sizeof(*r2t));

		if (session->imm_data_en) {
			if (transfer_length >= session->first_burst)
				task->imm_count = min(session->first_burst,
						      conn->max_xmit_dlength);
			else
				task->imm_count = min(transfer_length,
						      conn->max_xmit_dlength);
			hton24(hdr->dlength, task->imm_count);
		} else
			zero_data(hdr->dlength);

		if (!session->initial_r2t_en) {
			r2t->data_length = min(session->first_burst,
					       transfer_length) -
					       task->imm_count;
			r2t->data_offset = task->imm_count;
			r2t->ttt = cpu_to_be32(ISCSI_RESERVED_TAG);
			r2t->exp_statsn = cpu_to_be32(conn->exp_statsn);
		}

		if (!task->unsol_r2t.data_length)
			/* No unsolicited Data-Outs */
			hdr->flags |= ISCSI_FLAG_CMD_FINAL;
	} else {
		hdr->flags |= ISCSI_FLAG_CMD_FINAL;
		zero_data(hdr->dlength);

		if (sc->sc_data_direction == DMA_FROM_DEVICE)
			hdr->flags |= ISCSI_FLAG_CMD_READ;
	}

	/* calculate size of additional header segments (AHSs) */
	hdrlength = task->hdr_len - sizeof(*hdr);

	WARN_ON(hdrlength & (ISCSI_PAD_LEN-1));
	hdrlength /= ISCSI_PAD_LEN;

	WARN_ON(hdrlength >= 256);
	hdr->hlength = hdrlength & 0xFF;
	hdr->cmdsn = task->cmdsn = cpu_to_be32(session->cmdsn);

	if (session->tt->init_task && session->tt->init_task(task))
		return -EIO;

	task->state = ISCSI_TASK_RUNNING;
	session->cmdsn++;

	conn->scsicmd_pdus_cnt++;
	ISCSI_DBG_SESSION(session, "iscsi prep [%s cid %d sc %p cdb 0x%x "
			  "itt 0x%x len %d cmdsn %d win %d]\n",
			  sc->sc_data_direction == DMA_TO_DEVICE ?
			  "write" : "read", conn->id, sc, sc->cmnd[0],
			  task->itt, transfer_length,
			  session->cmdsn,
			  session->max_cmdsn - session->exp_cmdsn + 1);
	return 0;
}

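/*
 * Worked example for the write counters above (illustrative values only):
 * with ImmediateData=Yes, InitialR2T=No, FirstBurstLength 64k,
 * MaxXmitDataSegmentLength 8k and a 256k WRITE, the code above yields
 *
 *	imm_count		= min(64k, 8k)        =  8k (with the cmd PDU)
 *	unsol_r2t.data_length	= min(64k, 256k) - 8k = 56k at data_offset 8k
 *	remaining 192k		  sent later as Data-Outs against target R2Ts
 *
 * and, because unsol_r2t.data_length is non-zero, ISCSI_FLAG_CMD_FINAL is
 * left clear on the command PDU.
 */
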
/**
 * iscsi_free_task - free a task
 * @task: iscsi cmd task
 *
 * Must be called with session back_lock.
 * This function returns the scsi command to scsi-ml or cleans
 * up mgmt tasks then returns the task to the pool.
 */
static void iscsi_free_task(struct iscsi_task *task)
{
	struct iscsi_conn *conn = task->conn;
	struct iscsi_session *session = conn->session;
	struct scsi_cmnd *sc = task->sc;
	int oldstate = task->state;

	ISCSI_DBG_SESSION(session, "freeing task itt 0x%x state %d sc %p\n",
			  task->itt, task->state, task->sc);

	session->tt->cleanup_task(task);
	task->state = ISCSI_TASK_FREE;
	task->sc = NULL;
	/*
	 * login task is preallocated so do not free
	 */
	if (conn->login_task == task)
		return;

	kfifo_in(&session->cmdpool.queue, (void*)&task, sizeof(void*));

	if (sc) {
		/* SCSI eh reuses commands to verify us */
		iscsi_cmd(sc)->task = NULL;
		/*
		 * queue command may call this to free the task, so
		 * it will decide how to return sc to scsi-ml.
		 */
		if (oldstate != ISCSI_TASK_REQUEUE_SCSIQ)
			scsi_done(sc);
	}
}

bool iscsi_get_task(struct iscsi_task *task)
{
	return refcount_inc_not_zero(&task->refcount);
}
EXPORT_SYMBOL_GPL(iscsi_get_task);

/**
 * __iscsi_put_task - drop the refcount on a task
 * @task: iscsi_task to drop the refcount on
 *
 * The back_lock must be held when calling in case it frees the task.
 */
void __iscsi_put_task(struct iscsi_task *task)
{
	if (refcount_dec_and_test(&task->refcount))
		iscsi_free_task(task);
}
EXPORT_SYMBOL_GPL(__iscsi_put_task);

void iscsi_put_task(struct iscsi_task *task)
{
	struct iscsi_session *session = task->conn->session;

	if (refcount_dec_and_test(&task->refcount)) {
		spin_lock_bh(&session->back_lock);
		iscsi_free_task(task);
		spin_unlock_bh(&session->back_lock);
	}
}
EXPORT_SYMBOL_GPL(iscsi_put_task);

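/*
 * Rough lifetime of the task refcount as used in this file (summary, not an
 * API contract): the allocation paths (iscsi_alloc_task() and
 * iscsi_alloc_mgmt_task()) start the count at 1; iscsi_xmit_task() takes a
 * temporary reference around ->xmit_task() so a fast completion cannot free
 * the task mid-transmit; iscsi_complete_task() drops the queueing reference;
 * and whoever holds the last reference ends up in iscsi_free_task() via one
 * of the put helpers above.
 */
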
/**
 * iscsi_complete_task - finish a task
 * @task: iscsi cmd task
 * @state: state to complete task with
 *
 * Must be called with session back_lock.
 */
static void iscsi_complete_task(struct iscsi_task *task, int state)
{
	struct iscsi_conn *conn = task->conn;

	ISCSI_DBG_SESSION(conn->session,
			  "complete task itt 0x%x state %d sc %p\n",
			  task->itt, task->state, task->sc);
	if (task->state == ISCSI_TASK_COMPLETED ||
	    task->state == ISCSI_TASK_ABRT_TMF ||
	    task->state == ISCSI_TASK_ABRT_SESS_RECOV ||
	    task->state == ISCSI_TASK_REQUEUE_SCSIQ)
		return;
	WARN_ON_ONCE(task->state == ISCSI_TASK_FREE);
	task->state = state;

	if (READ_ONCE(conn->ping_task) == task)
		WRITE_ONCE(conn->ping_task, NULL);

	/* release get from queueing */
	__iscsi_put_task(task);
}

/**
 * iscsi_complete_scsi_task - finish scsi task normally
 * @task: iscsi task for scsi cmd
 * @exp_cmdsn: expected cmd sn in cpu format
 * @max_cmdsn: max cmd sn in cpu format
 *
 * This is used when drivers do not need or cannot perform
 * lower level pdu processing.
 *
 * Called with session back_lock
 */
void iscsi_complete_scsi_task(struct iscsi_task *task,
			      uint32_t exp_cmdsn, uint32_t max_cmdsn)
{
	struct iscsi_conn *conn = task->conn;

	ISCSI_DBG_SESSION(conn->session, "[itt 0x%x]\n", task->itt);

	conn->last_recv = jiffies;
	__iscsi_update_cmdsn(conn->session, exp_cmdsn, max_cmdsn);
	iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
}
EXPORT_SYMBOL_GPL(iscsi_complete_scsi_task);

/*
 * Must be called with back and frwd lock
 */
static bool cleanup_queued_task(struct iscsi_task *task)
{
	struct iscsi_conn *conn = task->conn;
	bool early_complete = false;

	/*
	 * We might have raced where we handled a R2T early and got a response
	 * but have not yet taken the task off the requeue list, then a TMF or
	 * recovery happened and so we can still see it here.
	 */
	if (task->state == ISCSI_TASK_COMPLETED)
		early_complete = true;

	if (!list_empty(&task->running)) {
		list_del_init(&task->running);
		/*
		 * If it's on a list but still running this could be cleanup
		 * from a TMF or session recovery.
		 */
		if (task->state == ISCSI_TASK_RUNNING ||
		    task->state == ISCSI_TASK_COMPLETED)
			__iscsi_put_task(task);
	}

	if (conn->session->running_aborted_task == task) {
		conn->session->running_aborted_task = NULL;
		__iscsi_put_task(task);
	}

	if (conn->task == task) {
		conn->task = NULL;
		__iscsi_put_task(task);
	}

	return early_complete;
}

/*
 * The session back and frwd locks must be held. If this is not called for a
 * task that is still pending, or not called from the xmit thread, then the
 * xmit thread must be suspended.
 */
static void __fail_scsi_task(struct iscsi_task *task, int err)
{
	struct iscsi_conn *conn = task->conn;
	struct scsi_cmnd *sc;
	int state;

	if (cleanup_queued_task(task))
		return;

	if (task->state == ISCSI_TASK_PENDING) {
		/*
		 * cmd never made it to the xmit thread, so we should not count
		 * the cmd in the sequencing
		 */
		conn->session->queued_cmdsn--;
		/* it was never sent so just complete like normal */
		state = ISCSI_TASK_COMPLETED;
	} else if (err == DID_TRANSPORT_DISRUPTED)
		state = ISCSI_TASK_ABRT_SESS_RECOV;
	else
		state = ISCSI_TASK_ABRT_TMF;

	sc = task->sc;
	sc->result = err << 16;
	scsi_set_resid(sc, scsi_bufflen(sc));
	iscsi_complete_task(task, state);
}

static void fail_scsi_task(struct iscsi_task *task, int err)
{
	struct iscsi_session *session = task->conn->session;

	spin_lock_bh(&session->back_lock);
	__fail_scsi_task(task, err);
	spin_unlock_bh(&session->back_lock);
}

static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
				struct iscsi_task *task)
{
	struct iscsi_session *session = conn->session;
	struct iscsi_hdr *hdr = task->hdr;
	struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr;
	uint8_t opcode = hdr->opcode & ISCSI_OPCODE_MASK;

	if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
		return -ENOTCONN;

	if (opcode != ISCSI_OP_LOGIN && opcode != ISCSI_OP_TEXT)
		nop->exp_statsn = cpu_to_be32(conn->exp_statsn);
	/*
	 * pre-format CmdSN for outgoing PDU.
	 */
	nop->cmdsn = cpu_to_be32(session->cmdsn);
	if (hdr->itt != RESERVED_ITT) {
		/*
		 * TODO: We always use immediate for normal session pdus.
		 * If we start to send tmfs or nops as non-immediate then
		 * we should start checking the cmdsn numbers for mgmt tasks.
		 *
		 * During discovery sessions iscsid sends TEXT as non immediate,
		 * but we always only send one PDU at a time.
		 */
		if (conn->c_stage == ISCSI_CONN_STARTED &&
		    !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
			session->queued_cmdsn++;
			session->cmdsn++;
		}
	}

	if (session->tt->init_task && session->tt->init_task(task))
		return -EIO;

	if ((hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
		session->state = ISCSI_STATE_LOGGING_OUT;

	task->state = ISCSI_TASK_RUNNING;
	ISCSI_DBG_SESSION(session, "mgmtpdu [op 0x%x hdr->itt 0x%x "
			  "datalen %d]\n", hdr->opcode & ISCSI_OPCODE_MASK,
			  hdr->itt, task->data_count);
	return 0;
}

/**
 * iscsi_alloc_mgmt_task - allocate and setup a mgmt task.
 * @conn: iscsi conn that the task will be sent on.
 * @hdr: iscsi pdu that will be sent.
 * @data: buffer for data segment if needed.
 * @data_size: length of data in bytes.
 */
static struct iscsi_task *
iscsi_alloc_mgmt_task(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
		      char *data, uint32_t data_size)
{
	struct iscsi_session *session = conn->session;
	uint8_t opcode = hdr->opcode & ISCSI_OPCODE_MASK;
	struct iscsi_task *task;
	itt_t itt;

	if (session->state == ISCSI_STATE_TERMINATE ||
	    !test_bit(ISCSI_CONN_FLAG_BOUND, &conn->flags))
		return NULL;

	if (opcode == ISCSI_OP_LOGIN || opcode == ISCSI_OP_TEXT) {
		/*
		 * Login and Text are sent serially, in
		 * request-followed-by-response sequence.
		 * Same task can be used. Same ITT must be used.
		 * Note that login_task is preallocated at conn_create().
		 */
		if (conn->login_task->state != ISCSI_TASK_FREE) {
			iscsi_conn_printk(KERN_ERR, conn, "Login/Text in "
					  "progress. Cannot start new task.\n");
			return NULL;
		}

		if (data_size > ISCSI_DEF_MAX_RECV_SEG_LEN) {
			iscsi_conn_printk(KERN_ERR, conn, "Invalid buffer len of %u for login task. Max len is %u\n", data_size, ISCSI_DEF_MAX_RECV_SEG_LEN);
			return NULL;
		}

		task = conn->login_task;
	} else {
		if (session->state != ISCSI_STATE_LOGGED_IN)
			return NULL;

		if (data_size != 0) {
			iscsi_conn_printk(KERN_ERR, conn, "Can not send data buffer of len %u for op 0x%x\n", data_size, opcode);
			return NULL;
		}

		BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
		BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);

		if (!kfifo_out(&session->cmdpool.queue,
			       (void*)&task, sizeof(void*)))
			return NULL;
	}
	/*
	 * released in complete pdu for task we expect a response for, and
	 * released by the lld when it has transmitted the task for
	 * pdus we do not expect a response for.
	 */
	refcount_set(&task->refcount, 1);
	task->conn = conn;
	task->sc = NULL;
	INIT_LIST_HEAD(&task->running);
	task->state = ISCSI_TASK_PENDING;

	if (data_size) {
		memcpy(task->data, data, data_size);
		task->data_count = data_size;
	} else
		task->data_count = 0;

	if (conn->session->tt->alloc_pdu) {
		if (conn->session->tt->alloc_pdu(task, hdr->opcode)) {
			iscsi_conn_printk(KERN_ERR, conn, "Could not allocate "
					  "pdu for mgmt task.\n");
			goto free_task;
		}
	}

	itt = task->hdr->itt;
	task->hdr_len = sizeof(struct iscsi_hdr);
	memcpy(task->hdr, hdr, sizeof(struct iscsi_hdr));

	if (hdr->itt != RESERVED_ITT) {
		if (session->tt->parse_pdu_itt)
			task->hdr->itt = itt;
		else
			task->hdr->itt = build_itt(task->itt,
						   task->conn->session->age);
	}

	return task;

free_task:
	iscsi_put_task(task);
	return NULL;
}

/**
 * iscsi_send_mgmt_task - Send task created with iscsi_alloc_mgmt_task.
 * @task: iscsi task to send.
 *
 * On failure this returns a non-zero error code, and the driver must free
 * the task with iscsi_put_task;
 */
static int iscsi_send_mgmt_task(struct iscsi_task *task)
{
	struct iscsi_conn *conn = task->conn;
	struct iscsi_session *session = conn->session;
	struct iscsi_host *ihost = shost_priv(conn->session->host);
	int rc = 0;

	if (!ihost->workq) {
		rc = iscsi_prep_mgmt_task(conn, task);
		if (rc)
			return rc;

		rc = session->tt->xmit_task(task);
		if (rc)
			return rc;
	} else {
		list_add_tail(&task->running, &conn->mgmtqueue);
		iscsi_conn_queue_xmit(conn);
	}

	return 0;
}

static int __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
				 char *data, uint32_t data_size)
{
	struct iscsi_task *task;
	int rc;

	task = iscsi_alloc_mgmt_task(conn, hdr, data, data_size);
	if (!task)
		return -ENOMEM;

	rc = iscsi_send_mgmt_task(task);
	if (rc)
		iscsi_put_task(task);
	return rc;
}

int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
			char *data, uint32_t data_size)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_session *session = conn->session;
	int err = 0;

	spin_lock_bh(&session->frwd_lock);
	if (__iscsi_conn_send_pdu(conn, hdr, data, data_size))
		err = -EPERM;
	spin_unlock_bh(&session->frwd_lock);
	return err;
}
EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);

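/*
 * iscsi_conn_send_pdu() is the class-level entry point used, for example,
 * when userspace (iscsid) hands us a Login or Text PDU to transmit. A
 * minimal sketch of a caller (illustrative only; the real caller is the
 * iscsi transport class):
 *
 *	struct iscsi_text req;
 *
 *	memset(&req, 0, sizeof(req));
 *	req.opcode = ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE;
 *	req.flags = ISCSI_FLAG_CMD_FINAL;
 *	...
 *	if (iscsi_conn_send_pdu(cls_conn, (struct iscsi_hdr *)&req,
 *				data, data_len))
 *		... treat as a connection-level failure ...
 *
 * The header and data are copied into a mgmt task, so the caller's buffers
 * do not need to stay around after the call returns.
 */
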
/**
 * iscsi_scsi_cmd_rsp - SCSI Command Response processing
 * @conn: iscsi connection
 * @hdr: iscsi header
 * @task: scsi command task
 * @data: cmd data buffer
 * @datalen: len of buffer
 *
 * iscsi_cmd_rsp sets up the scsi_cmnd fields based on the PDU and
 * then completes the command and task. called under back_lock
 **/
static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
			       struct iscsi_task *task, char *data,
			       int datalen)
{
	struct iscsi_scsi_rsp *rhdr = (struct iscsi_scsi_rsp *)hdr;
	struct iscsi_session *session = conn->session;
	struct scsi_cmnd *sc = task->sc;

	iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
	conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;

	sc->result = (DID_OK << 16) | rhdr->cmd_status;

	if (task->protected) {
		sector_t sector;
		u8 ascq;

		/**
		 * Transports that didn't implement check_protection
		 * callback but still published T10-PI support to scsi-mid
		 * deserve this BUG_ON.
		 **/
		BUG_ON(!session->tt->check_protection);

		ascq = session->tt->check_protection(task, &sector);
		if (ascq) {
			scsi_build_sense(sc, 1, ILLEGAL_REQUEST, 0x10, ascq);
			scsi_set_sense_information(sc->sense_buffer,
						   SCSI_SENSE_BUFFERSIZE,
						   sector);
			goto out;
		}
	}

	if (rhdr->response != ISCSI_STATUS_CMD_COMPLETED) {
		sc->result = DID_ERROR << 16;
		goto out;
	}

	if (rhdr->cmd_status == SAM_STAT_CHECK_CONDITION) {
		uint16_t senselen;

		if (datalen < 2) {
invalid_datalen:
			iscsi_conn_printk(KERN_ERR, conn,
					  "Got CHECK_CONDITION but invalid data "
					  "buffer size of %d\n", datalen);
			sc->result = DID_BAD_TARGET << 16;
			goto out;
		}

		senselen = get_unaligned_be16(data);
		if (datalen < senselen)
			goto invalid_datalen;

		memcpy(sc->sense_buffer, data + 2,
		       min_t(uint16_t, senselen, SCSI_SENSE_BUFFERSIZE));
		ISCSI_DBG_SESSION(session, "copied %d bytes of sense\n",
				  min_t(uint16_t, senselen,
					SCSI_SENSE_BUFFERSIZE));
	}

	if (rhdr->flags & (ISCSI_FLAG_CMD_BIDI_UNDERFLOW |
			   ISCSI_FLAG_CMD_BIDI_OVERFLOW)) {
		sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
	}

	if (rhdr->flags & (ISCSI_FLAG_CMD_UNDERFLOW |
			   ISCSI_FLAG_CMD_OVERFLOW)) {
		int res_count = be32_to_cpu(rhdr->residual_count);

		if (res_count > 0 &&
		    (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW ||
		     res_count <= scsi_bufflen(sc)))
			/* write side for bidi or uni-io set_resid */
			scsi_set_resid(sc, res_count);
		else
			sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
	}
out:
	ISCSI_DBG_SESSION(session, "cmd rsp done [sc %p res %d itt 0x%x]\n",
			  sc, sc->result, task->itt);
	conn->scsirsp_pdus_cnt++;
	iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
}

/**
 * iscsi_data_in_rsp - SCSI Data-In Response processing
 * @conn: iscsi connection
 * @hdr: iscsi pdu
 * @task: scsi command task
 *
 * iscsi_data_in_rsp sets up the scsi_cmnd fields based on the data received
 * then completes the command and task. called under back_lock
 **/
static void
iscsi_data_in_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
		  struct iscsi_task *task)
{
	struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)hdr;
	struct scsi_cmnd *sc = task->sc;

	if (!(rhdr->flags & ISCSI_FLAG_DATA_STATUS))
		return;

	iscsi_update_cmdsn(conn->session, (struct iscsi_nopin *)hdr);
	sc->result = (DID_OK << 16) | rhdr->cmd_status;
	conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
	if (rhdr->flags & (ISCSI_FLAG_DATA_UNDERFLOW |
			   ISCSI_FLAG_DATA_OVERFLOW)) {
		int res_count = be32_to_cpu(rhdr->residual_count);

		if (res_count > 0 &&
		    (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW ||
		     res_count <= sc->sdb.length))
			scsi_set_resid(sc, res_count);
		else
			sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
	}

	ISCSI_DBG_SESSION(conn->session, "data in with status done "
			  "[sc %p res %d itt 0x%x]\n",
			  sc, sc->result, task->itt);
	conn->scsirsp_pdus_cnt++;
	iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
}

static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
{
	struct iscsi_tm_rsp *tmf = (struct iscsi_tm_rsp *)hdr;
	struct iscsi_session *session = conn->session;

	conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
	conn->tmfrsp_pdus_cnt++;

	if (session->tmf_state != TMF_QUEUED)
		return;

	if (tmf->response == ISCSI_TMF_RSP_COMPLETE)
		session->tmf_state = TMF_SUCCESS;
	else if (tmf->response == ISCSI_TMF_RSP_NO_TASK)
		session->tmf_state = TMF_NOT_FOUND;
	else
		session->tmf_state = TMF_FAILED;
	wake_up(&session->ehwait);
}

static int iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
{
	struct iscsi_nopout hdr;
	struct iscsi_task *task;

	if (!rhdr) {
		if (READ_ONCE(conn->ping_task))
			return -EINVAL;
	}

	memset(&hdr, 0, sizeof(struct iscsi_nopout));
	hdr.opcode = ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE;
	hdr.flags = ISCSI_FLAG_CMD_FINAL;

	if (rhdr) {
		hdr.lun = rhdr->lun;
		hdr.ttt = rhdr->ttt;
		hdr.itt = RESERVED_ITT;
	} else
		hdr.ttt = RESERVED_ITT;

	task = iscsi_alloc_mgmt_task(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
	if (!task)
		return -ENOMEM;

	if (!rhdr)
		WRITE_ONCE(conn->ping_task, task);

	if (iscsi_send_mgmt_task(task)) {
		if (!rhdr)
			WRITE_ONCE(conn->ping_task, NULL);
		iscsi_put_task(task);

		iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n");
		return -EIO;
	} else if (!rhdr) {
		/* only track our nops */
		conn->last_ping = jiffies;
	}

	return 0;
}

/**
 * iscsi_nop_out_rsp - SCSI NOP Response processing
 * @task: scsi command task
 * @nop: the nop structure
 * @data: where to put the data
 * @datalen: length of data
 *
 * iscsi_nop_out_rsp handles a nop response from us or
 * from userspace. called under back_lock
 **/
static int iscsi_nop_out_rsp(struct iscsi_task *task,
			     struct iscsi_nopin *nop, char *data, int datalen)
{
	struct iscsi_conn *conn = task->conn;
	int rc = 0;

	if (READ_ONCE(conn->ping_task) != task) {
		/*
		 * If this is not in response to one of our
		 * nops then it must be from userspace.
		 */
		if (iscsi_recv_pdu(conn->cls_conn, (struct iscsi_hdr *)nop,
				   data, datalen))
			rc = ISCSI_ERR_CONN_FAILED;
	} else
		mod_timer(&conn->transport_timer, jiffies + conn->recv_timeout);
	iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
	return rc;
}

static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
			       char *data, int datalen)
{
	struct iscsi_reject *reject = (struct iscsi_reject *)hdr;
	struct iscsi_hdr rejected_pdu;
	int opcode, rc = 0;

	conn->exp_statsn = be32_to_cpu(reject->statsn) + 1;

	if (ntoh24(reject->dlength) > datalen ||
	    ntoh24(reject->dlength) < sizeof(struct iscsi_hdr)) {
		iscsi_conn_printk(KERN_ERR, conn, "Cannot handle rejected "
				  "pdu. Invalid data length (pdu dlength "
				  "%u, datalen %d)\n", ntoh24(reject->dlength),
				  datalen);
		return ISCSI_ERR_PROTO;
	}
	memcpy(&rejected_pdu, data, sizeof(struct iscsi_hdr));
	opcode = rejected_pdu.opcode & ISCSI_OPCODE_MASK;

	switch (reject->reason) {
	case ISCSI_REASON_DATA_DIGEST_ERROR:
		iscsi_conn_printk(KERN_ERR, conn,
				  "pdu (op 0x%x itt 0x%x) rejected "
				  "due to DataDigest error.\n",
				  opcode, rejected_pdu.itt);
		break;
	case ISCSI_REASON_IMM_CMD_REJECT:
		iscsi_conn_printk(KERN_ERR, conn,
				  "pdu (op 0x%x itt 0x%x) rejected. Too many "
				  "immediate commands.\n",
				  opcode, rejected_pdu.itt);
		/*
		 * We only send one TMF at a time so if the target could not
		 * handle it, then it should get fixed (RFC mandates that
		 * a target can handle one immediate TMF per conn).
		 *
		 * For nops-outs, we could have sent more than one if
		 * the target is sending us lots of nop-ins
		 */
		if (opcode != ISCSI_OP_NOOP_OUT)
			return 0;

		if (rejected_pdu.itt == cpu_to_be32(ISCSI_RESERVED_TAG)) {
			/*
			 * nop-out in response to target's nop-out rejected.
			 * Just resend.
			 */
			/* In RX path we are under back lock */
			spin_unlock(&conn->session->back_lock);
			spin_lock(&conn->session->frwd_lock);
			iscsi_send_nopout(conn,
					  (struct iscsi_nopin*)&rejected_pdu);
			spin_unlock(&conn->session->frwd_lock);
			spin_lock(&conn->session->back_lock);
		} else {
			struct iscsi_task *task;
			/*
			 * Our nop as ping got dropped. We know the target
			 * and transport are ok so just clean up
			 */
			task = iscsi_itt_to_task(conn, rejected_pdu.itt);
			if (!task) {
				iscsi_conn_printk(KERN_ERR, conn,
						  "Invalid pdu reject. Could "
						  "not lookup rejected task.\n");
				rc = ISCSI_ERR_BAD_ITT;
			} else
				rc = iscsi_nop_out_rsp(task,
					(struct iscsi_nopin*)&rejected_pdu,
					NULL, 0);
		}
		break;
	default:
		iscsi_conn_printk(KERN_ERR, conn,
				  "pdu (op 0x%x itt 0x%x) rejected. Reason "
				  "code 0x%x\n", rejected_pdu.opcode,
				  rejected_pdu.itt, reject->reason);
		break;
	}
	return rc;
}

/**
 * iscsi_itt_to_task - look up task by itt
 * @conn: iscsi connection
 * @itt: itt
 *
 * This should be used for mgmt tasks like login and nops, or if
 * the LLD's itt space does not include the session age.
 *
 * The session back_lock must be held.
 */
struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt)
{
	struct iscsi_session *session = conn->session;
	int i;

	if (itt == RESERVED_ITT)
		return NULL;

	if (session->tt->parse_pdu_itt)
		session->tt->parse_pdu_itt(conn, itt, &i, NULL);
	else
		i = get_itt(itt);
	if (i >= session->cmds_max)
		return NULL;

	return session->cmds[i];
}
EXPORT_SYMBOL_GPL(iscsi_itt_to_task);

/**
 * __iscsi_complete_pdu - complete pdu
 * @conn: iscsi conn
 * @hdr: iscsi header
 * @data: data buffer
 * @datalen: len of data buffer
 *
 * Completes pdu processing by freeing any resources allocated at
 * queuecommand or send generic. session back_lock must be held and verify
 * itt must have been called.
 */
int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
			 char *data, int datalen)
{
	struct iscsi_session *session = conn->session;
	int opcode = hdr->opcode & ISCSI_OPCODE_MASK, rc = 0;
	struct iscsi_task *task;
	uint32_t itt;

	conn->last_recv = jiffies;
	rc = iscsi_verify_itt(conn, hdr->itt);
	if (rc)
		return rc;

	if (hdr->itt != RESERVED_ITT)
		itt = get_itt(hdr->itt);
	else
		itt = ~0U;

	ISCSI_DBG_SESSION(session, "[op 0x%x cid %d itt 0x%x len %d]\n",
			  opcode, conn->id, itt, datalen);

	if (itt == ~0U) {
		iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);

		switch(opcode) {
		case ISCSI_OP_NOOP_IN:
			if (datalen) {
				rc = ISCSI_ERR_PROTO;
				break;
			}

			if (hdr->ttt == cpu_to_be32(ISCSI_RESERVED_TAG))
				break;

			/* In RX path we are under back lock */
			spin_unlock(&session->back_lock);
			spin_lock(&session->frwd_lock);
			iscsi_send_nopout(conn, (struct iscsi_nopin*)hdr);
			spin_unlock(&session->frwd_lock);
			spin_lock(&session->back_lock);
			break;
		case ISCSI_OP_REJECT:
			rc = iscsi_handle_reject(conn, hdr, data, datalen);
			break;
		case ISCSI_OP_ASYNC_EVENT:
			conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
			if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
				rc = ISCSI_ERR_CONN_FAILED;
			break;
		default:
			rc = ISCSI_ERR_BAD_OPCODE;
			break;
		}
		goto out;
	}

	switch(opcode) {
	case ISCSI_OP_SCSI_CMD_RSP:
	case ISCSI_OP_SCSI_DATA_IN:
		task = iscsi_itt_to_ctask(conn, hdr->itt);
		if (!task)
			return ISCSI_ERR_BAD_ITT;
		task->last_xfer = jiffies;
		break;
	case ISCSI_OP_R2T:
		/*
		 * LLD handles R2Ts if they need to.
		 */
		return 0;
	case ISCSI_OP_LOGOUT_RSP:
	case ISCSI_OP_LOGIN_RSP:
	case ISCSI_OP_TEXT_RSP:
	case ISCSI_OP_SCSI_TMFUNC_RSP:
	case ISCSI_OP_NOOP_IN:
		task = iscsi_itt_to_task(conn, hdr->itt);
		if (!task)
			return ISCSI_ERR_BAD_ITT;
		break;
	default:
		return ISCSI_ERR_BAD_OPCODE;
	}

	switch(opcode) {
	case ISCSI_OP_SCSI_CMD_RSP:
		iscsi_scsi_cmd_rsp(conn, hdr, task, data, datalen);
		break;
	case ISCSI_OP_SCSI_DATA_IN:
		iscsi_data_in_rsp(conn, hdr, task);
		break;
	case ISCSI_OP_LOGOUT_RSP:
		iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
		if (datalen) {
			rc = ISCSI_ERR_PROTO;
			break;
		}
		conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
		goto recv_pdu;
	case ISCSI_OP_LOGIN_RSP:
	case ISCSI_OP_TEXT_RSP:
		iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
		/*
		 * login related PDU's exp_statsn is handled in
		 * userspace
		 */
		goto recv_pdu;
	case ISCSI_OP_SCSI_TMFUNC_RSP:
		iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
		if (datalen) {
			rc = ISCSI_ERR_PROTO;
			break;
		}

		iscsi_tmf_rsp(conn, hdr);
		iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
		break;
	case ISCSI_OP_NOOP_IN:
		iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
		if (hdr->ttt != cpu_to_be32(ISCSI_RESERVED_TAG) || datalen) {
			rc = ISCSI_ERR_PROTO;
			break;
		}
		conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;

		rc = iscsi_nop_out_rsp(task, (struct iscsi_nopin*)hdr,
				       data, datalen);
		break;
	default:
		rc = ISCSI_ERR_BAD_OPCODE;
		break;
	}

out:
	return rc;
recv_pdu:
	if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
		rc = ISCSI_ERR_CONN_FAILED;
	iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
	return rc;
}
EXPORT_SYMBOL_GPL(__iscsi_complete_pdu);

int iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
		       char *data, int datalen)
{
	int rc;

	spin_lock(&conn->session->back_lock);
	rc = __iscsi_complete_pdu(conn, hdr, data, datalen);
	spin_unlock(&conn->session->back_lock);
	return rc;
}
EXPORT_SYMBOL_GPL(iscsi_complete_pdu);

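/*
 * A software-iSCSI LLD's receive path typically ends up here once it has a
 * complete PDU. A rough sketch (illustrative only):
 *
 *	rc = iscsi_complete_pdu(conn, hdr, data, datalen);
 *	if (rc)
 *		iscsi_conn_failure(conn, rc);
 *
 * The returned values are ISCSI_ERR_* codes, which is why they can be fed
 * straight into iscsi_conn_failure(). Drivers that already hold the session
 * back_lock in their receive context call __iscsi_complete_pdu() instead.
 */
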
int iscsi_verify_itt(struct iscsi_conn *conn, itt_t itt)
{
	struct iscsi_session *session = conn->session;
	int age = 0, i = 0;

	if (itt == RESERVED_ITT)
		return 0;

	if (session->tt->parse_pdu_itt)
		session->tt->parse_pdu_itt(conn, itt, &i, &age);
	else {
		i = get_itt(itt);
		age = ((__force u32)itt >> ISCSI_AGE_SHIFT) & ISCSI_AGE_MASK;
	}

	if (age != session->age) {
		iscsi_conn_printk(KERN_ERR, conn,
				  "received itt %x expected session age (%x)\n",
				  (__force u32)itt, session->age);
		return ISCSI_ERR_BAD_ITT;
	}

	if (i >= session->cmds_max) {
		iscsi_conn_printk(KERN_ERR, conn,
				  "received invalid itt index %u (max cmds "
				  "%u).\n", i, session->cmds_max);
		return ISCSI_ERR_BAD_ITT;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(iscsi_verify_itt);

/**
 * iscsi_itt_to_ctask - look up ctask by itt
 * @conn: iscsi connection
 * @itt: itt
 *
 * This should be used for cmd tasks.
 *
 * The session back_lock must be held.
 */
struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *conn, itt_t itt)
{
	struct iscsi_task *task;

	if (iscsi_verify_itt(conn, itt))
		return NULL;

	task = iscsi_itt_to_task(conn, itt);
	if (!task || !task->sc)
		return NULL;

	if (iscsi_cmd(task->sc)->age != conn->session->age) {
		iscsi_session_printk(KERN_ERR, conn->session,
				     "task's session age %d, expected %d\n",
				     iscsi_cmd(task->sc)->age, conn->session->age);
		return NULL;
	}

	return task;
}
EXPORT_SYMBOL_GPL(iscsi_itt_to_ctask);

void iscsi_session_failure(struct iscsi_session *session,
			   enum iscsi_err err)
{
	struct iscsi_conn *conn;

	spin_lock_bh(&session->frwd_lock);
	conn = session->leadconn;
	if (session->state == ISCSI_STATE_TERMINATE || !conn) {
		spin_unlock_bh(&session->frwd_lock);
		return;
	}

	iscsi_get_conn(conn->cls_conn);
	spin_unlock_bh(&session->frwd_lock);
	/*
	 * if the host is being removed bypass the connection
	 * recovery initialization because we are going to kill
	 * the session.
	 */
	if (err == ISCSI_ERR_INVALID_HOST)
		iscsi_conn_error_event(conn->cls_conn, err);
	else
		iscsi_conn_failure(conn, err);
	iscsi_put_conn(conn->cls_conn);
}
EXPORT_SYMBOL_GPL(iscsi_session_failure);

static bool iscsi_set_conn_failed(struct iscsi_conn *conn)
{
	struct iscsi_session *session = conn->session;

	if (session->state == ISCSI_STATE_FAILED)
		return false;

	if (conn->stop_stage == 0)
		session->state = ISCSI_STATE_FAILED;

	set_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
	set_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags);
	return true;
}

void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
{
	struct iscsi_session *session = conn->session;
	bool needs_evt;

	spin_lock_bh(&session->frwd_lock);
	needs_evt = iscsi_set_conn_failed(conn);
	spin_unlock_bh(&session->frwd_lock);

	if (needs_evt)
		iscsi_conn_error_event(conn->cls_conn, err);
}
EXPORT_SYMBOL_GPL(iscsi_conn_failure);

static int iscsi_check_cmdsn_window_closed(struct iscsi_conn *conn)
{
	struct iscsi_session *session = conn->session;

	/*
	 * Check for iSCSI window and take care of CmdSN wrap-around
	 */
	if (!iscsi_sna_lte(session->queued_cmdsn, session->max_cmdsn)) {
		ISCSI_DBG_SESSION(session, "iSCSI CmdSN closed. ExpCmdSn "
				  "%u MaxCmdSN %u CmdSN %u/%u\n",
				  session->exp_cmdsn, session->max_cmdsn,
				  session->cmdsn, session->queued_cmdsn);
		return -ENOSPC;
	}
	return 0;
}

static int iscsi_xmit_task(struct iscsi_conn *conn, struct iscsi_task *task,
			   bool was_requeue)
{
	int rc;

	if (!conn->task) {
		/*
		 * Take a ref so we can access it after xmit_task().
		 *
		 * This should never fail because the failure paths will have
		 * stopped the xmit thread.
		 */
		if (!iscsi_get_task(task)) {
			WARN_ON_ONCE(1);
			return 0;
		}
	} else {
		/* Already have a ref from when we failed to send it last call */
		conn->task = NULL;
	}

	/*
	 * If this was a requeue for a R2T we have an extra ref on the task in
	 * case a bad target sends a cmd rsp before we have handled the task.
	 */
	if (was_requeue)
		iscsi_put_task(task);

	/*
	 * Do this after dropping the extra ref because if this was a requeue
	 * it's removed from that list and cleanup_queued_task would miss it.
	 */
	if (test_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags)) {
		/*
		 * Save the task and ref in case we weren't cleaning up this
		 * task and get woken up again.
		 */
		conn->task = task;
		return -ENODATA;
	}

	spin_unlock_bh(&conn->session->frwd_lock);
	rc = conn->session->tt->xmit_task(task);
	spin_lock_bh(&conn->session->frwd_lock);
	if (!rc) {
		/* done with this task */
		task->last_xfer = jiffies;
	} else {
		/*
		 * get an extra ref that is released next time we access it
		 * as conn->task above.
		 */
		iscsi_get_task(task);
		conn->task = task;
	}

	iscsi_put_task(task);
	return rc;
}

/**
 * iscsi_requeue_task - requeue task to run from session workqueue
 * @task: task to requeue
 *
 * Callers must have taken a ref to the task that is going to be requeued.
 */
void iscsi_requeue_task(struct iscsi_task *task)
{
	struct iscsi_conn *conn = task->conn;

	/*
	 * this may be on the requeue list already if the xmit_task callout
	 * is handling the r2ts while we are adding new ones
	 */
	spin_lock_bh(&conn->session->frwd_lock);
	if (list_empty(&task->running)) {
		list_add_tail(&task->running, &conn->requeue);
	} else {
		/*
		 * Don't need the extra ref since it's already requeued and
		 * has a ref.
		 */
		iscsi_put_task(task);
	}
	iscsi_conn_queue_xmit(conn);
	spin_unlock_bh(&conn->session->frwd_lock);
}
EXPORT_SYMBOL_GPL(iscsi_requeue_task);

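/*
 * Typical caller pattern for iscsi_requeue_task() (illustrative): an LLD
 * that handles R2Ts in its receive path pushes the Data-Out work back to
 * the session workqueue roughly like
 *
 *	if (iscsi_get_task(task))
 *		iscsi_requeue_task(task);
 *
 * i.e. the reference mentioned in the kerneldoc above is taken by the
 * caller; iscsi_requeue_task() either keeps it (task newly added to
 * conn->requeue) or drops it again (task was already queued).
 */
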
/**
 * iscsi_data_xmit - xmit any command into the scheduled connection
 * @conn: iscsi connection
 *
 * Notes:
 *	The function can return -EAGAIN in which case the caller must
 *	re-schedule it again later or recover. '0' return code means
 *	successful xmit.
 **/
static int iscsi_data_xmit(struct iscsi_conn *conn)
{
	struct iscsi_task *task;
	int rc = 0;

	spin_lock_bh(&conn->session->frwd_lock);
	if (test_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags)) {
		ISCSI_DBG_SESSION(conn->session, "Tx suspended!\n");
		spin_unlock_bh(&conn->session->frwd_lock);
		return -ENODATA;
	}

	if (conn->task) {
		rc = iscsi_xmit_task(conn, conn->task, false);
		if (rc)
			goto done;
	}

	/*
	 * process mgmt pdus like nops before commands since we should
	 * only have one nop-out as a ping from us and targets should not
	 * overflow us with nop-ins
	 */
check_mgmt:
	while (!list_empty(&conn->mgmtqueue)) {
		task = list_entry(conn->mgmtqueue.next, struct iscsi_task,
				  running);
		list_del_init(&task->running);
		if (iscsi_prep_mgmt_task(conn, task)) {
			/* regular RX path uses back_lock */
			spin_lock_bh(&conn->session->back_lock);
			__iscsi_put_task(task);
			spin_unlock_bh(&conn->session->back_lock);
			continue;
		}
		rc = iscsi_xmit_task(conn, task, false);
		if (rc)
			goto done;
	}

check_requeue:
	while (!list_empty(&conn->requeue)) {
		/*
		 * we always do fastlogout - conn stop code will clean up.
		 */
		if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
			break;

		task = list_entry(conn->requeue.next, struct iscsi_task,
				  running);

		if (iscsi_check_tmf_restrictions(task, ISCSI_OP_SCSI_DATA_OUT))
			break;

		list_del_init(&task->running);
		rc = iscsi_xmit_task(conn, task, true);
		if (rc)
			goto done;
		if (!list_empty(&conn->mgmtqueue))
			goto check_mgmt;
	}

	/* process pending command queue */
	while (!list_empty(&conn->cmdqueue)) {
		task = list_entry(conn->cmdqueue.next, struct iscsi_task,
				  running);
		list_del_init(&task->running);
		if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
			fail_scsi_task(task, DID_IMM_RETRY);
			continue;
		}
		rc = iscsi_prep_scsi_cmd_pdu(task);
		if (rc) {
			if (rc == -ENOMEM || rc == -EACCES)
				fail_scsi_task(task, DID_IMM_RETRY);
			else
				fail_scsi_task(task, DID_ABORT);
			continue;
		}
		rc = iscsi_xmit_task(conn, task, false);
		if (rc)
			goto done;
		/*
		 * we could continuously get new task requests so
		 * we need to check the mgmt queue for nops that need to
		 * be sent to avoid starvation
		 */
		if (!list_empty(&conn->mgmtqueue))
			goto check_mgmt;
		if (!list_empty(&conn->requeue))
			goto check_requeue;
	}

	spin_unlock_bh(&conn->session->frwd_lock);
	return -ENODATA;

done:
	spin_unlock_bh(&conn->session->frwd_lock);
	return rc;
}

static void iscsi_xmitworker(struct work_struct *work)
{
	struct iscsi_conn *conn =
		container_of(work, struct iscsi_conn, xmitwork);
	int rc;
	/*
	 * serialize Xmit worker on a per-connection basis.
	 */
	do {
		rc = iscsi_data_xmit(conn);
	} while (rc >= 0 || rc == -EAGAIN);
}

static inline struct iscsi_task *iscsi_alloc_task(struct iscsi_conn *conn,
						  struct scsi_cmnd *sc)
{
	struct iscsi_task *task;

	if (!kfifo_out(&conn->session->cmdpool.queue,
		       (void *) &task, sizeof(void *)))
		return NULL;

	iscsi_cmd(sc)->age = conn->session->age;
	iscsi_cmd(sc)->task = task;

	refcount_set(&task->refcount, 1);
	task->state = ISCSI_TASK_PENDING;
	task->conn = conn;
	task->sc = sc;
	task->have_checked_conn = false;
	task->last_timeout = jiffies;
	task->last_xfer = jiffies;
	task->protected = false;
	INIT_LIST_HEAD(&task->running);
	return task;
}

enum {
	FAILURE_BAD_HOST = 1,
	FAILURE_SESSION_FAILED,
	FAILURE_SESSION_FREED,
	FAILURE_WINDOW_CLOSED,
	FAILURE_OOM,
	FAILURE_SESSION_TERMINATE,
	FAILURE_SESSION_IN_RECOVERY,
	FAILURE_SESSION_RECOVERY_TIMEOUT,
	FAILURE_SESSION_LOGGING_OUT,
	FAILURE_SESSION_NOT_READY,
};

int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
{
	struct iscsi_cls_session *cls_session;
	struct iscsi_host *ihost;
	int reason = 0;
	struct iscsi_session *session;
	struct iscsi_conn *conn;
	struct iscsi_task *task = NULL;

	sc->result = 0;
	iscsi_cmd(sc)->task = NULL;

	ihost = shost_priv(host);

	cls_session = starget_to_session(scsi_target(sc->device));
	session = cls_session->dd_data;
	spin_lock_bh(&session->frwd_lock);

	reason = iscsi_session_chkready(cls_session);
	if (reason) {
		sc->result = reason;
		goto fault;
	}

	if (session->state != ISCSI_STATE_LOGGED_IN) {
		/*
		 * To handle the race between when we set the recovery state
		 * and block the session we requeue here (commands could
		 * be entering our queuecommand while a block is starting
		 * up because the block code is not locked)
		 */
		switch (session->state) {
		case ISCSI_STATE_FAILED:
			/*
			 * cmds should fail during shutdown if the session
			 * state is bad, allowing completion to happen
			 */
			if (unlikely(system_state != SYSTEM_RUNNING)) {
				reason = FAILURE_SESSION_FAILED;
				sc->result = DID_NO_CONNECT << 16;
				break;
			}
			fallthrough;
		case ISCSI_STATE_IN_RECOVERY:
			reason = FAILURE_SESSION_IN_RECOVERY;
			sc->result = DID_IMM_RETRY << 16;
			break;
		case ISCSI_STATE_LOGGING_OUT:
			reason = FAILURE_SESSION_LOGGING_OUT;
			sc->result = DID_IMM_RETRY << 16;
			break;
		case ISCSI_STATE_RECOVERY_FAILED:
			reason = FAILURE_SESSION_RECOVERY_TIMEOUT;
			sc->result = DID_TRANSPORT_FAILFAST << 16;
			break;
		case ISCSI_STATE_TERMINATE:
			reason = FAILURE_SESSION_TERMINATE;
			sc->result = DID_NO_CONNECT << 16;
			break;
		default:
			reason = FAILURE_SESSION_FREED;
			sc->result = DID_NO_CONNECT << 16;
		}
		goto fault;
	}

	conn = session->leadconn;
	if (!conn) {
		reason = FAILURE_SESSION_FREED;
		sc->result = DID_NO_CONNECT << 16;
		goto fault;
	}

	if (test_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags)) {
		reason = FAILURE_SESSION_IN_RECOVERY;
		sc->result = DID_REQUEUE << 16;
		goto fault;
	}

	if (iscsi_check_cmdsn_window_closed(conn)) {
		reason = FAILURE_WINDOW_CLOSED;
		goto reject;
	}

	task = iscsi_alloc_task(conn, sc);
	if (!task) {
		reason = FAILURE_OOM;
		goto reject;
	}

	if (!ihost->workq) {
		reason = iscsi_prep_scsi_cmd_pdu(task);
		if (reason) {
			if (reason == -ENOMEM || reason == -EACCES) {
				reason = FAILURE_OOM;
				goto prepd_reject;
			} else {
				sc->result = DID_ABORT << 16;
				goto prepd_fault;
			}
		}
		if (session->tt->xmit_task(task)) {
			session->cmdsn--;
			reason = FAILURE_SESSION_NOT_READY;
			goto prepd_reject;
		}
	} else {
		list_add_tail(&task->running, &conn->cmdqueue);
		iscsi_conn_queue_xmit(conn);
	}

	session->queued_cmdsn++;
	spin_unlock_bh(&session->frwd_lock);
	return 0;

prepd_reject:
	spin_lock_bh(&session->back_lock);
	iscsi_complete_task(task, ISCSI_TASK_REQUEUE_SCSIQ);
	spin_unlock_bh(&session->back_lock);
reject:
	spin_unlock_bh(&session->frwd_lock);
	ISCSI_DBG_SESSION(session, "cmd 0x%x rejected (%d)\n",
			  sc->cmnd[0], reason);
	return SCSI_MLQUEUE_TARGET_BUSY;

prepd_fault:
	spin_lock_bh(&session->back_lock);
	iscsi_complete_task(task, ISCSI_TASK_REQUEUE_SCSIQ);
	spin_unlock_bh(&session->back_lock);
fault:
	spin_unlock_bh(&session->frwd_lock);
	ISCSI_DBG_SESSION(session, "iscsi: cmd 0x%x is not queued (%d)\n",
			  sc->cmnd[0], reason);
	scsi_set_resid(sc, scsi_bufflen(sc));
	scsi_done(sc);
	return 0;
}
EXPORT_SYMBOL_GPL(iscsi_queuecommand);

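/*
 * Summary of the three ways out of iscsi_queuecommand() above (derived from
 * the code, stated here for reference): a command that cannot be started
 * because the window is closed or no task could be allocated is bounced
 * back with SCSI_MLQUEUE_TARGET_BUSY so the midlayer retries it; a command
 * hitting a dead or recovering session is completed immediately through
 * scsi_done() with the DID_* result chosen in the switch; and a command
 * that was queued (or sent directly when there is no workqueue) returns 0.
 */
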
int iscsi_target_alloc(struct scsi_target *starget)
{
	struct iscsi_cls_session *cls_session = starget_to_session(starget);
	struct iscsi_session *session = cls_session->dd_data;

	starget->can_queue = session->scsi_cmds_max;
	return 0;
}
EXPORT_SYMBOL_GPL(iscsi_target_alloc);

static void iscsi_tmf_timedout(struct timer_list *t)
{
	struct iscsi_session *session = from_timer(session, t, tmf_timer);

	spin_lock(&session->frwd_lock);
	if (session->tmf_state == TMF_QUEUED) {
		session->tmf_state = TMF_TIMEDOUT;
		ISCSI_DBG_EH(session, "tmf timedout\n");
		/* unblock eh_abort() */
		wake_up(&session->ehwait);
	}
	spin_unlock(&session->frwd_lock);
}

static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
				   struct iscsi_tm *hdr, int age,
				   int timeout)
	__must_hold(&session->frwd_lock)
{
	struct iscsi_session *session = conn->session;

	if (__iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0)) {
		spin_unlock_bh(&session->frwd_lock);
		iscsi_conn_printk(KERN_ERR, conn, "Could not send TMF.\n");
		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
		spin_lock_bh(&session->frwd_lock);
		return -EPERM;
	}
	conn->tmfcmd_pdus_cnt++;
	session->tmf_timer.expires = timeout * HZ + jiffies;
	add_timer(&session->tmf_timer);
	ISCSI_DBG_EH(session, "tmf set timeout\n");

	spin_unlock_bh(&session->frwd_lock);
	mutex_unlock(&session->eh_mutex);

	/*
	 * block eh thread until:
	 *
	 * 1) tmf response
	 * 2) tmf timeout
	 * 3) session is terminated or restarted or userspace has
	 * given up on recovery
	 */
	wait_event_interruptible(session->ehwait, age != session->age ||
				 session->state != ISCSI_STATE_LOGGED_IN ||
				 session->tmf_state != TMF_QUEUED);
	if (signal_pending(current))
		flush_signals(current);
	del_timer_sync(&session->tmf_timer);

	mutex_lock(&session->eh_mutex);
	spin_lock_bh(&session->frwd_lock);
	/* if the session drops it will clean up the task */
	if (age != session->age ||
	    session->state != ISCSI_STATE_LOGGED_IN)
		return -ENOTCONN;
	return 0;
}

/*
 * Fail commands. session frwd lock held and xmit thread flushed.
 */
static void fail_scsi_tasks(struct iscsi_conn *conn, u64 lun, int error)
{
	struct iscsi_session *session = conn->session;
	struct iscsi_task *task;
	int i;

restart_cmd_loop:
	spin_lock_bh(&session->back_lock);
	for (i = 0; i < session->cmds_max; i++) {
		task = session->cmds[i];
		if (!task->sc || task->state == ISCSI_TASK_FREE)
			continue;

		if (lun != -1 && lun != task->sc->device->lun)
			continue;
		/*
		 * The cmd is completing but if this is called from an eh
		 * callout path then when we return scsi-ml owns the cmd. Wait
		 * for the completion path to finish freeing the cmd.
		 */
		if (!iscsi_get_task(task)) {
			spin_unlock_bh(&session->back_lock);
			spin_unlock_bh(&session->frwd_lock);
			udelay(ISCSI_CMD_COMPL_WAIT);
			spin_lock_bh(&session->frwd_lock);
			goto restart_cmd_loop;
		}

		ISCSI_DBG_SESSION(session,
				  "failing sc %p itt 0x%x state %d\n",
				  task->sc, task->itt, task->state);
		__fail_scsi_task(task, error);
		__iscsi_put_task(task);
	}
	spin_unlock_bh(&session->back_lock);
}

/**
 * iscsi_suspend_queue - suspend iscsi_queuecommand
 * @conn: iscsi conn to stop queueing IO on
 *
 * This grabs the session frwd_lock to make sure no one is in
 * xmit_task/queuecommand, and then sets suspend to prevent
 * new commands from being queued. This only needs to be called
 * by offload drivers that need to sync a path like ep disconnect
 * with the iscsi_queuecommand/xmit_task. To start IO again libiscsi
 * will call iscsi_start_tx and iscsi_unblock_session when in FFP.
 */
void iscsi_suspend_queue(struct iscsi_conn *conn)
{
	spin_lock_bh(&conn->session->frwd_lock);
	set_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
	spin_unlock_bh(&conn->session->frwd_lock);
}
EXPORT_SYMBOL_GPL(iscsi_suspend_queue);

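/*
 * Rough ordering an offload driver's ep_disconnect path might follow when
 * using the helper above (illustrative sketch; the callback and hardware
 * steps are hypothetical):
 *
 *	static void example_ep_disconnect(struct iscsi_endpoint *ep)
 *	{
 *		struct example_conn *econn = ep->dd_data;
 *
 *		if (econn->conn)
 *			iscsi_suspend_queue(econn->conn);
 *		// tear down the offloaded connection in hardware
 *	}
 *
 * Once the bit is set, iscsi_queuecommand() completes new commands with
 * DID_REQUEUE and iscsi_data_xmit() stops, so the hardware teardown cannot
 * race with a transmit.
 */
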
This only needs to be called 2006 * by offload drivers that need to sync a path like ep disconnect 2007 * with the iscsi_queuecommand/xmit_task. To start IO again libiscsi 2008 * will call iscsi_start_tx and iscsi_unblock_session when in FFP. 2009 */ 2010 void iscsi_suspend_queue(struct iscsi_conn *conn) 2011 { 2012 spin_lock_bh(&conn->session->frwd_lock); 2013 set_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags); 2014 spin_unlock_bh(&conn->session->frwd_lock); 2015 } 2016 EXPORT_SYMBOL_GPL(iscsi_suspend_queue); 2017 2018 /** 2019 * iscsi_suspend_tx - suspend iscsi_data_xmit 2020 * @conn: iscsi conn to stop processing IO on. 2021 * 2022 * This function sets the suspend bit to prevent iscsi_data_xmit 2023 * from sending new IO, and if work is queued on the xmit thread 2024 * it will wait for it to be completed. 2025 */ 2026 void iscsi_suspend_tx(struct iscsi_conn *conn) 2027 { 2028 struct Scsi_Host *shost = conn->session->host; 2029 struct iscsi_host *ihost = shost_priv(shost); 2030 2031 set_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags); 2032 if (ihost->workq) 2033 flush_work(&conn->xmitwork); 2034 } 2035 EXPORT_SYMBOL_GPL(iscsi_suspend_tx); 2036 2037 static void iscsi_start_tx(struct iscsi_conn *conn) 2038 { 2039 clear_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags); 2040 iscsi_conn_queue_xmit(conn); 2041 } 2042 2043 /** 2044 * iscsi_suspend_rx - Prevent recvwork from running again. 2045 * @conn: iscsi conn to stop. 2046 */ 2047 void iscsi_suspend_rx(struct iscsi_conn *conn) 2048 { 2049 struct Scsi_Host *shost = conn->session->host; 2050 struct iscsi_host *ihost = shost_priv(shost); 2051 2052 set_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags); 2053 if (ihost->workq) 2054 flush_work(&conn->recvwork); 2055 } 2056 EXPORT_SYMBOL_GPL(iscsi_suspend_rx); 2057 2058 /* 2059 * We want to make sure a ping is in flight. It has timed out. 2060 * And we are not busy processing a pdu that is making 2061 * progress but got started before the ping and is taking a while 2062 * to complete so the ping is just stuck behind it in a queue. 2063 */ 2064 static int iscsi_has_ping_timed_out(struct iscsi_conn *conn) 2065 { 2066 if (READ_ONCE(conn->ping_task) && 2067 time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) + 2068 (conn->ping_timeout * HZ), jiffies)) 2069 return 1; 2070 else 2071 return 0; 2072 } 2073 2074 enum scsi_timeout_action iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc) 2075 { 2076 enum scsi_timeout_action rc = SCSI_EH_NOT_HANDLED; 2077 struct iscsi_task *task = NULL, *running_task; 2078 struct iscsi_cls_session *cls_session; 2079 struct iscsi_session *session; 2080 struct iscsi_conn *conn; 2081 int i; 2082 2083 cls_session = starget_to_session(scsi_target(sc->device)); 2084 session = cls_session->dd_data; 2085 2086 ISCSI_DBG_EH(session, "scsi cmd %p timedout\n", sc); 2087 2088 spin_lock_bh(&session->frwd_lock); 2089 spin_lock(&session->back_lock); 2090 task = iscsi_cmd(sc)->task; 2091 if (!task) { 2092 /* 2093 * Raced with completion. Blk layer has taken ownership 2094 * so let timeout code complete it now. 2095 */ 2096 rc = SCSI_EH_NOT_HANDLED; 2097 spin_unlock(&session->back_lock); 2098 goto done; 2099 } 2100 if (!iscsi_get_task(task)) { 2101 /* 2102 * Racing with the completion path right now, so give it more 2103 * time so that path can complete it like normal. 
2104 */ 2105 rc = SCSI_EH_RESET_TIMER; 2106 task = NULL; 2107 spin_unlock(&session->back_lock); 2108 goto done; 2109 } 2110 spin_unlock(&session->back_lock); 2111 2112 if (session->state != ISCSI_STATE_LOGGED_IN) { 2113 /* 2114 * During shutdown, if session is prematurely disconnected, 2115 * recovery won't happen and there will be hung cmds. Not 2116 * handling cmds would trigger EH, also bad in this case. 2117 * Instead, handle cmd, allow completion to happen and let 2118 * upper layer to deal with the result. 2119 */ 2120 if (unlikely(system_state != SYSTEM_RUNNING)) { 2121 sc->result = DID_NO_CONNECT << 16; 2122 ISCSI_DBG_EH(session, "sc on shutdown, handled\n"); 2123 rc = SCSI_EH_NOT_HANDLED; 2124 goto done; 2125 } 2126 /* 2127 * We are probably in the middle of iscsi recovery so let 2128 * that complete and handle the error. 2129 */ 2130 rc = SCSI_EH_RESET_TIMER; 2131 goto done; 2132 } 2133 2134 conn = session->leadconn; 2135 if (!conn) { 2136 /* In the middle of shuting down */ 2137 rc = SCSI_EH_RESET_TIMER; 2138 goto done; 2139 } 2140 2141 /* 2142 * If we have sent (at least queued to the network layer) a pdu or 2143 * recvd one for the task since the last timeout ask for 2144 * more time. If on the next timeout we have not made progress 2145 * we can check if it is the task or connection when we send the 2146 * nop as a ping. 2147 */ 2148 if (time_after(task->last_xfer, task->last_timeout)) { 2149 ISCSI_DBG_EH(session, "Command making progress. Asking " 2150 "scsi-ml for more time to complete. " 2151 "Last data xfer at %lu. Last timeout was at " 2152 "%lu\n.", task->last_xfer, task->last_timeout); 2153 task->have_checked_conn = false; 2154 rc = SCSI_EH_RESET_TIMER; 2155 goto done; 2156 } 2157 2158 if (!conn->recv_timeout && !conn->ping_timeout) 2159 goto done; 2160 /* 2161 * if the ping timedout then we are in the middle of cleaning up 2162 * and can let the iscsi eh handle it 2163 */ 2164 if (iscsi_has_ping_timed_out(conn)) { 2165 rc = SCSI_EH_RESET_TIMER; 2166 goto done; 2167 } 2168 2169 spin_lock(&session->back_lock); 2170 for (i = 0; i < conn->session->cmds_max; i++) { 2171 running_task = conn->session->cmds[i]; 2172 if (!running_task->sc || running_task == task || 2173 running_task->state != ISCSI_TASK_RUNNING) 2174 continue; 2175 2176 /* 2177 * Only check if cmds started before this one have made 2178 * progress, or this could never fail 2179 */ 2180 if (time_after(running_task->sc->jiffies_at_alloc, 2181 task->sc->jiffies_at_alloc)) 2182 continue; 2183 2184 if (time_after(running_task->last_xfer, task->last_timeout)) { 2185 /* 2186 * This task has not made progress, but a task 2187 * started before us has transferred data since 2188 * we started/last-checked. We could be queueing 2189 * too many tasks or the LU is bad. 2190 * 2191 * If the device is bad the cmds ahead of us on 2192 * other devs will complete, and this loop will 2193 * eventually fail starting the scsi eh. 2194 */ 2195 ISCSI_DBG_EH(session, "Command has not made progress " 2196 "but commands ahead of it have. " 2197 "Asking scsi-ml for more time to " 2198 "complete. Our last xfer vs running task " 2199 "last xfer %lu/%lu. 
Last check %lu.\n", 2200 task->last_xfer, running_task->last_xfer, 2201 task->last_timeout); 2202 spin_unlock(&session->back_lock); 2203 rc = SCSI_EH_RESET_TIMER; 2204 goto done; 2205 } 2206 } 2207 spin_unlock(&session->back_lock); 2208 2209 /* Assumes nop timeout is shorter than scsi cmd timeout */ 2210 if (task->have_checked_conn) 2211 goto done; 2212 2213 /* 2214 * Checking the transport already or nop from a cmd timeout still 2215 * running 2216 */ 2217 if (READ_ONCE(conn->ping_task)) { 2218 task->have_checked_conn = true; 2219 rc = SCSI_EH_RESET_TIMER; 2220 goto done; 2221 } 2222 2223 /* Make sure there is a transport check done */ 2224 iscsi_send_nopout(conn, NULL); 2225 task->have_checked_conn = true; 2226 rc = SCSI_EH_RESET_TIMER; 2227 2228 done: 2229 spin_unlock_bh(&session->frwd_lock); 2230 2231 if (task) { 2232 task->last_timeout = jiffies; 2233 iscsi_put_task(task); 2234 } 2235 ISCSI_DBG_EH(session, "return %s\n", rc == SCSI_EH_RESET_TIMER ? 2236 "timer reset" : "shutdown or nh"); 2237 return rc; 2238 } 2239 EXPORT_SYMBOL_GPL(iscsi_eh_cmd_timed_out); 2240 2241 static void iscsi_check_transport_timeouts(struct timer_list *t) 2242 { 2243 struct iscsi_conn *conn = from_timer(conn, t, transport_timer); 2244 struct iscsi_session *session = conn->session; 2245 unsigned long recv_timeout, next_timeout = 0, last_recv; 2246 2247 spin_lock(&session->frwd_lock); 2248 if (session->state != ISCSI_STATE_LOGGED_IN) 2249 goto done; 2250 2251 recv_timeout = conn->recv_timeout; 2252 if (!recv_timeout) 2253 goto done; 2254 2255 recv_timeout *= HZ; 2256 last_recv = conn->last_recv; 2257 2258 if (iscsi_has_ping_timed_out(conn)) { 2259 iscsi_conn_printk(KERN_ERR, conn, "ping timeout of %d secs " 2260 "expired, recv timeout %d, last rx %lu, " 2261 "last ping %lu, now %lu\n", 2262 conn->ping_timeout, conn->recv_timeout, 2263 last_recv, conn->last_ping, jiffies); 2264 spin_unlock(&session->frwd_lock); 2265 iscsi_conn_failure(conn, ISCSI_ERR_NOP_TIMEDOUT); 2266 return; 2267 } 2268 2269 if (time_before_eq(last_recv + recv_timeout, jiffies)) { 2270 /* send a ping to try to provoke some traffic */ 2271 ISCSI_DBG_CONN(conn, "Sending nopout as ping\n"); 2272 if (iscsi_send_nopout(conn, NULL)) 2273 next_timeout = jiffies + (1 * HZ); 2274 else 2275 next_timeout = conn->last_ping + (conn->ping_timeout * HZ); 2276 } else 2277 next_timeout = last_recv + recv_timeout; 2278 2279 ISCSI_DBG_CONN(conn, "Setting next tmo %lu\n", next_timeout); 2280 mod_timer(&conn->transport_timer, next_timeout); 2281 done: 2282 spin_unlock(&session->frwd_lock); 2283 } 2284 2285 /** 2286 * iscsi_conn_unbind - prevent queueing to conn. 2287 * @cls_conn: iscsi conn ep is bound to. 2288 * @is_active: is the conn in use for boot or is this for EH/termination 2289 * 2290 * This must be called by drivers implementing the ep_disconnect callout. 2291 * It disables queueing to the connection from libiscsi in preparation for 2292 * an ep_disconnect call. 2293 */ 2294 void iscsi_conn_unbind(struct iscsi_cls_conn *cls_conn, bool is_active) 2295 { 2296 struct iscsi_session *session; 2297 struct iscsi_conn *conn; 2298 2299 if (!cls_conn) 2300 return; 2301 2302 conn = cls_conn->dd_data; 2303 session = conn->session; 2304 /* 2305 * Wait for iscsi_eh calls to exit. We don't wait for the tmf to 2306 * complete or timeout. The caller just wants to know what's running 2307 * is everything that needs to be cleaned up, and no cmds will be 2308 * queued. 
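 *
 * Illustrative ordering for an offload driver's ep_disconnect callout
 * (a sketch of the intended call sequence, not a required implementation):
 *
 *	iscsi_conn_unbind(cls_conn, is_active);
 *	... disconnect and free the transport endpoint ...
 *
 * so that nothing new can be queued to the connection while the
 * endpoint is being torn down.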
2309 */ 2310 mutex_lock(&session->eh_mutex); 2311 2312 iscsi_suspend_queue(conn); 2313 iscsi_suspend_tx(conn); 2314 2315 spin_lock_bh(&session->frwd_lock); 2316 clear_bit(ISCSI_CONN_FLAG_BOUND, &conn->flags); 2317 2318 if (!is_active) { 2319 /* 2320 * if logout timed out before userspace could even send a PDU 2321 * the state might still be in ISCSI_STATE_LOGGED_IN and 2322 * allowing new cmds and TMFs. 2323 */ 2324 if (session->state == ISCSI_STATE_LOGGED_IN) 2325 iscsi_set_conn_failed(conn); 2326 } 2327 spin_unlock_bh(&session->frwd_lock); 2328 mutex_unlock(&session->eh_mutex); 2329 } 2330 EXPORT_SYMBOL_GPL(iscsi_conn_unbind); 2331 2332 static void iscsi_prep_abort_task_pdu(struct iscsi_task *task, 2333 struct iscsi_tm *hdr) 2334 { 2335 memset(hdr, 0, sizeof(*hdr)); 2336 hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE; 2337 hdr->flags = ISCSI_TM_FUNC_ABORT_TASK & ISCSI_FLAG_TM_FUNC_MASK; 2338 hdr->flags |= ISCSI_FLAG_CMD_FINAL; 2339 hdr->lun = task->lun; 2340 hdr->rtt = task->hdr_itt; 2341 hdr->refcmdsn = task->cmdsn; 2342 } 2343 2344 int iscsi_eh_abort(struct scsi_cmnd *sc) 2345 { 2346 struct iscsi_cls_session *cls_session; 2347 struct iscsi_session *session; 2348 struct iscsi_conn *conn; 2349 struct iscsi_task *task; 2350 struct iscsi_tm *hdr; 2351 int age; 2352 2353 cls_session = starget_to_session(scsi_target(sc->device)); 2354 session = cls_session->dd_data; 2355 2356 ISCSI_DBG_EH(session, "aborting sc %p\n", sc); 2357 2358 completion_check: 2359 mutex_lock(&session->eh_mutex); 2360 spin_lock_bh(&session->frwd_lock); 2361 /* 2362 * if session was ISCSI_STATE_IN_RECOVERY then we may not have 2363 * got the command. 2364 */ 2365 if (!iscsi_cmd(sc)->task) { 2366 ISCSI_DBG_EH(session, "sc never reached iscsi layer or " 2367 "it completed.\n"); 2368 spin_unlock_bh(&session->frwd_lock); 2369 mutex_unlock(&session->eh_mutex); 2370 return SUCCESS; 2371 } 2372 2373 /* 2374 * If we are not logged in or we have started a new session 2375 * then let the host reset code handle this 2376 */ 2377 if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN || 2378 iscsi_cmd(sc)->age != session->age) { 2379 spin_unlock_bh(&session->frwd_lock); 2380 mutex_unlock(&session->eh_mutex); 2381 ISCSI_DBG_EH(session, "failing abort due to dropped " 2382 "session.\n"); 2383 return FAILED; 2384 } 2385 2386 spin_lock(&session->back_lock); 2387 task = iscsi_cmd(sc)->task; 2388 if (!task || !task->sc) { 2389 /* task completed before time out */ 2390 ISCSI_DBG_EH(session, "sc completed while abort in progress\n"); 2391 2392 spin_unlock(&session->back_lock); 2393 spin_unlock_bh(&session->frwd_lock); 2394 mutex_unlock(&session->eh_mutex); 2395 return SUCCESS; 2396 } 2397 2398 if (!iscsi_get_task(task)) { 2399 spin_unlock(&session->back_lock); 2400 spin_unlock_bh(&session->frwd_lock); 2401 mutex_unlock(&session->eh_mutex); 2402 /* We are just about to call iscsi_free_task so wait for it. 
*/ 2403 udelay(ISCSI_CMD_COMPL_WAIT); 2404 goto completion_check; 2405 } 2406 2407 ISCSI_DBG_EH(session, "aborting [sc %p itt 0x%x]\n", sc, task->itt); 2408 conn = session->leadconn; 2409 iscsi_get_conn(conn->cls_conn); 2410 conn->eh_abort_cnt++; 2411 age = session->age; 2412 spin_unlock(&session->back_lock); 2413 2414 if (task->state == ISCSI_TASK_PENDING) { 2415 fail_scsi_task(task, DID_ABORT); 2416 goto success; 2417 } 2418 2419 /* only have one tmf outstanding at a time */ 2420 if (session->tmf_state != TMF_INITIAL) 2421 goto failed; 2422 session->tmf_state = TMF_QUEUED; 2423 2424 hdr = &session->tmhdr; 2425 iscsi_prep_abort_task_pdu(task, hdr); 2426 2427 if (iscsi_exec_task_mgmt_fn(conn, hdr, age, session->abort_timeout)) 2428 goto failed; 2429 2430 switch (session->tmf_state) { 2431 case TMF_SUCCESS: 2432 spin_unlock_bh(&session->frwd_lock); 2433 /* 2434 * stop tx side incase the target had sent a abort rsp but 2435 * the initiator was still writing out data. 2436 */ 2437 iscsi_suspend_tx(conn); 2438 /* 2439 * we do not stop the recv side because targets have been 2440 * good and have never sent us a successful tmf response 2441 * then sent more data for the cmd. 2442 */ 2443 spin_lock_bh(&session->frwd_lock); 2444 fail_scsi_task(task, DID_ABORT); 2445 session->tmf_state = TMF_INITIAL; 2446 memset(hdr, 0, sizeof(*hdr)); 2447 spin_unlock_bh(&session->frwd_lock); 2448 iscsi_start_tx(conn); 2449 goto success_unlocked; 2450 case TMF_TIMEDOUT: 2451 session->running_aborted_task = task; 2452 spin_unlock_bh(&session->frwd_lock); 2453 iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST); 2454 goto failed_unlocked; 2455 case TMF_NOT_FOUND: 2456 if (iscsi_task_is_completed(task)) { 2457 session->tmf_state = TMF_INITIAL; 2458 memset(hdr, 0, sizeof(*hdr)); 2459 /* task completed before tmf abort response */ 2460 ISCSI_DBG_EH(session, "sc completed while abort in " 2461 "progress\n"); 2462 goto success; 2463 } 2464 fallthrough; 2465 default: 2466 session->tmf_state = TMF_INITIAL; 2467 goto failed; 2468 } 2469 2470 success: 2471 spin_unlock_bh(&session->frwd_lock); 2472 success_unlocked: 2473 ISCSI_DBG_EH(session, "abort success [sc %p itt 0x%x]\n", 2474 sc, task->itt); 2475 iscsi_put_task(task); 2476 iscsi_put_conn(conn->cls_conn); 2477 mutex_unlock(&session->eh_mutex); 2478 return SUCCESS; 2479 2480 failed: 2481 spin_unlock_bh(&session->frwd_lock); 2482 failed_unlocked: 2483 ISCSI_DBG_EH(session, "abort failed [sc %p itt 0x%x]\n", sc, 2484 task ? task->itt : 0); 2485 /* 2486 * The driver might be accessing the task so hold the ref. The conn 2487 * stop cleanup will drop the ref after ep_disconnect so we know the 2488 * driver's no longer touching the task. 
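 *
 * Concretely: on the TMF_TIMEDOUT path above the task was stashed in
 * session->running_aborted_task, so the put below is skipped and the
 * reference is released later by the conn stop cleanup instead.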
2489 */ 2490 if (!session->running_aborted_task) 2491 iscsi_put_task(task); 2492 2493 iscsi_put_conn(conn->cls_conn); 2494 mutex_unlock(&session->eh_mutex); 2495 return FAILED; 2496 } 2497 EXPORT_SYMBOL_GPL(iscsi_eh_abort); 2498 2499 static void iscsi_prep_lun_reset_pdu(struct scsi_cmnd *sc, struct iscsi_tm *hdr) 2500 { 2501 memset(hdr, 0, sizeof(*hdr)); 2502 hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE; 2503 hdr->flags = ISCSI_TM_FUNC_LOGICAL_UNIT_RESET & ISCSI_FLAG_TM_FUNC_MASK; 2504 hdr->flags |= ISCSI_FLAG_CMD_FINAL; 2505 int_to_scsilun(sc->device->lun, &hdr->lun); 2506 hdr->rtt = RESERVED_ITT; 2507 } 2508 2509 int iscsi_eh_device_reset(struct scsi_cmnd *sc) 2510 { 2511 struct iscsi_cls_session *cls_session; 2512 struct iscsi_session *session; 2513 struct iscsi_conn *conn; 2514 struct iscsi_tm *hdr; 2515 int rc = FAILED; 2516 2517 cls_session = starget_to_session(scsi_target(sc->device)); 2518 session = cls_session->dd_data; 2519 2520 ISCSI_DBG_EH(session, "LU Reset [sc %p lun %llu]\n", sc, 2521 sc->device->lun); 2522 2523 mutex_lock(&session->eh_mutex); 2524 spin_lock_bh(&session->frwd_lock); 2525 /* 2526 * Just check if we are not logged in. We cannot check for 2527 * the phase because the reset could come from a ioctl. 2528 */ 2529 if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN) 2530 goto unlock; 2531 conn = session->leadconn; 2532 2533 /* only have one tmf outstanding at a time */ 2534 if (session->tmf_state != TMF_INITIAL) 2535 goto unlock; 2536 session->tmf_state = TMF_QUEUED; 2537 2538 hdr = &session->tmhdr; 2539 iscsi_prep_lun_reset_pdu(sc, hdr); 2540 2541 if (iscsi_exec_task_mgmt_fn(conn, hdr, session->age, 2542 session->lu_reset_timeout)) { 2543 rc = FAILED; 2544 goto unlock; 2545 } 2546 2547 switch (session->tmf_state) { 2548 case TMF_SUCCESS: 2549 break; 2550 case TMF_TIMEDOUT: 2551 spin_unlock_bh(&session->frwd_lock); 2552 iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST); 2553 goto done; 2554 default: 2555 session->tmf_state = TMF_INITIAL; 2556 goto unlock; 2557 } 2558 2559 rc = SUCCESS; 2560 spin_unlock_bh(&session->frwd_lock); 2561 2562 iscsi_suspend_tx(conn); 2563 2564 spin_lock_bh(&session->frwd_lock); 2565 memset(hdr, 0, sizeof(*hdr)); 2566 fail_scsi_tasks(conn, sc->device->lun, DID_ERROR); 2567 session->tmf_state = TMF_INITIAL; 2568 spin_unlock_bh(&session->frwd_lock); 2569 2570 iscsi_start_tx(conn); 2571 goto done; 2572 2573 unlock: 2574 spin_unlock_bh(&session->frwd_lock); 2575 done: 2576 ISCSI_DBG_EH(session, "dev reset result = %s\n", 2577 rc == SUCCESS ? "SUCCESS" : "FAILED"); 2578 mutex_unlock(&session->eh_mutex); 2579 return rc; 2580 } 2581 EXPORT_SYMBOL_GPL(iscsi_eh_device_reset); 2582 2583 void iscsi_session_recovery_timedout(struct iscsi_cls_session *cls_session) 2584 { 2585 struct iscsi_session *session = cls_session->dd_data; 2586 2587 spin_lock_bh(&session->frwd_lock); 2588 if (session->state != ISCSI_STATE_LOGGED_IN) { 2589 session->state = ISCSI_STATE_RECOVERY_FAILED; 2590 wake_up(&session->ehwait); 2591 } 2592 spin_unlock_bh(&session->frwd_lock); 2593 } 2594 EXPORT_SYMBOL_GPL(iscsi_session_recovery_timedout); 2595 2596 /** 2597 * iscsi_eh_session_reset - drop session and attempt relogin 2598 * @sc: scsi command 2599 * 2600 * This function will wait for a relogin, session termination from 2601 * userspace, or a recovery/replacement timeout. 
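 *
 * Illustrative scsi_host_template wiring in a LLD that uses these
 * helpers (a sketch only; the exact set of handlers is up to the driver):
 *
 *	.eh_abort_handler	 = iscsi_eh_abort,
 *	.eh_device_reset_handler = iscsi_eh_device_reset,
 *	.eh_target_reset_handler = iscsi_eh_recover_target,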
2602 */ 2603 int iscsi_eh_session_reset(struct scsi_cmnd *sc) 2604 { 2605 struct iscsi_cls_session *cls_session; 2606 struct iscsi_session *session; 2607 struct iscsi_conn *conn; 2608 2609 cls_session = starget_to_session(scsi_target(sc->device)); 2610 session = cls_session->dd_data; 2611 2612 mutex_lock(&session->eh_mutex); 2613 spin_lock_bh(&session->frwd_lock); 2614 if (session->state == ISCSI_STATE_TERMINATE) { 2615 failed: 2616 ISCSI_DBG_EH(session, 2617 "failing session reset: Could not log back into " 2618 "%s [age %d]\n", session->targetname, 2619 session->age); 2620 spin_unlock_bh(&session->frwd_lock); 2621 mutex_unlock(&session->eh_mutex); 2622 return FAILED; 2623 } 2624 2625 conn = session->leadconn; 2626 iscsi_get_conn(conn->cls_conn); 2627 2628 spin_unlock_bh(&session->frwd_lock); 2629 mutex_unlock(&session->eh_mutex); 2630 2631 iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST); 2632 iscsi_put_conn(conn->cls_conn); 2633 2634 ISCSI_DBG_EH(session, "wait for relogin\n"); 2635 wait_event_interruptible(session->ehwait, 2636 session->state == ISCSI_STATE_TERMINATE || 2637 session->state == ISCSI_STATE_LOGGED_IN || 2638 session->state == ISCSI_STATE_RECOVERY_FAILED); 2639 if (signal_pending(current)) 2640 flush_signals(current); 2641 2642 mutex_lock(&session->eh_mutex); 2643 spin_lock_bh(&session->frwd_lock); 2644 if (session->state == ISCSI_STATE_LOGGED_IN) { 2645 ISCSI_DBG_EH(session, 2646 "session reset succeeded for %s,%s\n", 2647 session->targetname, conn->persistent_address); 2648 } else 2649 goto failed; 2650 spin_unlock_bh(&session->frwd_lock); 2651 mutex_unlock(&session->eh_mutex); 2652 return SUCCESS; 2653 } 2654 EXPORT_SYMBOL_GPL(iscsi_eh_session_reset); 2655 2656 static void iscsi_prep_tgt_reset_pdu(struct scsi_cmnd *sc, struct iscsi_tm *hdr) 2657 { 2658 memset(hdr, 0, sizeof(*hdr)); 2659 hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE; 2660 hdr->flags = ISCSI_TM_FUNC_TARGET_WARM_RESET & ISCSI_FLAG_TM_FUNC_MASK; 2661 hdr->flags |= ISCSI_FLAG_CMD_FINAL; 2662 hdr->rtt = RESERVED_ITT; 2663 } 2664 2665 /** 2666 * iscsi_eh_target_reset - reset target 2667 * @sc: scsi command 2668 * 2669 * This will attempt to send a warm target reset. 2670 */ 2671 static int iscsi_eh_target_reset(struct scsi_cmnd *sc) 2672 { 2673 struct iscsi_cls_session *cls_session; 2674 struct iscsi_session *session; 2675 struct iscsi_conn *conn; 2676 struct iscsi_tm *hdr; 2677 int rc = FAILED; 2678 2679 cls_session = starget_to_session(scsi_target(sc->device)); 2680 session = cls_session->dd_data; 2681 2682 ISCSI_DBG_EH(session, "tgt Reset [sc %p tgt %s]\n", sc, 2683 session->targetname); 2684 2685 mutex_lock(&session->eh_mutex); 2686 spin_lock_bh(&session->frwd_lock); 2687 /* 2688 * Just check if we are not logged in. We cannot check for 2689 * the phase because the reset could come from a ioctl. 
2690 */ 2691 if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN) 2692 goto unlock; 2693 conn = session->leadconn; 2694 2695 /* only have one tmf outstanding at a time */ 2696 if (session->tmf_state != TMF_INITIAL) 2697 goto unlock; 2698 session->tmf_state = TMF_QUEUED; 2699 2700 hdr = &session->tmhdr; 2701 iscsi_prep_tgt_reset_pdu(sc, hdr); 2702 2703 if (iscsi_exec_task_mgmt_fn(conn, hdr, session->age, 2704 session->tgt_reset_timeout)) { 2705 rc = FAILED; 2706 goto unlock; 2707 } 2708 2709 switch (session->tmf_state) { 2710 case TMF_SUCCESS: 2711 break; 2712 case TMF_TIMEDOUT: 2713 spin_unlock_bh(&session->frwd_lock); 2714 iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST); 2715 goto done; 2716 default: 2717 session->tmf_state = TMF_INITIAL; 2718 goto unlock; 2719 } 2720 2721 rc = SUCCESS; 2722 spin_unlock_bh(&session->frwd_lock); 2723 2724 iscsi_suspend_tx(conn); 2725 2726 spin_lock_bh(&session->frwd_lock); 2727 memset(hdr, 0, sizeof(*hdr)); 2728 fail_scsi_tasks(conn, -1, DID_ERROR); 2729 session->tmf_state = TMF_INITIAL; 2730 spin_unlock_bh(&session->frwd_lock); 2731 2732 iscsi_start_tx(conn); 2733 goto done; 2734 2735 unlock: 2736 spin_unlock_bh(&session->frwd_lock); 2737 done: 2738 ISCSI_DBG_EH(session, "tgt %s reset result = %s\n", session->targetname, 2739 rc == SUCCESS ? "SUCCESS" : "FAILED"); 2740 mutex_unlock(&session->eh_mutex); 2741 return rc; 2742 } 2743 2744 /** 2745 * iscsi_eh_recover_target - reset target and possibly the session 2746 * @sc: scsi command 2747 * 2748 * This will attempt to send a warm target reset. If that fails, 2749 * we will escalate to ERL0 session recovery. 2750 */ 2751 int iscsi_eh_recover_target(struct scsi_cmnd *sc) 2752 { 2753 int rc; 2754 2755 rc = iscsi_eh_target_reset(sc); 2756 if (rc == FAILED) 2757 rc = iscsi_eh_session_reset(sc); 2758 return rc; 2759 } 2760 EXPORT_SYMBOL_GPL(iscsi_eh_recover_target); 2761 2762 /* 2763 * Pre-allocate a pool of @max items of @item_size. By default, the pool 2764 * should be accessed via kfifo_{get,put} on q->queue. 2765 * Optionally, the caller can obtain the array of object pointers 2766 * by passing in a non-NULL @items pointer 2767 */ 2768 int 2769 iscsi_pool_init(struct iscsi_pool *q, int max, void ***items, int item_size) 2770 { 2771 int i, num_arrays = 1; 2772 2773 memset(q, 0, sizeof(*q)); 2774 2775 q->max = max; 2776 2777 /* If the user passed an items pointer, he wants a copy of 2778 * the array. 
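That second array gives the caller a stable
 * index-to-pointer table (for example session->cmds[] filled in by
 * iscsi_session_setup()), while the kfifo itself only tracks which
 * items are currently free.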
*/ 2779 if (items) 2780 num_arrays++; 2781 q->pool = kvcalloc(num_arrays * max, sizeof(void *), GFP_KERNEL); 2782 if (q->pool == NULL) 2783 return -ENOMEM; 2784 2785 kfifo_init(&q->queue, (void*)q->pool, max * sizeof(void*)); 2786 2787 for (i = 0; i < max; i++) { 2788 q->pool[i] = kzalloc(item_size, GFP_KERNEL); 2789 if (q->pool[i] == NULL) { 2790 q->max = i; 2791 goto enomem; 2792 } 2793 kfifo_in(&q->queue, (void*)&q->pool[i], sizeof(void*)); 2794 } 2795 2796 if (items) { 2797 *items = q->pool + max; 2798 memcpy(*items, q->pool, max * sizeof(void *)); 2799 } 2800 2801 return 0; 2802 2803 enomem: 2804 iscsi_pool_free(q); 2805 return -ENOMEM; 2806 } 2807 EXPORT_SYMBOL_GPL(iscsi_pool_init); 2808 2809 void iscsi_pool_free(struct iscsi_pool *q) 2810 { 2811 int i; 2812 2813 for (i = 0; i < q->max; i++) 2814 kfree(q->pool[i]); 2815 kvfree(q->pool); 2816 } 2817 EXPORT_SYMBOL_GPL(iscsi_pool_free); 2818 2819 int iscsi_host_get_max_scsi_cmds(struct Scsi_Host *shost, 2820 uint16_t requested_cmds_max) 2821 { 2822 int scsi_cmds, total_cmds = requested_cmds_max; 2823 2824 check: 2825 if (!total_cmds) 2826 total_cmds = ISCSI_DEF_XMIT_CMDS_MAX; 2827 /* 2828 * The iscsi layer needs some tasks for nop handling and tmfs, 2829 * so the cmds_max must at least be greater than ISCSI_MGMT_CMDS_MAX 2830 * + 1 command for scsi IO. 2831 */ 2832 if (total_cmds < ISCSI_TOTAL_CMDS_MIN) { 2833 printk(KERN_ERR "iscsi: invalid max cmds of %d. Must be a power of two that is at least %d.\n", 2834 total_cmds, ISCSI_TOTAL_CMDS_MIN); 2835 return -EINVAL; 2836 } 2837 2838 if (total_cmds > ISCSI_TOTAL_CMDS_MAX) { 2839 printk(KERN_INFO "iscsi: invalid max cmds of %d. Must be a power of 2 less than or equal to %d. Using %d.\n", 2840 requested_cmds_max, ISCSI_TOTAL_CMDS_MAX, 2841 ISCSI_TOTAL_CMDS_MAX); 2842 total_cmds = ISCSI_TOTAL_CMDS_MAX; 2843 } 2844 2845 if (!is_power_of_2(total_cmds)) { 2846 total_cmds = rounddown_pow_of_two(total_cmds); 2847 if (total_cmds < ISCSI_TOTAL_CMDS_MIN) { 2848 printk(KERN_ERR "iscsi: invalid max cmds of %d. Must be a power of 2 greater than %d.\n", requested_cmds_max, ISCSI_TOTAL_CMDS_MIN); 2849 return -EINVAL; 2850 } 2851 2852 printk(KERN_INFO "iscsi: invalid max cmds %d. Must be a power of 2. Rounding max cmds down to %d.\n", 2853 requested_cmds_max, total_cmds); 2854 } 2855 2856 scsi_cmds = total_cmds - ISCSI_MGMT_CMDS_MAX; 2857 if (shost->can_queue && scsi_cmds > shost->can_queue) { 2858 total_cmds = shost->can_queue; 2859 2860 printk(KERN_INFO "iscsi: requested max cmds %u is higher than driver limit. Using driver limit %u\n", 2861 requested_cmds_max, shost->can_queue); 2862 goto check; 2863 } 2864 2865 return scsi_cmds; 2866 } 2867 EXPORT_SYMBOL_GPL(iscsi_host_get_max_scsi_cmds); 2868 2869 /** 2870 * iscsi_host_add - add host to system 2871 * @shost: scsi host 2872 * @pdev: parent device 2873 * 2874 * This should be called by partial offload and software iscsi drivers 2875 * to add a host to the system. 
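 *
 * Typical (illustrative) bring-up order in such a driver, shown only
 * as a sketch:
 *
 *	shost = iscsi_host_alloc(&example_sht, sizeof(struct example_priv), true);
 *	if (iscsi_host_add(shost, dev))
 *		goto free_host;
 *	cls_session = iscsi_session_setup(...);
 *
 * ("example_sht", "example_priv" and "dev" stand in for the driver's
 * own template, private data and parent device.)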
2876 */ 2877 int iscsi_host_add(struct Scsi_Host *shost, struct device *pdev) 2878 { 2879 if (!shost->can_queue) 2880 shost->can_queue = ISCSI_DEF_XMIT_CMDS_MAX; 2881 2882 if (!shost->cmd_per_lun) 2883 shost->cmd_per_lun = ISCSI_DEF_CMD_PER_LUN; 2884 2885 return scsi_add_host(shost, pdev); 2886 } 2887 EXPORT_SYMBOL_GPL(iscsi_host_add); 2888 2889 /** 2890 * iscsi_host_alloc - allocate a host and driver data 2891 * @sht: scsi host template 2892 * @dd_data_size: driver host data size 2893 * @xmit_can_sleep: bool indicating if LLD will queue IO from a work queue 2894 * 2895 * This should be called by partial offload and software iscsi drivers. 2896 * To access the driver specific memory use the iscsi_host_priv() macro. 2897 */ 2898 struct Scsi_Host *iscsi_host_alloc(const struct scsi_host_template *sht, 2899 int dd_data_size, bool xmit_can_sleep) 2900 { 2901 struct Scsi_Host *shost; 2902 struct iscsi_host *ihost; 2903 2904 shost = scsi_host_alloc(sht, sizeof(struct iscsi_host) + dd_data_size); 2905 if (!shost) 2906 return NULL; 2907 ihost = shost_priv(shost); 2908 2909 if (xmit_can_sleep) { 2910 ihost->workq = alloc_workqueue("iscsi_q_%d", 2911 WQ_SYSFS | __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_UNBOUND, 2912 1, shost->host_no); 2913 if (!ihost->workq) 2914 goto free_host; 2915 } 2916 2917 spin_lock_init(&ihost->lock); 2918 ihost->state = ISCSI_HOST_SETUP; 2919 ihost->num_sessions = 0; 2920 init_waitqueue_head(&ihost->session_removal_wq); 2921 return shost; 2922 2923 free_host: 2924 scsi_host_put(shost); 2925 return NULL; 2926 } 2927 EXPORT_SYMBOL_GPL(iscsi_host_alloc); 2928 2929 static void iscsi_notify_host_removed(struct iscsi_cls_session *cls_session) 2930 { 2931 iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_INVALID_HOST); 2932 } 2933 2934 /** 2935 * iscsi_host_remove - remove host and sessions 2936 * @shost: scsi host 2937 * @is_shutdown: true if called from a driver shutdown callout 2938 * 2939 * If there are any sessions left, this will initiate the removal and wait 2940 * for the completion. 2941 */ 2942 void iscsi_host_remove(struct Scsi_Host *shost, bool is_shutdown) 2943 { 2944 struct iscsi_host *ihost = shost_priv(shost); 2945 unsigned long flags; 2946 2947 spin_lock_irqsave(&ihost->lock, flags); 2948 ihost->state = ISCSI_HOST_REMOVED; 2949 spin_unlock_irqrestore(&ihost->lock, flags); 2950 2951 if (!is_shutdown) 2952 iscsi_host_for_each_session(shost, iscsi_notify_host_removed); 2953 else 2954 iscsi_host_for_each_session(shost, iscsi_force_destroy_session); 2955 2956 wait_event_interruptible(ihost->session_removal_wq, 2957 ihost->num_sessions == 0); 2958 if (signal_pending(current)) 2959 flush_signals(current); 2960 2961 scsi_remove_host(shost); 2962 } 2963 EXPORT_SYMBOL_GPL(iscsi_host_remove); 2964 2965 void iscsi_host_free(struct Scsi_Host *shost) 2966 { 2967 struct iscsi_host *ihost = shost_priv(shost); 2968 2969 if (ihost->workq) 2970 destroy_workqueue(ihost->workq); 2971 2972 kfree(ihost->netdev); 2973 kfree(ihost->hwaddress); 2974 kfree(ihost->initiatorname); 2975 scsi_host_put(shost); 2976 } 2977 EXPORT_SYMBOL_GPL(iscsi_host_free); 2978 2979 static void iscsi_host_dec_session_cnt(struct Scsi_Host *shost) 2980 { 2981 struct iscsi_host *ihost = shost_priv(shost); 2982 unsigned long flags; 2983 2984 shost = scsi_host_get(shost); 2985 if (!shost) { 2986 printk(KERN_ERR "Invalid state. 
Cannot notify host removal " 2987 "of session teardown event because host already " 2988 "removed.\n"); 2989 return; 2990 } 2991 2992 spin_lock_irqsave(&ihost->lock, flags); 2993 ihost->num_sessions--; 2994 if (ihost->num_sessions == 0) 2995 wake_up(&ihost->session_removal_wq); 2996 spin_unlock_irqrestore(&ihost->lock, flags); 2997 scsi_host_put(shost); 2998 } 2999 3000 /** 3001 * iscsi_session_setup - create iscsi cls session and host and session 3002 * @iscsit: iscsi transport template 3003 * @shost: scsi host 3004 * @cmds_max: session can queue 3005 * @dd_size: private driver data size, added to session allocation size 3006 * @cmd_task_size: LLD task private data size 3007 * @initial_cmdsn: initial CmdSN 3008 * @id: target ID to add to this session 3009 * 3010 * This can be used by software iscsi_transports that allocate 3011 * a session per scsi host. 3012 * 3013 * Callers should set cmds_max to the largest total numer (mgmt + scsi) of 3014 * tasks they support. The iscsi layer reserves ISCSI_MGMT_CMDS_MAX tasks 3015 * for nop handling and login/logout requests. 3016 */ 3017 struct iscsi_cls_session * 3018 iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost, 3019 uint16_t cmds_max, int dd_size, int cmd_task_size, 3020 uint32_t initial_cmdsn, unsigned int id) 3021 { 3022 struct iscsi_host *ihost = shost_priv(shost); 3023 struct iscsi_session *session; 3024 struct iscsi_cls_session *cls_session; 3025 int cmd_i, scsi_cmds; 3026 unsigned long flags; 3027 3028 spin_lock_irqsave(&ihost->lock, flags); 3029 if (ihost->state == ISCSI_HOST_REMOVED) { 3030 spin_unlock_irqrestore(&ihost->lock, flags); 3031 return NULL; 3032 } 3033 ihost->num_sessions++; 3034 spin_unlock_irqrestore(&ihost->lock, flags); 3035 3036 scsi_cmds = iscsi_host_get_max_scsi_cmds(shost, cmds_max); 3037 if (scsi_cmds < 0) 3038 goto dec_session_count; 3039 3040 cls_session = iscsi_alloc_session(shost, iscsit, 3041 sizeof(struct iscsi_session) + 3042 dd_size); 3043 if (!cls_session) 3044 goto dec_session_count; 3045 session = cls_session->dd_data; 3046 session->cls_session = cls_session; 3047 session->host = shost; 3048 session->state = ISCSI_STATE_FREE; 3049 session->fast_abort = 1; 3050 session->tgt_reset_timeout = 30; 3051 session->lu_reset_timeout = 15; 3052 session->abort_timeout = 10; 3053 session->scsi_cmds_max = scsi_cmds; 3054 session->cmds_max = scsi_cmds + ISCSI_MGMT_CMDS_MAX; 3055 session->queued_cmdsn = session->cmdsn = initial_cmdsn; 3056 session->exp_cmdsn = initial_cmdsn + 1; 3057 session->max_cmdsn = initial_cmdsn + 1; 3058 session->max_r2t = 1; 3059 session->tt = iscsit; 3060 session->dd_data = cls_session->dd_data + sizeof(*session); 3061 3062 session->tmf_state = TMF_INITIAL; 3063 timer_setup(&session->tmf_timer, iscsi_tmf_timedout, 0); 3064 mutex_init(&session->eh_mutex); 3065 init_waitqueue_head(&session->ehwait); 3066 3067 spin_lock_init(&session->frwd_lock); 3068 spin_lock_init(&session->back_lock); 3069 3070 /* initialize SCSI PDU commands pool */ 3071 if (iscsi_pool_init(&session->cmdpool, session->cmds_max, 3072 (void***)&session->cmds, 3073 cmd_task_size + sizeof(struct iscsi_task))) 3074 goto cmdpool_alloc_fail; 3075 3076 /* pre-format cmds pool with ITT */ 3077 for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) { 3078 struct iscsi_task *task = session->cmds[cmd_i]; 3079 3080 if (cmd_task_size) 3081 task->dd_data = &task[1]; 3082 task->itt = cmd_i; 3083 task->state = ISCSI_TASK_FREE; 3084 INIT_LIST_HEAD(&task->running); 3085 } 3086 3087 if (!try_module_get(iscsit->owner)) 
3088 goto module_get_fail; 3089 3090 if (iscsi_add_session(cls_session, id)) 3091 goto cls_session_fail; 3092 3093 return cls_session; 3094 3095 cls_session_fail: 3096 module_put(iscsit->owner); 3097 module_get_fail: 3098 iscsi_pool_free(&session->cmdpool); 3099 cmdpool_alloc_fail: 3100 iscsi_free_session(cls_session); 3101 dec_session_count: 3102 iscsi_host_dec_session_cnt(shost); 3103 return NULL; 3104 } 3105 EXPORT_SYMBOL_GPL(iscsi_session_setup); 3106 3107 /* 3108 * issi_session_remove - Remove session from iSCSI class. 3109 */ 3110 void iscsi_session_remove(struct iscsi_cls_session *cls_session) 3111 { 3112 struct iscsi_session *session = cls_session->dd_data; 3113 struct Scsi_Host *shost = session->host; 3114 3115 iscsi_remove_session(cls_session); 3116 /* 3117 * host removal only has to wait for its children to be removed from 3118 * sysfs, and iscsi_tcp needs to do iscsi_host_remove before freeing 3119 * the session, so drop the session count here. 3120 */ 3121 iscsi_host_dec_session_cnt(shost); 3122 } 3123 EXPORT_SYMBOL_GPL(iscsi_session_remove); 3124 3125 /** 3126 * iscsi_session_free - Free iscsi session and it's resources 3127 * @cls_session: iscsi session 3128 */ 3129 void iscsi_session_free(struct iscsi_cls_session *cls_session) 3130 { 3131 struct iscsi_session *session = cls_session->dd_data; 3132 struct module *owner = cls_session->transport->owner; 3133 3134 iscsi_pool_free(&session->cmdpool); 3135 kfree(session->password); 3136 kfree(session->password_in); 3137 kfree(session->username); 3138 kfree(session->username_in); 3139 kfree(session->targetname); 3140 kfree(session->targetalias); 3141 kfree(session->initiatorname); 3142 kfree(session->boot_root); 3143 kfree(session->boot_nic); 3144 kfree(session->boot_target); 3145 kfree(session->ifacename); 3146 kfree(session->portal_type); 3147 kfree(session->discovery_parent_type); 3148 3149 iscsi_free_session(cls_session); 3150 module_put(owner); 3151 } 3152 EXPORT_SYMBOL_GPL(iscsi_session_free); 3153 3154 /** 3155 * iscsi_session_teardown - destroy session and cls_session 3156 * @cls_session: iscsi session 3157 */ 3158 void iscsi_session_teardown(struct iscsi_cls_session *cls_session) 3159 { 3160 iscsi_session_remove(cls_session); 3161 iscsi_session_free(cls_session); 3162 } 3163 EXPORT_SYMBOL_GPL(iscsi_session_teardown); 3164 3165 /** 3166 * iscsi_conn_setup - create iscsi_cls_conn and iscsi_conn 3167 * @cls_session: iscsi_cls_session 3168 * @dd_size: private driver data size 3169 * @conn_idx: cid 3170 */ 3171 struct iscsi_cls_conn * 3172 iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size, 3173 uint32_t conn_idx) 3174 { 3175 struct iscsi_session *session = cls_session->dd_data; 3176 struct iscsi_conn *conn; 3177 struct iscsi_cls_conn *cls_conn; 3178 char *data; 3179 int err; 3180 3181 cls_conn = iscsi_alloc_conn(cls_session, sizeof(*conn) + dd_size, 3182 conn_idx); 3183 if (!cls_conn) 3184 return NULL; 3185 conn = cls_conn->dd_data; 3186 3187 conn->dd_data = cls_conn->dd_data + sizeof(*conn); 3188 conn->session = session; 3189 conn->cls_conn = cls_conn; 3190 conn->c_stage = ISCSI_CONN_INITIAL_STAGE; 3191 conn->id = conn_idx; 3192 conn->exp_statsn = 0; 3193 3194 timer_setup(&conn->transport_timer, iscsi_check_transport_timeouts, 0); 3195 3196 INIT_LIST_HEAD(&conn->mgmtqueue); 3197 INIT_LIST_HEAD(&conn->cmdqueue); 3198 INIT_LIST_HEAD(&conn->requeue); 3199 INIT_WORK(&conn->xmitwork, iscsi_xmitworker); 3200 3201 /* allocate login_task used for the login/text sequences */ 3202 spin_lock_bh(&session->frwd_lock); 
3203 if (!kfifo_out(&session->cmdpool.queue, 3204 (void*)&conn->login_task, 3205 sizeof(void*))) { 3206 spin_unlock_bh(&session->frwd_lock); 3207 goto login_task_alloc_fail; 3208 } 3209 spin_unlock_bh(&session->frwd_lock); 3210 3211 data = (char *) __get_free_pages(GFP_KERNEL, 3212 get_order(ISCSI_DEF_MAX_RECV_SEG_LEN)); 3213 if (!data) 3214 goto login_task_data_alloc_fail; 3215 conn->login_task->data = conn->data = data; 3216 3217 err = iscsi_add_conn(cls_conn); 3218 if (err) 3219 goto login_task_add_dev_fail; 3220 3221 return cls_conn; 3222 3223 login_task_add_dev_fail: 3224 free_pages((unsigned long) conn->data, 3225 get_order(ISCSI_DEF_MAX_RECV_SEG_LEN)); 3226 3227 login_task_data_alloc_fail: 3228 kfifo_in(&session->cmdpool.queue, (void*)&conn->login_task, 3229 sizeof(void*)); 3230 login_task_alloc_fail: 3231 iscsi_put_conn(cls_conn); 3232 return NULL; 3233 } 3234 EXPORT_SYMBOL_GPL(iscsi_conn_setup); 3235 3236 /** 3237 * iscsi_conn_teardown - teardown iscsi connection 3238 * @cls_conn: iscsi class connection 3239 * 3240 * TODO: we may need to make this into a two step process 3241 * like scsi-mls remove + put host 3242 */ 3243 void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn) 3244 { 3245 struct iscsi_conn *conn = cls_conn->dd_data; 3246 struct iscsi_session *session = conn->session; 3247 3248 iscsi_remove_conn(cls_conn); 3249 3250 del_timer_sync(&conn->transport_timer); 3251 3252 mutex_lock(&session->eh_mutex); 3253 spin_lock_bh(&session->frwd_lock); 3254 conn->c_stage = ISCSI_CONN_CLEANUP_WAIT; 3255 if (session->leadconn == conn) { 3256 /* 3257 * leading connection? then give up on recovery. 3258 */ 3259 session->state = ISCSI_STATE_TERMINATE; 3260 wake_up(&session->ehwait); 3261 } 3262 spin_unlock_bh(&session->frwd_lock); 3263 3264 /* flush queued up work because we free the connection below */ 3265 iscsi_suspend_tx(conn); 3266 3267 spin_lock_bh(&session->frwd_lock); 3268 free_pages((unsigned long) conn->data, 3269 get_order(ISCSI_DEF_MAX_RECV_SEG_LEN)); 3270 kfree(conn->persistent_address); 3271 kfree(conn->local_ipaddr); 3272 /* regular RX path uses back_lock */ 3273 spin_lock_bh(&session->back_lock); 3274 kfifo_in(&session->cmdpool.queue, (void*)&conn->login_task, 3275 sizeof(void*)); 3276 spin_unlock_bh(&session->back_lock); 3277 if (session->leadconn == conn) 3278 session->leadconn = NULL; 3279 spin_unlock_bh(&session->frwd_lock); 3280 mutex_unlock(&session->eh_mutex); 3281 3282 iscsi_put_conn(cls_conn); 3283 } 3284 EXPORT_SYMBOL_GPL(iscsi_conn_teardown); 3285 3286 int iscsi_conn_start(struct iscsi_cls_conn *cls_conn) 3287 { 3288 struct iscsi_conn *conn = cls_conn->dd_data; 3289 struct iscsi_session *session = conn->session; 3290 3291 if (!session) { 3292 iscsi_conn_printk(KERN_ERR, conn, 3293 "can't start unbound connection\n"); 3294 return -EPERM; 3295 } 3296 3297 if ((session->imm_data_en || !session->initial_r2t_en) && 3298 session->first_burst > session->max_burst) { 3299 iscsi_conn_printk(KERN_INFO, conn, "invalid burst lengths: " 3300 "first_burst %d max_burst %d\n", 3301 session->first_burst, session->max_burst); 3302 return -EINVAL; 3303 } 3304 3305 if (conn->ping_timeout && !conn->recv_timeout) { 3306 iscsi_conn_printk(KERN_ERR, conn, "invalid recv timeout of " 3307 "zero. Using 5 seconds\n."); 3308 conn->recv_timeout = 5; 3309 } 3310 3311 if (conn->recv_timeout && !conn->ping_timeout) { 3312 iscsi_conn_printk(KERN_ERR, conn, "invalid ping timeout of " 3313 "zero. 
Using 5 seconds.\n"); 3314 conn->ping_timeout = 5; 3315 } 3316 3317 spin_lock_bh(&session->frwd_lock); 3318 conn->c_stage = ISCSI_CONN_STARTED; 3319 session->state = ISCSI_STATE_LOGGED_IN; 3320 session->queued_cmdsn = session->cmdsn; 3321 3322 conn->last_recv = jiffies; 3323 conn->last_ping = jiffies; 3324 if (conn->recv_timeout && conn->ping_timeout) 3325 mod_timer(&conn->transport_timer, 3326 jiffies + (conn->recv_timeout * HZ)); 3327 3328 switch(conn->stop_stage) { 3329 case STOP_CONN_RECOVER: 3330 /* 3331 * unblock eh_abort() if it is blocked. re-try all 3332 * commands after successful recovery 3333 */ 3334 conn->stop_stage = 0; 3335 session->tmf_state = TMF_INITIAL; 3336 session->age++; 3337 if (session->age == 16) 3338 session->age = 0; 3339 break; 3340 case STOP_CONN_TERM: 3341 conn->stop_stage = 0; 3342 break; 3343 default: 3344 break; 3345 } 3346 spin_unlock_bh(&session->frwd_lock); 3347 3348 iscsi_unblock_session(session->cls_session); 3349 wake_up(&session->ehwait); 3350 return 0; 3351 } 3352 EXPORT_SYMBOL_GPL(iscsi_conn_start); 3353 3354 static void 3355 fail_mgmt_tasks(struct iscsi_session *session, struct iscsi_conn *conn) 3356 { 3357 struct iscsi_task *task; 3358 int i, state; 3359 3360 for (i = 0; i < conn->session->cmds_max; i++) { 3361 task = conn->session->cmds[i]; 3362 if (task->sc) 3363 continue; 3364 3365 if (task->state == ISCSI_TASK_FREE) 3366 continue; 3367 3368 ISCSI_DBG_SESSION(conn->session, 3369 "failing mgmt itt 0x%x state %d\n", 3370 task->itt, task->state); 3371 3372 spin_lock_bh(&session->back_lock); 3373 if (cleanup_queued_task(task)) { 3374 spin_unlock_bh(&session->back_lock); 3375 continue; 3376 } 3377 3378 state = ISCSI_TASK_ABRT_SESS_RECOV; 3379 if (task->state == ISCSI_TASK_PENDING) 3380 state = ISCSI_TASK_COMPLETED; 3381 iscsi_complete_task(task, state); 3382 spin_unlock_bh(&session->back_lock); 3383 } 3384 } 3385 3386 void iscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag) 3387 { 3388 struct iscsi_conn *conn = cls_conn->dd_data; 3389 struct iscsi_session *session = conn->session; 3390 int old_stop_stage; 3391 3392 mutex_lock(&session->eh_mutex); 3393 spin_lock_bh(&session->frwd_lock); 3394 if (conn->stop_stage == STOP_CONN_TERM) { 3395 spin_unlock_bh(&session->frwd_lock); 3396 mutex_unlock(&session->eh_mutex); 3397 return; 3398 } 3399 3400 /* 3401 * When this is called for the in_login state, we only want to clean 3402 * up the login task and connection. We do not need to block and set 3403 * the recovery state again 3404 */ 3405 if (flag == STOP_CONN_TERM) 3406 session->state = ISCSI_STATE_TERMINATE; 3407 else if (conn->stop_stage != STOP_CONN_RECOVER) 3408 session->state = ISCSI_STATE_IN_RECOVERY; 3409 3410 old_stop_stage = conn->stop_stage; 3411 conn->stop_stage = flag; 3412 spin_unlock_bh(&session->frwd_lock); 3413 3414 del_timer_sync(&conn->transport_timer); 3415 iscsi_suspend_tx(conn); 3416 3417 spin_lock_bh(&session->frwd_lock); 3418 conn->c_stage = ISCSI_CONN_STOPPED; 3419 spin_unlock_bh(&session->frwd_lock); 3420 3421 /* 3422 * for connection level recovery we should not calculate 3423 * header digest. conn->hdr_size used for optimization 3424 * in hdr_extract() and will be re-negotiated at 3425 * set_param() time. 
3426 */ 3427 if (flag == STOP_CONN_RECOVER) { 3428 conn->hdrdgst_en = 0; 3429 conn->datadgst_en = 0; 3430 if (session->state == ISCSI_STATE_IN_RECOVERY && 3431 old_stop_stage != STOP_CONN_RECOVER) { 3432 ISCSI_DBG_SESSION(session, "blocking session\n"); 3433 iscsi_block_session(session->cls_session); 3434 } 3435 } 3436 3437 /* 3438 * flush queues. 3439 */ 3440 spin_lock_bh(&session->frwd_lock); 3441 fail_scsi_tasks(conn, -1, DID_TRANSPORT_DISRUPTED); 3442 fail_mgmt_tasks(session, conn); 3443 memset(&session->tmhdr, 0, sizeof(session->tmhdr)); 3444 spin_unlock_bh(&session->frwd_lock); 3445 mutex_unlock(&session->eh_mutex); 3446 } 3447 EXPORT_SYMBOL_GPL(iscsi_conn_stop); 3448 3449 int iscsi_conn_bind(struct iscsi_cls_session *cls_session, 3450 struct iscsi_cls_conn *cls_conn, int is_leading) 3451 { 3452 struct iscsi_session *session = cls_session->dd_data; 3453 struct iscsi_conn *conn = cls_conn->dd_data; 3454 3455 spin_lock_bh(&session->frwd_lock); 3456 if (is_leading) 3457 session->leadconn = conn; 3458 3459 set_bit(ISCSI_CONN_FLAG_BOUND, &conn->flags); 3460 spin_unlock_bh(&session->frwd_lock); 3461 3462 /* 3463 * The target could have reduced it's window size between logins, so 3464 * we have to reset max/exp cmdsn so we can see the new values. 3465 */ 3466 spin_lock_bh(&session->back_lock); 3467 session->max_cmdsn = session->exp_cmdsn = session->cmdsn + 1; 3468 spin_unlock_bh(&session->back_lock); 3469 /* 3470 * Unblock xmitworker(), Login Phase will pass through. 3471 */ 3472 clear_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags); 3473 clear_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags); 3474 return 0; 3475 } 3476 EXPORT_SYMBOL_GPL(iscsi_conn_bind); 3477 3478 int iscsi_switch_str_param(char **param, char *new_val_buf) 3479 { 3480 char *new_val; 3481 3482 if (*param) { 3483 if (!strcmp(*param, new_val_buf)) 3484 return 0; 3485 } 3486 3487 new_val = kstrdup(new_val_buf, GFP_NOIO); 3488 if (!new_val) 3489 return -ENOMEM; 3490 3491 kfree(*param); 3492 *param = new_val; 3493 return 0; 3494 } 3495 EXPORT_SYMBOL_GPL(iscsi_switch_str_param); 3496 3497 int iscsi_set_param(struct iscsi_cls_conn *cls_conn, 3498 enum iscsi_param param, char *buf, int buflen) 3499 { 3500 struct iscsi_conn *conn = cls_conn->dd_data; 3501 struct iscsi_session *session = conn->session; 3502 int val; 3503 3504 switch(param) { 3505 case ISCSI_PARAM_FAST_ABORT: 3506 sscanf(buf, "%d", &session->fast_abort); 3507 break; 3508 case ISCSI_PARAM_ABORT_TMO: 3509 sscanf(buf, "%d", &session->abort_timeout); 3510 break; 3511 case ISCSI_PARAM_LU_RESET_TMO: 3512 sscanf(buf, "%d", &session->lu_reset_timeout); 3513 break; 3514 case ISCSI_PARAM_TGT_RESET_TMO: 3515 sscanf(buf, "%d", &session->tgt_reset_timeout); 3516 break; 3517 case ISCSI_PARAM_PING_TMO: 3518 sscanf(buf, "%d", &conn->ping_timeout); 3519 break; 3520 case ISCSI_PARAM_RECV_TMO: 3521 sscanf(buf, "%d", &conn->recv_timeout); 3522 break; 3523 case ISCSI_PARAM_MAX_RECV_DLENGTH: 3524 sscanf(buf, "%d", &conn->max_recv_dlength); 3525 break; 3526 case ISCSI_PARAM_MAX_XMIT_DLENGTH: 3527 sscanf(buf, "%d", &conn->max_xmit_dlength); 3528 break; 3529 case ISCSI_PARAM_HDRDGST_EN: 3530 sscanf(buf, "%d", &conn->hdrdgst_en); 3531 break; 3532 case ISCSI_PARAM_DATADGST_EN: 3533 sscanf(buf, "%d", &conn->datadgst_en); 3534 break; 3535 case ISCSI_PARAM_INITIAL_R2T_EN: 3536 sscanf(buf, "%d", &session->initial_r2t_en); 3537 break; 3538 case ISCSI_PARAM_MAX_R2T: 3539 sscanf(buf, "%hu", &session->max_r2t); 3540 break; 3541 case ISCSI_PARAM_IMM_DATA_EN: 3542 sscanf(buf, "%d", &session->imm_data_en); 
3543 break; 3544 case ISCSI_PARAM_FIRST_BURST: 3545 sscanf(buf, "%d", &session->first_burst); 3546 break; 3547 case ISCSI_PARAM_MAX_BURST: 3548 sscanf(buf, "%d", &session->max_burst); 3549 break; 3550 case ISCSI_PARAM_PDU_INORDER_EN: 3551 sscanf(buf, "%d", &session->pdu_inorder_en); 3552 break; 3553 case ISCSI_PARAM_DATASEQ_INORDER_EN: 3554 sscanf(buf, "%d", &session->dataseq_inorder_en); 3555 break; 3556 case ISCSI_PARAM_ERL: 3557 sscanf(buf, "%d", &session->erl); 3558 break; 3559 case ISCSI_PARAM_EXP_STATSN: 3560 sscanf(buf, "%u", &conn->exp_statsn); 3561 break; 3562 case ISCSI_PARAM_USERNAME: 3563 return iscsi_switch_str_param(&session->username, buf); 3564 case ISCSI_PARAM_USERNAME_IN: 3565 return iscsi_switch_str_param(&session->username_in, buf); 3566 case ISCSI_PARAM_PASSWORD: 3567 return iscsi_switch_str_param(&session->password, buf); 3568 case ISCSI_PARAM_PASSWORD_IN: 3569 return iscsi_switch_str_param(&session->password_in, buf); 3570 case ISCSI_PARAM_TARGET_NAME: 3571 return iscsi_switch_str_param(&session->targetname, buf); 3572 case ISCSI_PARAM_TARGET_ALIAS: 3573 return iscsi_switch_str_param(&session->targetalias, buf); 3574 case ISCSI_PARAM_TPGT: 3575 sscanf(buf, "%d", &session->tpgt); 3576 break; 3577 case ISCSI_PARAM_PERSISTENT_PORT: 3578 sscanf(buf, "%d", &conn->persistent_port); 3579 break; 3580 case ISCSI_PARAM_PERSISTENT_ADDRESS: 3581 return iscsi_switch_str_param(&conn->persistent_address, buf); 3582 case ISCSI_PARAM_IFACE_NAME: 3583 return iscsi_switch_str_param(&session->ifacename, buf); 3584 case ISCSI_PARAM_INITIATOR_NAME: 3585 return iscsi_switch_str_param(&session->initiatorname, buf); 3586 case ISCSI_PARAM_BOOT_ROOT: 3587 return iscsi_switch_str_param(&session->boot_root, buf); 3588 case ISCSI_PARAM_BOOT_NIC: 3589 return iscsi_switch_str_param(&session->boot_nic, buf); 3590 case ISCSI_PARAM_BOOT_TARGET: 3591 return iscsi_switch_str_param(&session->boot_target, buf); 3592 case ISCSI_PARAM_PORTAL_TYPE: 3593 return iscsi_switch_str_param(&session->portal_type, buf); 3594 case ISCSI_PARAM_DISCOVERY_PARENT_TYPE: 3595 return iscsi_switch_str_param(&session->discovery_parent_type, 3596 buf); 3597 case ISCSI_PARAM_DISCOVERY_SESS: 3598 sscanf(buf, "%d", &val); 3599 session->discovery_sess = !!val; 3600 break; 3601 case ISCSI_PARAM_LOCAL_IPADDR: 3602 return iscsi_switch_str_param(&conn->local_ipaddr, buf); 3603 default: 3604 return -ENOSYS; 3605 } 3606 3607 return 0; 3608 } 3609 EXPORT_SYMBOL_GPL(iscsi_set_param); 3610 3611 int iscsi_session_get_param(struct iscsi_cls_session *cls_session, 3612 enum iscsi_param param, char *buf) 3613 { 3614 struct iscsi_session *session = cls_session->dd_data; 3615 int len; 3616 3617 switch(param) { 3618 case ISCSI_PARAM_FAST_ABORT: 3619 len = sysfs_emit(buf, "%d\n", session->fast_abort); 3620 break; 3621 case ISCSI_PARAM_ABORT_TMO: 3622 len = sysfs_emit(buf, "%d\n", session->abort_timeout); 3623 break; 3624 case ISCSI_PARAM_LU_RESET_TMO: 3625 len = sysfs_emit(buf, "%d\n", session->lu_reset_timeout); 3626 break; 3627 case ISCSI_PARAM_TGT_RESET_TMO: 3628 len = sysfs_emit(buf, "%d\n", session->tgt_reset_timeout); 3629 break; 3630 case ISCSI_PARAM_INITIAL_R2T_EN: 3631 len = sysfs_emit(buf, "%d\n", session->initial_r2t_en); 3632 break; 3633 case ISCSI_PARAM_MAX_R2T: 3634 len = sysfs_emit(buf, "%hu\n", session->max_r2t); 3635 break; 3636 case ISCSI_PARAM_IMM_DATA_EN: 3637 len = sysfs_emit(buf, "%d\n", session->imm_data_en); 3638 break; 3639 case ISCSI_PARAM_FIRST_BURST: 3640 len = sysfs_emit(buf, "%u\n", session->first_burst); 3641 break; 
3642 case ISCSI_PARAM_MAX_BURST: 3643 len = sysfs_emit(buf, "%u\n", session->max_burst); 3644 break; 3645 case ISCSI_PARAM_PDU_INORDER_EN: 3646 len = sysfs_emit(buf, "%d\n", session->pdu_inorder_en); 3647 break; 3648 case ISCSI_PARAM_DATASEQ_INORDER_EN: 3649 len = sysfs_emit(buf, "%d\n", session->dataseq_inorder_en); 3650 break; 3651 case ISCSI_PARAM_DEF_TASKMGMT_TMO: 3652 len = sysfs_emit(buf, "%d\n", session->def_taskmgmt_tmo); 3653 break; 3654 case ISCSI_PARAM_ERL: 3655 len = sysfs_emit(buf, "%d\n", session->erl); 3656 break; 3657 case ISCSI_PARAM_TARGET_NAME: 3658 len = sysfs_emit(buf, "%s\n", session->targetname); 3659 break; 3660 case ISCSI_PARAM_TARGET_ALIAS: 3661 len = sysfs_emit(buf, "%s\n", session->targetalias); 3662 break; 3663 case ISCSI_PARAM_TPGT: 3664 len = sysfs_emit(buf, "%d\n", session->tpgt); 3665 break; 3666 case ISCSI_PARAM_USERNAME: 3667 len = sysfs_emit(buf, "%s\n", session->username); 3668 break; 3669 case ISCSI_PARAM_USERNAME_IN: 3670 len = sysfs_emit(buf, "%s\n", session->username_in); 3671 break; 3672 case ISCSI_PARAM_PASSWORD: 3673 len = sysfs_emit(buf, "%s\n", session->password); 3674 break; 3675 case ISCSI_PARAM_PASSWORD_IN: 3676 len = sysfs_emit(buf, "%s\n", session->password_in); 3677 break; 3678 case ISCSI_PARAM_IFACE_NAME: 3679 len = sysfs_emit(buf, "%s\n", session->ifacename); 3680 break; 3681 case ISCSI_PARAM_INITIATOR_NAME: 3682 len = sysfs_emit(buf, "%s\n", session->initiatorname); 3683 break; 3684 case ISCSI_PARAM_BOOT_ROOT: 3685 len = sysfs_emit(buf, "%s\n", session->boot_root); 3686 break; 3687 case ISCSI_PARAM_BOOT_NIC: 3688 len = sysfs_emit(buf, "%s\n", session->boot_nic); 3689 break; 3690 case ISCSI_PARAM_BOOT_TARGET: 3691 len = sysfs_emit(buf, "%s\n", session->boot_target); 3692 break; 3693 case ISCSI_PARAM_AUTO_SND_TGT_DISABLE: 3694 len = sysfs_emit(buf, "%u\n", session->auto_snd_tgt_disable); 3695 break; 3696 case ISCSI_PARAM_DISCOVERY_SESS: 3697 len = sysfs_emit(buf, "%u\n", session->discovery_sess); 3698 break; 3699 case ISCSI_PARAM_PORTAL_TYPE: 3700 len = sysfs_emit(buf, "%s\n", session->portal_type); 3701 break; 3702 case ISCSI_PARAM_CHAP_AUTH_EN: 3703 len = sysfs_emit(buf, "%u\n", session->chap_auth_en); 3704 break; 3705 case ISCSI_PARAM_DISCOVERY_LOGOUT_EN: 3706 len = sysfs_emit(buf, "%u\n", session->discovery_logout_en); 3707 break; 3708 case ISCSI_PARAM_BIDI_CHAP_EN: 3709 len = sysfs_emit(buf, "%u\n", session->bidi_chap_en); 3710 break; 3711 case ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL: 3712 len = sysfs_emit(buf, "%u\n", session->discovery_auth_optional); 3713 break; 3714 case ISCSI_PARAM_DEF_TIME2WAIT: 3715 len = sysfs_emit(buf, "%d\n", session->time2wait); 3716 break; 3717 case ISCSI_PARAM_DEF_TIME2RETAIN: 3718 len = sysfs_emit(buf, "%d\n", session->time2retain); 3719 break; 3720 case ISCSI_PARAM_TSID: 3721 len = sysfs_emit(buf, "%u\n", session->tsid); 3722 break; 3723 case ISCSI_PARAM_ISID: 3724 len = sysfs_emit(buf, "%02x%02x%02x%02x%02x%02x\n", 3725 session->isid[0], session->isid[1], 3726 session->isid[2], session->isid[3], 3727 session->isid[4], session->isid[5]); 3728 break; 3729 case ISCSI_PARAM_DISCOVERY_PARENT_IDX: 3730 len = sysfs_emit(buf, "%u\n", session->discovery_parent_idx); 3731 break; 3732 case ISCSI_PARAM_DISCOVERY_PARENT_TYPE: 3733 if (session->discovery_parent_type) 3734 len = sysfs_emit(buf, "%s\n", 3735 session->discovery_parent_type); 3736 else 3737 len = sysfs_emit(buf, "\n"); 3738 break; 3739 default: 3740 return -ENOSYS; 3741 } 3742 3743 return len; 3744 } 3745 EXPORT_SYMBOL_GPL(iscsi_session_get_param); 3746 
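/*
 * Note: the parameter get/set helpers in this file are normally not called
 * directly by users of a LLD; a software transport typically just points
 * its struct iscsi_transport at them, e.g. (illustrative wiring only):
 *
 *	.set_param		= iscsi_set_param,
 *	.get_conn_param		= iscsi_conn_get_param,
 *	.get_session_param	= iscsi_session_get_param,
 *	.get_host_param		= iscsi_host_get_param,
 *	.set_host_param		= iscsi_host_set_param,
 */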
3747 int iscsi_conn_get_addr_param(struct sockaddr_storage *addr, 3748 enum iscsi_param param, char *buf) 3749 { 3750 struct sockaddr_in6 *sin6 = NULL; 3751 struct sockaddr_in *sin = NULL; 3752 int len; 3753 3754 switch (addr->ss_family) { 3755 case AF_INET: 3756 sin = (struct sockaddr_in *)addr; 3757 break; 3758 case AF_INET6: 3759 sin6 = (struct sockaddr_in6 *)addr; 3760 break; 3761 default: 3762 return -EINVAL; 3763 } 3764 3765 switch (param) { 3766 case ISCSI_PARAM_CONN_ADDRESS: 3767 case ISCSI_HOST_PARAM_IPADDRESS: 3768 if (sin) 3769 len = sysfs_emit(buf, "%pI4\n", &sin->sin_addr.s_addr); 3770 else 3771 len = sysfs_emit(buf, "%pI6\n", &sin6->sin6_addr); 3772 break; 3773 case ISCSI_PARAM_CONN_PORT: 3774 case ISCSI_PARAM_LOCAL_PORT: 3775 if (sin) 3776 len = sysfs_emit(buf, "%hu\n", be16_to_cpu(sin->sin_port)); 3777 else 3778 len = sysfs_emit(buf, "%hu\n", 3779 be16_to_cpu(sin6->sin6_port)); 3780 break; 3781 default: 3782 return -EINVAL; 3783 } 3784 3785 return len; 3786 } 3787 EXPORT_SYMBOL_GPL(iscsi_conn_get_addr_param); 3788 3789 int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn, 3790 enum iscsi_param param, char *buf) 3791 { 3792 struct iscsi_conn *conn = cls_conn->dd_data; 3793 int len; 3794 3795 switch(param) { 3796 case ISCSI_PARAM_PING_TMO: 3797 len = sysfs_emit(buf, "%u\n", conn->ping_timeout); 3798 break; 3799 case ISCSI_PARAM_RECV_TMO: 3800 len = sysfs_emit(buf, "%u\n", conn->recv_timeout); 3801 break; 3802 case ISCSI_PARAM_MAX_RECV_DLENGTH: 3803 len = sysfs_emit(buf, "%u\n", conn->max_recv_dlength); 3804 break; 3805 case ISCSI_PARAM_MAX_XMIT_DLENGTH: 3806 len = sysfs_emit(buf, "%u\n", conn->max_xmit_dlength); 3807 break; 3808 case ISCSI_PARAM_HDRDGST_EN: 3809 len = sysfs_emit(buf, "%d\n", conn->hdrdgst_en); 3810 break; 3811 case ISCSI_PARAM_DATADGST_EN: 3812 len = sysfs_emit(buf, "%d\n", conn->datadgst_en); 3813 break; 3814 case ISCSI_PARAM_IFMARKER_EN: 3815 len = sysfs_emit(buf, "%d\n", conn->ifmarker_en); 3816 break; 3817 case ISCSI_PARAM_OFMARKER_EN: 3818 len = sysfs_emit(buf, "%d\n", conn->ofmarker_en); 3819 break; 3820 case ISCSI_PARAM_EXP_STATSN: 3821 len = sysfs_emit(buf, "%u\n", conn->exp_statsn); 3822 break; 3823 case ISCSI_PARAM_PERSISTENT_PORT: 3824 len = sysfs_emit(buf, "%d\n", conn->persistent_port); 3825 break; 3826 case ISCSI_PARAM_PERSISTENT_ADDRESS: 3827 len = sysfs_emit(buf, "%s\n", conn->persistent_address); 3828 break; 3829 case ISCSI_PARAM_STATSN: 3830 len = sysfs_emit(buf, "%u\n", conn->statsn); 3831 break; 3832 case ISCSI_PARAM_MAX_SEGMENT_SIZE: 3833 len = sysfs_emit(buf, "%u\n", conn->max_segment_size); 3834 break; 3835 case ISCSI_PARAM_KEEPALIVE_TMO: 3836 len = sysfs_emit(buf, "%u\n", conn->keepalive_tmo); 3837 break; 3838 case ISCSI_PARAM_LOCAL_PORT: 3839 len = sysfs_emit(buf, "%u\n", conn->local_port); 3840 break; 3841 case ISCSI_PARAM_TCP_TIMESTAMP_STAT: 3842 len = sysfs_emit(buf, "%u\n", conn->tcp_timestamp_stat); 3843 break; 3844 case ISCSI_PARAM_TCP_NAGLE_DISABLE: 3845 len = sysfs_emit(buf, "%u\n", conn->tcp_nagle_disable); 3846 break; 3847 case ISCSI_PARAM_TCP_WSF_DISABLE: 3848 len = sysfs_emit(buf, "%u\n", conn->tcp_wsf_disable); 3849 break; 3850 case ISCSI_PARAM_TCP_TIMER_SCALE: 3851 len = sysfs_emit(buf, "%u\n", conn->tcp_timer_scale); 3852 break; 3853 case ISCSI_PARAM_TCP_TIMESTAMP_EN: 3854 len = sysfs_emit(buf, "%u\n", conn->tcp_timestamp_en); 3855 break; 3856 case ISCSI_PARAM_IP_FRAGMENT_DISABLE: 3857 len = sysfs_emit(buf, "%u\n", conn->fragment_disable); 3858 break; 3859 case ISCSI_PARAM_IPV4_TOS: 3860 len = sysfs_emit(buf, 
"%u\n", conn->ipv4_tos); 3861 break; 3862 case ISCSI_PARAM_IPV6_TC: 3863 len = sysfs_emit(buf, "%u\n", conn->ipv6_traffic_class); 3864 break; 3865 case ISCSI_PARAM_IPV6_FLOW_LABEL: 3866 len = sysfs_emit(buf, "%u\n", conn->ipv6_flow_label); 3867 break; 3868 case ISCSI_PARAM_IS_FW_ASSIGNED_IPV6: 3869 len = sysfs_emit(buf, "%u\n", conn->is_fw_assigned_ipv6); 3870 break; 3871 case ISCSI_PARAM_TCP_XMIT_WSF: 3872 len = sysfs_emit(buf, "%u\n", conn->tcp_xmit_wsf); 3873 break; 3874 case ISCSI_PARAM_TCP_RECV_WSF: 3875 len = sysfs_emit(buf, "%u\n", conn->tcp_recv_wsf); 3876 break; 3877 case ISCSI_PARAM_LOCAL_IPADDR: 3878 len = sysfs_emit(buf, "%s\n", conn->local_ipaddr); 3879 break; 3880 default: 3881 return -ENOSYS; 3882 } 3883 3884 return len; 3885 } 3886 EXPORT_SYMBOL_GPL(iscsi_conn_get_param); 3887 3888 int iscsi_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param, 3889 char *buf) 3890 { 3891 struct iscsi_host *ihost = shost_priv(shost); 3892 int len; 3893 3894 switch (param) { 3895 case ISCSI_HOST_PARAM_NETDEV_NAME: 3896 len = sysfs_emit(buf, "%s\n", ihost->netdev); 3897 break; 3898 case ISCSI_HOST_PARAM_HWADDRESS: 3899 len = sysfs_emit(buf, "%s\n", ihost->hwaddress); 3900 break; 3901 case ISCSI_HOST_PARAM_INITIATOR_NAME: 3902 len = sysfs_emit(buf, "%s\n", ihost->initiatorname); 3903 break; 3904 default: 3905 return -ENOSYS; 3906 } 3907 3908 return len; 3909 } 3910 EXPORT_SYMBOL_GPL(iscsi_host_get_param); 3911 3912 int iscsi_host_set_param(struct Scsi_Host *shost, enum iscsi_host_param param, 3913 char *buf, int buflen) 3914 { 3915 struct iscsi_host *ihost = shost_priv(shost); 3916 3917 switch (param) { 3918 case ISCSI_HOST_PARAM_NETDEV_NAME: 3919 return iscsi_switch_str_param(&ihost->netdev, buf); 3920 case ISCSI_HOST_PARAM_HWADDRESS: 3921 return iscsi_switch_str_param(&ihost->hwaddress, buf); 3922 case ISCSI_HOST_PARAM_INITIATOR_NAME: 3923 return iscsi_switch_str_param(&ihost->initiatorname, buf); 3924 default: 3925 return -ENOSYS; 3926 } 3927 3928 return 0; 3929 } 3930 EXPORT_SYMBOL_GPL(iscsi_host_set_param); 3931 3932 MODULE_AUTHOR("Mike Christie"); 3933 MODULE_DESCRIPTION("iSCSI library functions"); 3934 MODULE_LICENSE("GPL"); 3935