xref: /linux/drivers/scsi/libiscsi.c (revision f7511d5f66f01fc451747b24e79f3ada7a3af9af)
1 /*
2  * iSCSI lib functions
3  *
4  * Copyright (C) 2006 Red Hat, Inc.  All rights reserved.
5  * Copyright (C) 2004 - 2006 Mike Christie
6  * Copyright (C) 2004 - 2005 Dmitry Yusupov
7  * Copyright (C) 2004 - 2005 Alex Aizman
8  * maintained by open-iscsi@googlegroups.com
9  *
10  * This program is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License as published by
12  * the Free Software Foundation; either version 2 of the License, or
13  * (at your option) any later version.
14  *
15  * This program is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18  * GNU General Public License for more details.
19  *
20  * You should have received a copy of the GNU General Public License
21  * along with this program; if not, write to the Free Software
22  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23  */
24 #include <linux/types.h>
25 #include <linux/kfifo.h>
26 #include <linux/delay.h>
27 #include <linux/log2.h>
28 #include <asm/unaligned.h>
29 #include <net/tcp.h>
30 #include <scsi/scsi_cmnd.h>
31 #include <scsi/scsi_device.h>
32 #include <scsi/scsi_eh.h>
33 #include <scsi/scsi_tcq.h>
34 #include <scsi/scsi_host.h>
35 #include <scsi/scsi.h>
36 #include <scsi/iscsi_proto.h>
37 #include <scsi/scsi_transport.h>
38 #include <scsi/scsi_transport_iscsi.h>
39 #include <scsi/libiscsi.h>
40 
41 struct iscsi_session *
42 class_to_transport_session(struct iscsi_cls_session *cls_session)
43 {
44 	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
45 	return iscsi_hostdata(shost->hostdata);
46 }
47 EXPORT_SYMBOL_GPL(class_to_transport_session);
48 
49 /* Serial Number Arithmetic, 32 bits, less than, RFC1982 */
50 #define SNA32_CHECK 2147483648UL
51 
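/*
 * Note: the helpers below rely on unsigned 32-bit wrap-around.  When
 * n1 > n2, (n2 - n1) is computed mod 2^32, so "n2 - n1 < SNA32_CHECK"
 * is equivalent to RFC 1982's "n1 - n2 > 2^31".  For example, with
 * n1 = 0xfffffff0 and n2 = 0x00000010, n2 - n1 wraps to 0x20, so n1
 * is treated as "less than" n2 even though it is numerically larger.
 */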
52 static int iscsi_sna_lt(u32 n1, u32 n2)
53 {
54 	return n1 != n2 && ((n1 < n2 && (n2 - n1 < SNA32_CHECK)) ||
55 			    (n1 > n2 && (n2 - n1 < SNA32_CHECK)));
56 }
57 
58 /* Serial Number Arithmetic, 32 bits, less than or equal, RFC1982 */
59 static int iscsi_sna_lte(u32 n1, u32 n2)
60 {
61 	return n1 == n2 || ((n1 < n2 && (n2 - n1 < SNA32_CHECK)) ||
62 			    (n1 > n2 && (n2 - n1 < SNA32_CHECK)));
63 }
64 
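/*
 * iscsi_update_cmdsn - update the session's ExpCmdSN/MaxCmdSN window
 *
 * Applies the ExpCmdSN/MaxCmdSN carried in a received PDU to the
 * session, ignoring serially older (stale) values.  If MaxCmdSN moved
 * forward while scsi commands or mgmt PDUs were queued, the xmit
 * worker is kicked so the newly opened window gets used.
 */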
65 void
66 iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
67 {
68 	uint32_t max_cmdsn = be32_to_cpu(hdr->max_cmdsn);
69 	uint32_t exp_cmdsn = be32_to_cpu(hdr->exp_cmdsn);
70 
71 	/*
72 	 * standard specifies this check for when to update expected and
73 	 * max sequence numbers
74 	 */
75 	if (iscsi_sna_lt(max_cmdsn, exp_cmdsn - 1))
76 		return;
77 
78 	if (exp_cmdsn != session->exp_cmdsn &&
79 	    !iscsi_sna_lt(exp_cmdsn, session->exp_cmdsn))
80 		session->exp_cmdsn = exp_cmdsn;
81 
82 	if (max_cmdsn != session->max_cmdsn &&
83 	    !iscsi_sna_lt(max_cmdsn, session->max_cmdsn)) {
84 		session->max_cmdsn = max_cmdsn;
85 		/*
86 		 * if the window closed with IO queued, then kick the
87 		 * xmit thread
88 		 */
89 		if (!list_empty(&session->leadconn->xmitqueue) ||
90 		    !list_empty(&session->leadconn->mgmtqueue))
91 			scsi_queue_work(session->host,
92 					&session->leadconn->xmitwork);
93 	}
94 }
95 EXPORT_SYMBOL_GPL(iscsi_update_cmdsn);
96 
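/*
 * Prepare a Data-Out PDU header carrying unsolicited write data for
 * @ctask.  The data segment length is capped at the connection's
 * max_xmit_dlength, and the F (final) bit is only set on the PDU that
 * carries the last of the unsolicited data.
 */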
97 void iscsi_prep_unsolicit_data_pdu(struct iscsi_cmd_task *ctask,
98 				   struct iscsi_data *hdr)
99 {
100 	struct iscsi_conn *conn = ctask->conn;
101 
102 	memset(hdr, 0, sizeof(struct iscsi_data));
103 	hdr->ttt = cpu_to_be32(ISCSI_RESERVED_TAG);
104 	hdr->datasn = cpu_to_be32(ctask->unsol_datasn);
105 	ctask->unsol_datasn++;
106 	hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
107 	memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
108 
109 	hdr->itt = ctask->hdr->itt;
110 	hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
111 	hdr->offset = cpu_to_be32(ctask->unsol_offset);
112 
113 	if (ctask->unsol_count > conn->max_xmit_dlength) {
114 		hton24(hdr->dlength, conn->max_xmit_dlength);
115 		ctask->data_count = conn->max_xmit_dlength;
116 		ctask->unsol_offset += ctask->data_count;
117 		hdr->flags = 0;
118 	} else {
119 		hton24(hdr->dlength, ctask->unsol_count);
120 		ctask->data_count = ctask->unsol_count;
121 		hdr->flags = ISCSI_FLAG_CMD_FINAL;
122 	}
123 }
124 EXPORT_SYMBOL_GPL(iscsi_prep_unsolicit_data_pdu);
125 
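/*
 * Reserve @len more bytes of header space (basic header plus AHSs) for
 * @ctask.  Callers grab the next AHS slot with iscsi_next_hdr() and
 * then account for its length here; ctask->hdr_len tracks what has
 * been claimed so far and may never exceed ctask->hdr_max.
 */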
126 static int iscsi_add_hdr(struct iscsi_cmd_task *ctask, unsigned len)
127 {
128 	unsigned exp_len = ctask->hdr_len + len;
129 
130 	if (exp_len > ctask->hdr_max) {
131 		WARN_ON(1);
132 		return -EINVAL;
133 	}
134 
135 	WARN_ON(len & (ISCSI_PAD_LEN - 1)); /* caller must pad the AHS */
136 	ctask->hdr_len = exp_len;
137 	return 0;
138 }
139 
140 /*
141  * build an extended CDB AHS (additional header segment)
142  */
143 static int iscsi_prep_ecdb_ahs(struct iscsi_cmd_task *ctask)
144 {
145 	struct scsi_cmnd *cmd = ctask->sc;
146 	unsigned rlen, pad_len;
147 	unsigned short ahslength;
148 	struct iscsi_ecdb_ahdr *ecdb_ahdr;
149 	int rc;
150 
151 	ecdb_ahdr = iscsi_next_hdr(ctask);
152 	rlen = cmd->cmd_len - ISCSI_CDB_SIZE;
153 
154 	BUG_ON(rlen > sizeof(ecdb_ahdr->ecdb));
155 	ahslength = rlen + sizeof(ecdb_ahdr->reserved);
156 
157 	pad_len = iscsi_padding(rlen);
158 
159 	rc = iscsi_add_hdr(ctask, sizeof(ecdb_ahdr->ahslength) +
160 	                   sizeof(ecdb_ahdr->ahstype) + ahslength + pad_len);
161 	if (rc)
162 		return rc;
163 
164 	if (pad_len)
165 		memset(&ecdb_ahdr->ecdb[rlen], 0, pad_len);
166 
167 	ecdb_ahdr->ahslength = cpu_to_be16(ahslength);
168 	ecdb_ahdr->ahstype = ISCSI_AHSTYPE_CDB;
169 	ecdb_ahdr->reserved = 0;
170 	memcpy(ecdb_ahdr->ecdb, cmd->cmnd + ISCSI_CDB_SIZE, rlen);
171 
172 	debug_scsi("iscsi_prep_ecdb_ahs: varlen_cdb_len %d "
173 		   "rlen %d pad_len %d ahs_length %d iscsi_headers_size %u\n",
174 		   cmd->cmd_len, rlen, pad_len, ahslength, ctask->hdr_len);
175 
176 	return 0;
177 }
178 
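/*
 * Build the Expected Bidirectional Read Data Length AHS so the target
 * knows how much read data to return for a bidirectional command (the
 * basic header's data_length only describes the write direction).
 */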
179 static int iscsi_prep_bidi_ahs(struct iscsi_cmd_task *ctask)
180 {
181 	struct scsi_cmnd *sc = ctask->sc;
182 	struct iscsi_rlength_ahdr *rlen_ahdr;
183 	int rc;
184 
185 	rlen_ahdr = iscsi_next_hdr(ctask);
186 	rc = iscsi_add_hdr(ctask, sizeof(*rlen_ahdr));
187 	if (rc)
188 		return rc;
189 
190 	rlen_ahdr->ahslength =
191 		cpu_to_be16(sizeof(rlen_ahdr->read_length) +
192 						  sizeof(rlen_ahdr->reserved));
193 	rlen_ahdr->ahstype = ISCSI_AHSTYPE_RLENGTH;
194 	rlen_ahdr->reserved = 0;
195 	rlen_ahdr->read_length = cpu_to_be32(scsi_in(sc)->length);
196 
197 	debug_scsi("bidi-in rlen_ahdr->read_length(%d) "
198 		   "rlen_ahdr->ahslength(%d)\n",
199 		   be32_to_cpu(rlen_ahdr->read_length),
200 		   be16_to_cpu(rlen_ahdr->ahslength));
201 	return 0;
202 }
203 
204 /**
205  * iscsi_prep_scsi_cmd_pdu - prep iscsi scsi cmd pdu
206  * @ctask: iscsi cmd task
207  *
208  * Prep basic iSCSI PDU fields for a scsi cmd pdu. The LLD should set
209  * fields like dlength or final based on how much data it sends
210  */
211 static int iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
212 {
213 	struct iscsi_conn *conn = ctask->conn;
214 	struct iscsi_session *session = conn->session;
215 	struct iscsi_cmd *hdr = ctask->hdr;
216 	struct scsi_cmnd *sc = ctask->sc;
217 	unsigned hdrlength, cmd_len;
218 	int rc;
219 
220 	ctask->hdr_len = 0;
221 	rc = iscsi_add_hdr(ctask, sizeof(*hdr));
222 	if (rc)
223 		return rc;
224 	hdr->opcode = ISCSI_OP_SCSI_CMD;
225 	hdr->flags = ISCSI_ATTR_SIMPLE;
226 	int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun);
227 	hdr->itt = build_itt(ctask->itt, session->age);
228 	hdr->cmdsn = cpu_to_be32(session->cmdsn);
229 	session->cmdsn++;
230 	hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
231 	cmd_len = sc->cmd_len;
232 	if (cmd_len < ISCSI_CDB_SIZE)
233 		memset(&hdr->cdb[cmd_len], 0, ISCSI_CDB_SIZE - cmd_len);
234 	else if (cmd_len > ISCSI_CDB_SIZE) {
235 		rc = iscsi_prep_ecdb_ahs(ctask);
236 		if (rc)
237 			return rc;
238 		cmd_len = ISCSI_CDB_SIZE;
239 	}
240 	memcpy(hdr->cdb, sc->cmnd, cmd_len);
241 
242 	ctask->imm_count = 0;
243 	if (scsi_bidi_cmnd(sc)) {
244 		hdr->flags |= ISCSI_FLAG_CMD_READ;
245 		rc = iscsi_prep_bidi_ahs(ctask);
246 		if (rc)
247 			return rc;
248 	}
249 	if (sc->sc_data_direction == DMA_TO_DEVICE) {
250 		unsigned out_len = scsi_out(sc)->length;
251 		hdr->data_length = cpu_to_be32(out_len);
252 		hdr->flags |= ISCSI_FLAG_CMD_WRITE;
253 		/*
254 		 * Write counters:
255 		 *
256 		 *	imm_count	bytes to be sent right after
257 		 *			SCSI PDU Header
258 		 *
259 		 *	unsol_count	bytes(as Data-Out) to be sent
260 		 *			without	R2T ack right after
261 		 *			immediate data
262 		 *
263 		 *	r2t_data_count	bytes to be sent via R2T ack's
264 		 *
265 		 *      pad_count       bytes to be sent as zero-padding
266 		 */
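		/*
		 * Worked example with illustrative negotiated values:
		 * ImmediateData=Yes, InitialR2T=No, FirstBurstLength=64k,
		 * MaxXmitDataSegmentLength=8k and a 128k write give
		 * imm_count = 8k (sent with this PDU), unsol_count = 56k
		 * (sent as unsolicited Data-Outs) and the remaining 64k
		 * is sent in response to R2Ts.
		 */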
267 		ctask->unsol_count = 0;
268 		ctask->unsol_offset = 0;
269 		ctask->unsol_datasn = 0;
270 
271 		if (session->imm_data_en) {
272 			if (out_len >= session->first_burst)
273 				ctask->imm_count = min(session->first_burst,
274 							conn->max_xmit_dlength);
275 			else
276 				ctask->imm_count = min(out_len,
277 							conn->max_xmit_dlength);
278 			hton24(hdr->dlength, ctask->imm_count);
279 		} else
280 			zero_data(hdr->dlength);
281 
282 		if (!session->initial_r2t_en) {
283 			ctask->unsol_count = min(session->first_burst, out_len)
284 							     - ctask->imm_count;
285 			ctask->unsol_offset = ctask->imm_count;
286 		}
287 
288 		if (!ctask->unsol_count)
289 		/* No unsolicited Data-Outs */
290 			hdr->flags |= ISCSI_FLAG_CMD_FINAL;
291 	} else {
292 		hdr->flags |= ISCSI_FLAG_CMD_FINAL;
293 		zero_data(hdr->dlength);
294 		hdr->data_length = cpu_to_be32(scsi_in(sc)->length);
295 
296 		if (sc->sc_data_direction == DMA_FROM_DEVICE)
297 			hdr->flags |= ISCSI_FLAG_CMD_READ;
298 	}
299 
300 	/* calculate size of additional header segments (AHSs) */
301 	hdrlength = ctask->hdr_len - sizeof(*hdr);
302 
303 	WARN_ON(hdrlength & (ISCSI_PAD_LEN-1));
304 	hdrlength /= ISCSI_PAD_LEN;
305 
306 	WARN_ON(hdrlength >= 256);
307 	hdr->hlength = hdrlength & 0xFF;
308 
309 	if (conn->session->tt->init_cmd_task(conn->ctask))
310 		return -EIO;
311 
312 	conn->scsicmd_pdus_cnt++;
313 	debug_scsi("iscsi prep [%s cid %d sc %p cdb 0x%x itt 0x%x "
314 		"len %d bidi_len %d cmdsn %d win %d]\n",
315 		scsi_bidi_cmnd(sc) ? "bidirectional" :
316 		     sc->sc_data_direction == DMA_TO_DEVICE ? "write" : "read",
317 		conn->id, sc, sc->cmnd[0], ctask->itt,
318 		scsi_bufflen(sc), scsi_bidi_cmnd(sc) ? scsi_in(sc)->length : 0,
319 		session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1);
320 	return 0;
321 }
322 
323 /**
324  * iscsi_complete_command - return command back to scsi-ml
325  * @ctask: iscsi cmd task
326  *
327  * Must be called with session lock.
328  * This function returns the scsi command to scsi-ml and returns
329  * the cmd task to the pool of available cmd tasks.
330  */
331 static void iscsi_complete_command(struct iscsi_cmd_task *ctask)
332 {
333 	struct iscsi_conn *conn = ctask->conn;
334 	struct iscsi_session *session = conn->session;
335 	struct scsi_cmnd *sc = ctask->sc;
336 
337 	ctask->state = ISCSI_TASK_COMPLETED;
338 	ctask->sc = NULL;
339 	/* SCSI eh reuses commands to verify us */
340 	sc->SCp.ptr = NULL;
341 	if (conn->ctask == ctask)
342 		conn->ctask = NULL;
343 	list_del_init(&ctask->running);
344 	__kfifo_put(session->cmdpool.queue, (void*)&ctask, sizeof(void*));
345 	sc->scsi_done(sc);
346 }
347 
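/*
 * ctask reference counting: queuecommand sets the initial reference,
 * the xmit path takes an extra one around the LLD's xmit_cmd_task()
 * call, and completion or cleanup drops references via
 * __iscsi_put_ctask().  The ctask is only returned to the pool (and
 * the scsi command completed) when the last reference is dropped.
 */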
348 static void __iscsi_get_ctask(struct iscsi_cmd_task *ctask)
349 {
350 	atomic_inc(&ctask->refcount);
351 }
352 
353 static void __iscsi_put_ctask(struct iscsi_cmd_task *ctask)
354 {
355 	if (atomic_dec_and_test(&ctask->refcount))
356 		iscsi_complete_command(ctask);
357 }
358 
359 /*
360  * session lock must be held
361  */
362 static void fail_command(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
363 			 int err)
364 {
365 	struct scsi_cmnd *sc;
366 
367 	sc = ctask->sc;
368 	if (!sc)
369 		return;
370 
371 	if (ctask->state == ISCSI_TASK_PENDING)
372 		/*
373 		 * cmd never made it to the xmit thread, so we should not count
374 		 * the cmd in the sequencing
375 		 */
376 		conn->session->queued_cmdsn--;
377 	else
378 		conn->session->tt->cleanup_cmd_task(conn, ctask);
379 
380 	sc->result = err;
381 	if (!scsi_bidi_cmnd(sc))
382 		scsi_set_resid(sc, scsi_bufflen(sc));
383 	else {
384 		scsi_out(sc)->resid = scsi_out(sc)->length;
385 		scsi_in(sc)->resid = scsi_in(sc)->length;
386 	}
387 	if (conn->ctask == ctask)
388 		conn->ctask = NULL;
389 	/* release ref from queuecommand */
390 	__iscsi_put_ctask(ctask);
391 }
392 
393 /**
394  * iscsi_free_mgmt_task - return mgmt task back to pool
395  * @conn: iscsi connection
396  * @mtask: mgmt task to return to the pool
397  *
398  * Must be called with session lock.
399  */
400 void iscsi_free_mgmt_task(struct iscsi_conn *conn,
401 			  struct iscsi_mgmt_task *mtask)
402 {
403 	list_del_init(&mtask->running);
404 	if (conn->login_mtask == mtask)
405 		return;
406 
407 	if (conn->ping_mtask == mtask)
408 		conn->ping_mtask = NULL;
409 	__kfifo_put(conn->session->mgmtpool.queue,
410 		    (void*)&mtask, sizeof(void*));
411 }
412 EXPORT_SYMBOL_GPL(iscsi_free_mgmt_task);
413 
414 static struct iscsi_mgmt_task *
415 __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
416 		      char *data, uint32_t data_size)
417 {
418 	struct iscsi_session *session = conn->session;
419 	struct iscsi_mgmt_task *mtask;
420 
421 	if (session->state == ISCSI_STATE_TERMINATE)
422 		return NULL;
423 
424 	if (hdr->opcode == (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) ||
425 	    hdr->opcode == (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE))
426 		/*
427 		 * Login and Text are sent serially, in
428 		 * request-followed-by-response sequence.
429 		 * Same mtask can be used. Same ITT must be used.
430 		 * Note that login_mtask is preallocated at conn_create().
431 		 */
432 		mtask = conn->login_mtask;
433 	else {
434 		BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
435 		BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);
436 
437 		if (!__kfifo_get(session->mgmtpool.queue,
438 				 (void*)&mtask, sizeof(void*)))
439 			return NULL;
440 	}
441 
442 	if (data_size) {
443 		memcpy(mtask->data, data, data_size);
444 		mtask->data_count = data_size;
445 	} else
446 		mtask->data_count = 0;
447 
448 	memcpy(mtask->hdr, hdr, sizeof(struct iscsi_hdr));
449 	INIT_LIST_HEAD(&mtask->running);
450 	list_add_tail(&mtask->running, &conn->mgmtqueue);
451 	return mtask;
452 }
453 
454 int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
455 			char *data, uint32_t data_size)
456 {
457 	struct iscsi_conn *conn = cls_conn->dd_data;
458 	struct iscsi_session *session = conn->session;
459 	int err = 0;
460 
461 	spin_lock_bh(&session->lock);
462 	if (!__iscsi_conn_send_pdu(conn, hdr, data, data_size))
463 		err = -EPERM;
464 	spin_unlock_bh(&session->lock);
465 	scsi_queue_work(session->host, &conn->xmitwork);
466 	return err;
467 }
468 EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);
469 
470 /**
471  * iscsi_scsi_cmd_rsp - SCSI Command Response processing
472  * @conn: iscsi connection
473  * @hdr: iscsi header
474  * @ctask: scsi command task
475  * @data: cmd data buffer
476  * @datalen: len of buffer
477  *
478  * iscsi_scsi_cmd_rsp sets up the scsi_cmnd fields based on the PDU and
479  * then completes the command and task.
480  **/
481 static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
482 			       struct iscsi_cmd_task *ctask, char *data,
483 			       int datalen)
484 {
485 	struct iscsi_cmd_rsp *rhdr = (struct iscsi_cmd_rsp *)hdr;
486 	struct iscsi_session *session = conn->session;
487 	struct scsi_cmnd *sc = ctask->sc;
488 
489 	iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
490 	conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
491 
492 	sc->result = (DID_OK << 16) | rhdr->cmd_status;
493 
494 	if (rhdr->response != ISCSI_STATUS_CMD_COMPLETED) {
495 		sc->result = DID_ERROR << 16;
496 		goto out;
497 	}
498 
499 	if (rhdr->cmd_status == SAM_STAT_CHECK_CONDITION) {
500 		uint16_t senselen;
501 
502 		if (datalen < 2) {
503 invalid_datalen:
504 			iscsi_conn_printk(KERN_ERR,  conn,
505 					 "Got CHECK_CONDITION but invalid data "
506 					 "buffer size of %d\n", datalen);
507 			sc->result = DID_BAD_TARGET << 16;
508 			goto out;
509 		}
510 
511 		senselen = be16_to_cpu(get_unaligned((__be16 *) data));
512 		if (datalen < senselen)
513 			goto invalid_datalen;
514 
515 		memcpy(sc->sense_buffer, data + 2,
516 		       min_t(uint16_t, senselen, SCSI_SENSE_BUFFERSIZE));
517 		debug_scsi("copied %d bytes of sense\n",
518 			   min_t(uint16_t, senselen, SCSI_SENSE_BUFFERSIZE));
519 	}
520 
521 	if (rhdr->flags & (ISCSI_FLAG_CMD_BIDI_UNDERFLOW |
522 			   ISCSI_FLAG_CMD_BIDI_OVERFLOW)) {
523 		int res_count = be32_to_cpu(rhdr->bi_residual_count);
524 
525 		if (scsi_bidi_cmnd(sc) && res_count > 0 &&
526 				(rhdr->flags & ISCSI_FLAG_CMD_BIDI_OVERFLOW ||
527 				 res_count <= scsi_in(sc)->length))
528 			scsi_in(sc)->resid = res_count;
529 		else
530 			sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
531 	}
532 
533 	if (rhdr->flags & (ISCSI_FLAG_CMD_UNDERFLOW |
534 	                   ISCSI_FLAG_CMD_OVERFLOW)) {
535 		int res_count = be32_to_cpu(rhdr->residual_count);
536 
537 		if (res_count > 0 &&
538 		    (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW ||
539 		     res_count <= scsi_bufflen(sc)))
540 			/* write side for bidi or uni-io set_resid */
541 			scsi_set_resid(sc, res_count);
542 		else
543 			sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
544 	}
545 out:
546 	debug_scsi("done [sc %lx res %d itt 0x%x]\n",
547 		   (long)sc, sc->result, ctask->itt);
548 	conn->scsirsp_pdus_cnt++;
549 
550 	__iscsi_put_ctask(ctask);
551 }
552 
553 static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
554 {
555 	struct iscsi_tm_rsp *tmf = (struct iscsi_tm_rsp *)hdr;
556 
557 	conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
558 	conn->tmfrsp_pdus_cnt++;
559 
560 	if (conn->tmf_state != TMF_QUEUED)
561 		return;
562 
563 	if (tmf->response == ISCSI_TMF_RSP_COMPLETE)
564 		conn->tmf_state = TMF_SUCCESS;
565 	else if (tmf->response == ISCSI_TMF_RSP_NO_TASK)
566 		conn->tmf_state = TMF_NOT_FOUND;
567 	else
568 		conn->tmf_state = TMF_FAILED;
569 	wake_up(&conn->ehwait);
570 }
571 
572 static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
573 {
574 	struct iscsi_nopout hdr;
575 	struct iscsi_mgmt_task *mtask;
576 
577 	if (!rhdr && conn->ping_mtask)
578 		return;
579 
580 	memset(&hdr, 0, sizeof(struct iscsi_nopout));
581 	hdr.opcode = ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE;
582 	hdr.flags = ISCSI_FLAG_CMD_FINAL;
583 
584 	if (rhdr) {
585 		memcpy(hdr.lun, rhdr->lun, 8);
586 		hdr.ttt = rhdr->ttt;
587 		hdr.itt = RESERVED_ITT;
588 	} else
589 		hdr.ttt = RESERVED_ITT;
590 
591 	mtask = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
592 	if (!mtask) {
593 		iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n");
594 		return;
595 	}
596 
597 	/* only track our nops */
598 	if (!rhdr) {
599 		conn->ping_mtask = mtask;
600 		conn->last_ping = jiffies;
601 	}
602 	scsi_queue_work(conn->session->host, &conn->xmitwork);
603 }
604 
605 static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
606 			       char *data, int datalen)
607 {
608 	struct iscsi_reject *reject = (struct iscsi_reject *)hdr;
609 	struct iscsi_hdr rejected_pdu;
610 	uint32_t itt;
611 
612 	conn->exp_statsn = be32_to_cpu(reject->statsn) + 1;
613 
614 	if (reject->reason == ISCSI_REASON_DATA_DIGEST_ERROR) {
615 		if (ntoh24(reject->dlength) > datalen)
616 			return ISCSI_ERR_PROTO;
617 
618 		if (ntoh24(reject->dlength) >= sizeof(struct iscsi_hdr)) {
619 			memcpy(&rejected_pdu, data, sizeof(struct iscsi_hdr));
620 			itt = get_itt(rejected_pdu.itt);
621 			iscsi_conn_printk(KERN_ERR, conn,
622 					  "itt 0x%x had pdu (op 0x%x) rejected "
623 					  "due to DataDigest error.\n", itt,
624 					  rejected_pdu.opcode);
625 		}
626 	}
627 	return 0;
628 }
629 
630 /**
631  * __iscsi_complete_pdu - complete pdu
632  * @conn: iscsi conn
633  * @hdr: iscsi header
634  * @data: data buffer
635  * @datalen: len of data buffer
636  *
637  * Completes pdu processing by freeing any resources allocated at
638  * queuecommand or send generic. The session lock must be held and
639  * iscsi_verify_itt() must have been called.
640  */
641 static int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
642 				char *data, int datalen)
643 {
644 	struct iscsi_session *session = conn->session;
645 	int opcode = hdr->opcode & ISCSI_OPCODE_MASK, rc = 0;
646 	struct iscsi_cmd_task *ctask;
647 	struct iscsi_mgmt_task *mtask;
648 	uint32_t itt;
649 
650 	conn->last_recv = jiffies;
651 	if (hdr->itt != RESERVED_ITT)
652 		itt = get_itt(hdr->itt);
653 	else
654 		itt = ~0U;
655 
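	/*
	 * Demultiplex on the itt: values below cmds_max index the scsi
	 * command task pool, values in the ISCSI_MGMT_ITT_OFFSET range
	 * index the mgmt task pool, and ~0U (RESERVED_ITT) marks
	 * target-initiated PDUs such as unsolicited NOP-Ins, rejects and
	 * async events.
	 */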
656 	if (itt < session->cmds_max) {
657 		ctask = session->cmds[itt];
658 
659 		debug_scsi("cmdrsp [op 0x%x cid %d itt 0x%x len %d]\n",
660 			   opcode, conn->id, ctask->itt, datalen);
661 
662 		switch(opcode) {
663 		case ISCSI_OP_SCSI_CMD_RSP:
664 			BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
665 			iscsi_scsi_cmd_rsp(conn, hdr, ctask, data,
666 					   datalen);
667 			break;
668 		case ISCSI_OP_SCSI_DATA_IN:
669 			BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
670 			if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
671 				conn->scsirsp_pdus_cnt++;
672 				__iscsi_put_ctask(ctask);
673 			}
674 			break;
675 		case ISCSI_OP_R2T:
676 			/* LLD handles this for now */
677 			break;
678 		default:
679 			rc = ISCSI_ERR_BAD_OPCODE;
680 			break;
681 		}
682 	} else if (itt >= ISCSI_MGMT_ITT_OFFSET &&
683 		   itt < ISCSI_MGMT_ITT_OFFSET + session->mgmtpool_max) {
684 		mtask = session->mgmt_cmds[itt - ISCSI_MGMT_ITT_OFFSET];
685 
686 		debug_scsi("immrsp [op 0x%x cid %d itt 0x%x len %d]\n",
687 			   opcode, conn->id, mtask->itt, datalen);
688 
689 		iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
690 		switch(opcode) {
691 		case ISCSI_OP_LOGOUT_RSP:
692 			if (datalen) {
693 				rc = ISCSI_ERR_PROTO;
694 				break;
695 			}
696 			conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
697 			/* fall through */
698 		case ISCSI_OP_LOGIN_RSP:
699 		case ISCSI_OP_TEXT_RSP:
700 			/*
701 			 * login related PDU's exp_statsn is handled in
702 			 * userspace
703 			 */
704 			if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
705 				rc = ISCSI_ERR_CONN_FAILED;
706 			iscsi_free_mgmt_task(conn, mtask);
707 			break;
708 		case ISCSI_OP_SCSI_TMFUNC_RSP:
709 			if (datalen) {
710 				rc = ISCSI_ERR_PROTO;
711 				break;
712 			}
713 
714 			iscsi_tmf_rsp(conn, hdr);
715 			iscsi_free_mgmt_task(conn, mtask);
716 			break;
717 		case ISCSI_OP_NOOP_IN:
718 			if (hdr->ttt != cpu_to_be32(ISCSI_RESERVED_TAG) ||
719 			    datalen) {
720 				rc = ISCSI_ERR_PROTO;
721 				break;
722 			}
723 			conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
724 
725 			if (conn->ping_mtask != mtask) {
726 				/*
727 				 * If this is not in response to one of our
728 				 * nops then it must be from userspace.
729 				 */
730 				if (iscsi_recv_pdu(conn->cls_conn, hdr, data,
731 						   datalen))
732 					rc = ISCSI_ERR_CONN_FAILED;
733 			}
734 			iscsi_free_mgmt_task(conn, mtask);
735 			break;
736 		default:
737 			rc = ISCSI_ERR_BAD_OPCODE;
738 			break;
739 		}
740 	} else if (itt == ~0U) {
741 		iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
742 
743 		switch(opcode) {
744 		case ISCSI_OP_NOOP_IN:
745 			if (datalen) {
746 				rc = ISCSI_ERR_PROTO;
747 				break;
748 			}
749 
750 			if (hdr->ttt == cpu_to_be32(ISCSI_RESERVED_TAG))
751 				break;
752 
753 			iscsi_send_nopout(conn, (struct iscsi_nopin*)hdr);
754 			break;
755 		case ISCSI_OP_REJECT:
756 			rc = iscsi_handle_reject(conn, hdr, data, datalen);
757 			break;
758 		case ISCSI_OP_ASYNC_EVENT:
759 			conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
760 			if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
761 				rc = ISCSI_ERR_CONN_FAILED;
762 			break;
763 		default:
764 			rc = ISCSI_ERR_BAD_OPCODE;
765 			break;
766 		}
767 	} else
768 		rc = ISCSI_ERR_BAD_ITT;
769 
770 	return rc;
771 }
772 
773 int iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
774 		       char *data, int datalen)
775 {
776 	int rc;
777 
778 	spin_lock(&conn->session->lock);
779 	rc = __iscsi_complete_pdu(conn, hdr, data, datalen);
780 	spin_unlock(&conn->session->lock);
781 	return rc;
782 }
783 EXPORT_SYMBOL_GPL(iscsi_complete_pdu);
784 
785 /* verify itt (itt encoding: age + task number) */
786 int iscsi_verify_itt(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
787 		     uint32_t *ret_itt)
788 {
789 	struct iscsi_session *session = conn->session;
790 	struct iscsi_cmd_task *ctask;
791 	uint32_t itt;
792 
793 	if (hdr->itt != RESERVED_ITT) {
794 		if (((__force u32)hdr->itt & ISCSI_AGE_MASK) !=
795 		    (session->age << ISCSI_AGE_SHIFT)) {
796 			iscsi_conn_printk(KERN_ERR, conn,
797 					  "received itt %x expected session "
798 					  "age (%x)\n", (__force u32)hdr->itt,
799 					  session->age & ISCSI_AGE_MASK);
800 			return ISCSI_ERR_BAD_ITT;
801 		}
802 
803 		itt = get_itt(hdr->itt);
804 	} else
805 		itt = ~0U;
806 
807 	if (itt < session->cmds_max) {
808 		ctask = session->cmds[itt];
809 
810 		if (!ctask->sc) {
811 			iscsi_conn_printk(KERN_INFO, conn, "dropping ctask "
812 					  "with itt 0x%x\n", ctask->itt);
813 			/* force drop */
814 			return ISCSI_ERR_NO_SCSI_CMD;
815 		}
816 
817 		if (ctask->sc->SCp.phase != session->age) {
818 			iscsi_conn_printk(KERN_ERR, conn,
819 					  "iscsi: ctask's session age %d, "
820 					  "expected %d\n", ctask->sc->SCp.phase,
821 					  session->age);
822 			return ISCSI_ERR_SESSION_FAILED;
823 		}
824 	}
825 
826 	*ret_itt = itt;
827 	return 0;
828 }
829 EXPORT_SYMBOL_GPL(iscsi_verify_itt);
830 
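/*
 * iscsi_conn_failure - flag a connection error and start recovery
 *
 * Marks the session ISCSI_STATE_FAILED (unless the connection is
 * already being stopped), suspends Tx and Rx on the connection, and
 * notifies the iscsi transport class via iscsi_conn_error() so that
 * recovery can be initiated.
 */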
831 void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
832 {
833 	struct iscsi_session *session = conn->session;
834 	unsigned long flags;
835 
836 	spin_lock_irqsave(&session->lock, flags);
837 	if (session->state == ISCSI_STATE_FAILED) {
838 		spin_unlock_irqrestore(&session->lock, flags);
839 		return;
840 	}
841 
842 	if (conn->stop_stage == 0)
843 		session->state = ISCSI_STATE_FAILED;
844 	spin_unlock_irqrestore(&session->lock, flags);
845 	set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
846 	set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
847 	iscsi_conn_error(conn->cls_conn, err);
848 }
849 EXPORT_SYMBOL_GPL(iscsi_conn_failure);
850 
851 static void iscsi_prep_mtask(struct iscsi_conn *conn,
852 			     struct iscsi_mgmt_task *mtask)
853 {
854 	struct iscsi_session *session = conn->session;
855 	struct iscsi_hdr *hdr = mtask->hdr;
856 	struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr;
857 
858 	if (hdr->opcode != (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) &&
859 	    hdr->opcode != (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE))
860 		nop->exp_statsn = cpu_to_be32(conn->exp_statsn);
861 	/*
862 	 * pre-format CmdSN for outgoing PDU.
863 	 */
864 	nop->cmdsn = cpu_to_be32(session->cmdsn);
865 	if (hdr->itt != RESERVED_ITT) {
866 		hdr->itt = build_itt(mtask->itt, session->age);
867 		/*
868 		 * TODO: We always use immediate, so we never hit this.
869 		 * If we start to send tmfs or nops as non-immediate then
870 		 * we should start checking the cmdsn numbers for mgmt tasks.
871 		 */
872 		if (conn->c_stage == ISCSI_CONN_STARTED &&
873 		    !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
874 			session->queued_cmdsn++;
875 			session->cmdsn++;
876 		}
877 	}
878 
879 	if (session->tt->init_mgmt_task)
880 		session->tt->init_mgmt_task(conn, mtask);
881 
882 	debug_scsi("mgmtpdu [op 0x%x hdr->itt 0x%x datalen %d]\n",
883 		   hdr->opcode & ISCSI_OPCODE_MASK, hdr->itt,
884 		   mtask->data_count);
885 }
886 
887 static int iscsi_xmit_mtask(struct iscsi_conn *conn)
888 {
889 	struct iscsi_hdr *hdr = conn->mtask->hdr;
890 	int rc;
891 
892 	if ((hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
893 		conn->session->state = ISCSI_STATE_LOGGING_OUT;
894 	spin_unlock_bh(&conn->session->lock);
895 
896 	rc = conn->session->tt->xmit_mgmt_task(conn, conn->mtask);
897 	spin_lock_bh(&conn->session->lock);
898 	if (rc)
899 		return rc;
900 
901 	/* done with this in-progress mtask */
902 	conn->mtask = NULL;
903 	return 0;
904 }
905 
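/*
 * Note on the two counters: session->queued_cmdsn is bumped in
 * iscsi_queuecommand() as soon as a command is accepted, while
 * session->cmdsn is only bumped when the PDU is built in
 * iscsi_prep_scsi_cmd_pdu().  The window check below uses
 * queued_cmdsn so we stop accepting commands before the already
 * queued ones exhaust MaxCmdSN.
 */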
906 static int iscsi_check_cmdsn_window_closed(struct iscsi_conn *conn)
907 {
908 	struct iscsi_session *session = conn->session;
909 
910 	/*
911 	 * Check for iSCSI window and take care of CmdSN wrap-around
912 	 */
913 	if (!iscsi_sna_lte(session->queued_cmdsn, session->max_cmdsn)) {
914 		debug_scsi("iSCSI CmdSN closed. ExpCmdSn %u MaxCmdSN %u "
915 			   "CmdSN %u/%u\n", session->exp_cmdsn,
916 			   session->max_cmdsn, session->cmdsn,
917 			   session->queued_cmdsn);
918 		return -ENOSPC;
919 	}
920 	return 0;
921 }
922 
923 static int iscsi_xmit_ctask(struct iscsi_conn *conn)
924 {
925 	struct iscsi_cmd_task *ctask = conn->ctask;
926 	int rc;
927 
928 	__iscsi_get_ctask(ctask);
929 	spin_unlock_bh(&conn->session->lock);
930 	rc = conn->session->tt->xmit_cmd_task(conn, ctask);
931 	spin_lock_bh(&conn->session->lock);
932 	__iscsi_put_ctask(ctask);
933 	if (!rc)
934 		/* done with this ctask */
935 		conn->ctask = NULL;
936 	return rc;
937 }
938 
939 /**
940  * iscsi_requeue_ctask - requeue ctask to run from session workqueue
941  * @ctask: ctask to requeue
942  *
943  * LLDs that need to run a ctask from the session workqueue should call
944  * this. The session lock must be held.
945  */
946 void iscsi_requeue_ctask(struct iscsi_cmd_task *ctask)
947 {
948 	struct iscsi_conn *conn = ctask->conn;
949 
950 	list_move_tail(&ctask->running, &conn->requeue);
951 	scsi_queue_work(conn->session->host, &conn->xmitwork);
952 }
953 EXPORT_SYMBOL_GPL(iscsi_requeue_ctask);
954 
955 /**
956  * iscsi_data_xmit - xmit any command into the scheduled connection
957  * @conn: iscsi connection
958  *
959  * Notes:
960  *	The function returns -ENODATA when there is nothing left to send
961  *	or the connection's Tx is suspended, and -EAGAIN when the caller
962  *	must reschedule it and retry later.
963  **/
964 static int iscsi_data_xmit(struct iscsi_conn *conn)
965 {
966 	int rc = 0;
967 
968 	spin_lock_bh(&conn->session->lock);
969 	if (unlikely(conn->suspend_tx)) {
970 		debug_scsi("conn %d Tx suspended!\n", conn->id);
971 		spin_unlock_bh(&conn->session->lock);
972 		return -ENODATA;
973 	}
974 
975 	if (conn->ctask) {
976 		rc = iscsi_xmit_ctask(conn);
977 		if (rc)
978 			goto again;
979 	}
980 
981 	if (conn->mtask) {
982 		rc = iscsi_xmit_mtask(conn);
983 		if (rc)
984 			goto again;
985 	}
986 
987 	/*
988 	 * process mgmt pdus like nops before commands since we should
989 	 * only have one nop-out as a ping from us and targets should not
990 	 * overflow us with nop-ins
991 	 */
992 check_mgmt:
993 	while (!list_empty(&conn->mgmtqueue)) {
994 		conn->mtask = list_entry(conn->mgmtqueue.next,
995 					 struct iscsi_mgmt_task, running);
996 		if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
997 			iscsi_free_mgmt_task(conn, conn->mtask);
998 			conn->mtask = NULL;
999 			continue;
1000 		}
1001 
1002 		iscsi_prep_mtask(conn, conn->mtask);
1003 		list_move_tail(conn->mgmtqueue.next, &conn->mgmt_run_list);
1004 		rc = iscsi_xmit_mtask(conn);
1005 		if (rc)
1006 			goto again;
1007 	}
1008 
1009 	/* process pending command queue */
1010 	while (!list_empty(&conn->xmitqueue)) {
1011 		if (conn->tmf_state == TMF_QUEUED)
1012 			break;
1013 
1014 		conn->ctask = list_entry(conn->xmitqueue.next,
1015 					 struct iscsi_cmd_task, running);
1016 		if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
1017 			fail_command(conn, conn->ctask, DID_IMM_RETRY << 16);
1018 			continue;
1019 		}
1020 		if (iscsi_prep_scsi_cmd_pdu(conn->ctask)) {
1021 			fail_command(conn, conn->ctask, DID_ABORT << 16);
1022 			continue;
1023 		}
1024 
1025 		conn->ctask->state = ISCSI_TASK_RUNNING;
1026 		list_move_tail(conn->xmitqueue.next, &conn->run_list);
1027 		rc = iscsi_xmit_ctask(conn);
1028 		if (rc)
1029 			goto again;
1030 		/*
1031 		 * we could continuously get new ctask requests so
1032 		 * we need to check the mgmt queue for nops that need to
1033 		 * be sent to avoid starvation
1034 		 */
1035 		if (!list_empty(&conn->mgmtqueue))
1036 			goto check_mgmt;
1037 	}
1038 
1039 	while (!list_empty(&conn->requeue)) {
1040 		if (conn->session->fast_abort && conn->tmf_state != TMF_INITIAL)
1041 			break;
1042 
1043 		/*
1044 		 * we always do fastlogout - conn stop code will clean up.
1045 		 */
1046 		if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
1047 			break;
1048 
1049 		conn->ctask = list_entry(conn->requeue.next,
1050 					 struct iscsi_cmd_task, running);
1051 		conn->ctask->state = ISCSI_TASK_RUNNING;
1052 		list_move_tail(conn->requeue.next, &conn->run_list);
1053 		rc = iscsi_xmit_ctask(conn);
1054 		if (rc)
1055 			goto again;
1056 		if (!list_empty(&conn->mgmtqueue))
1057 			goto check_mgmt;
1058 	}
1059 	spin_unlock_bh(&conn->session->lock);
1060 	return -ENODATA;
1061 
1062 again:
1063 	if (unlikely(conn->suspend_tx))
1064 		rc = -ENODATA;
1065 	spin_unlock_bh(&conn->session->lock);
1066 	return rc;
1067 }
1068 
1069 static void iscsi_xmitworker(struct work_struct *work)
1070 {
1071 	struct iscsi_conn *conn =
1072 		container_of(work, struct iscsi_conn, xmitwork);
1073 	int rc;
1074 	/*
1075 	 * serialize Xmit worker on a per-connection basis.
1076 	 */
1077 	do {
1078 		rc = iscsi_data_xmit(conn);
1079 	} while (rc >= 0 || rc == -EAGAIN);
1080 }
1081 
1082 enum {
1083 	FAILURE_BAD_HOST = 1,
1084 	FAILURE_SESSION_FAILED,
1085 	FAILURE_SESSION_FREED,
1086 	FAILURE_WINDOW_CLOSED,
1087 	FAILURE_OOM,
1088 	FAILURE_SESSION_TERMINATE,
1089 	FAILURE_SESSION_IN_RECOVERY,
1090 	FAILURE_SESSION_RECOVERY_TIMEOUT,
1091 	FAILURE_SESSION_LOGGING_OUT,
1092 	FAILURE_SESSION_NOT_READY,
1093 };
1094 
1095 int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
1096 {
1097 	struct Scsi_Host *host;
1098 	int reason = 0;
1099 	struct iscsi_session *session;
1100 	struct iscsi_conn *conn;
1101 	struct iscsi_cmd_task *ctask = NULL;
1102 
1103 	sc->scsi_done = done;
1104 	sc->result = 0;
1105 	sc->SCp.ptr = NULL;
1106 
1107 	host = sc->device->host;
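	/*
	 * queuecommand is entered with the host lock held.  Drop it so
	 * the session lock can be taken instead; it is re-taken on every
	 * exit path before returning to scsi-ml.
	 */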
1108 	spin_unlock(host->host_lock);
1109 
1110 	session = iscsi_hostdata(host->hostdata);
1111 	spin_lock(&session->lock);
1112 
1113 	reason = iscsi_session_chkready(session_to_cls(session));
1114 	if (reason) {
1115 		sc->result = reason;
1116 		goto fault;
1117 	}
1118 
1119 	/*
1120 	 * ISCSI_STATE_FAILED is a temporary state. The recovery
1121 	 * code will decide what is best to do with commands queued
1122 	 * during this time
1123 	 */
1124 	if (session->state != ISCSI_STATE_LOGGED_IN &&
1125 	    session->state != ISCSI_STATE_FAILED) {
1126 		/*
1127 		 * To handle the race between setting the recovery state and
1128 		 * blocking the session, we requeue here (commands could be
1129 		 * entering our queuecommand while a block is starting up,
1130 		 * because the block code is not locked).
1131 		 */
1132 		switch (session->state) {
1133 		case ISCSI_STATE_IN_RECOVERY:
1134 			reason = FAILURE_SESSION_IN_RECOVERY;
1135 			sc->result = DID_IMM_RETRY << 16;
1136 			break;
1137 		case ISCSI_STATE_LOGGING_OUT:
1138 			reason = FAILURE_SESSION_LOGGING_OUT;
1139 			sc->result = DID_IMM_RETRY << 16;
1140 			break;
1141 		case ISCSI_STATE_RECOVERY_FAILED:
1142 			reason = FAILURE_SESSION_RECOVERY_TIMEOUT;
1143 			sc->result = DID_NO_CONNECT << 16;
1144 			break;
1145 		case ISCSI_STATE_TERMINATE:
1146 			reason = FAILURE_SESSION_TERMINATE;
1147 			sc->result = DID_NO_CONNECT << 16;
1148 			break;
1149 		default:
1150 			reason = FAILURE_SESSION_FREED;
1151 			sc->result = DID_NO_CONNECT << 16;
1152 		}
1153 		goto fault;
1154 	}
1155 
1156 	conn = session->leadconn;
1157 	if (!conn) {
1158 		reason = FAILURE_SESSION_FREED;
1159 		sc->result = DID_NO_CONNECT << 16;
1160 		goto fault;
1161 	}
1162 
1163 	if (iscsi_check_cmdsn_window_closed(conn)) {
1164 		reason = FAILURE_WINDOW_CLOSED;
1165 		goto reject;
1166 	}
1167 
1168 	if (!__kfifo_get(session->cmdpool.queue, (void*)&ctask,
1169 			 sizeof(void*))) {
1170 		reason = FAILURE_OOM;
1171 		goto reject;
1172 	}
1173 	session->queued_cmdsn++;
1174 
1175 	sc->SCp.phase = session->age;
1176 	sc->SCp.ptr = (char *)ctask;
1177 
1178 	atomic_set(&ctask->refcount, 1);
1179 	ctask->state = ISCSI_TASK_PENDING;
1180 	ctask->conn = conn;
1181 	ctask->sc = sc;
1182 	INIT_LIST_HEAD(&ctask->running);
1183 
1184 	list_add_tail(&ctask->running, &conn->xmitqueue);
1185 	spin_unlock(&session->lock);
1186 
1187 	scsi_queue_work(host, &conn->xmitwork);
1188 	spin_lock(host->host_lock);
1189 	return 0;
1190 
1191 reject:
1192 	spin_unlock(&session->lock);
1193 	debug_scsi("cmd 0x%x rejected (%d)\n", sc->cmnd[0], reason);
1194 	spin_lock(host->host_lock);
1195 	return SCSI_MLQUEUE_HOST_BUSY;
1196 
1197 fault:
1198 	spin_unlock(&session->lock);
1199 	debug_scsi("iscsi: cmd 0x%x is not queued (%d)\n", sc->cmnd[0], reason);
1200 	if (!scsi_bidi_cmnd(sc))
1201 		scsi_set_resid(sc, scsi_bufflen(sc));
1202 	else {
1203 		scsi_out(sc)->resid = scsi_out(sc)->length;
1204 		scsi_in(sc)->resid = scsi_in(sc)->length;
1205 	}
1206 	sc->scsi_done(sc);
1207 	spin_lock(host->host_lock);
1208 	return 0;
1209 }
1210 EXPORT_SYMBOL_GPL(iscsi_queuecommand);
1211 
1212 int iscsi_change_queue_depth(struct scsi_device *sdev, int depth)
1213 {
1214 	if (depth > ISCSI_MAX_CMD_PER_LUN)
1215 		depth = ISCSI_MAX_CMD_PER_LUN;
1216 	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
1217 	return sdev->queue_depth;
1218 }
1219 EXPORT_SYMBOL_GPL(iscsi_change_queue_depth);
1220 
1221 void iscsi_session_recovery_timedout(struct iscsi_cls_session *cls_session)
1222 {
1223 	struct iscsi_session *session = class_to_transport_session(cls_session);
1224 
1225 	spin_lock_bh(&session->lock);
1226 	if (session->state != ISCSI_STATE_LOGGED_IN) {
1227 		session->state = ISCSI_STATE_RECOVERY_FAILED;
1228 		if (session->leadconn)
1229 			wake_up(&session->leadconn->ehwait);
1230 	}
1231 	spin_unlock_bh(&session->lock);
1232 }
1233 EXPORT_SYMBOL_GPL(iscsi_session_recovery_timedout);
1234 
1235 int iscsi_eh_host_reset(struct scsi_cmnd *sc)
1236 {
1237 	struct Scsi_Host *host = sc->device->host;
1238 	struct iscsi_session *session = iscsi_hostdata(host->hostdata);
1239 	struct iscsi_conn *conn = session->leadconn;
1240 
1241 	mutex_lock(&session->eh_mutex);
1242 	spin_lock_bh(&session->lock);
1243 	if (session->state == ISCSI_STATE_TERMINATE) {
1244 failed:
1245 		debug_scsi("failing host reset: session terminated "
1246 			   "[CID %d age %d]\n", conn->id, session->age);
1247 		spin_unlock_bh(&session->lock);
1248 		mutex_unlock(&session->eh_mutex);
1249 		return FAILED;
1250 	}
1251 
1252 	spin_unlock_bh(&session->lock);
1253 	mutex_unlock(&session->eh_mutex);
1254 	/*
1255 	 * we drop the lock here but the leadconn cannot be destroyed while
1256 	 * we are in the scsi eh
1257 	 */
1258 	iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
1259 
1260 	debug_scsi("iscsi_eh_host_reset wait for relogin\n");
1261 	wait_event_interruptible(conn->ehwait,
1262 				 session->state == ISCSI_STATE_TERMINATE ||
1263 				 session->state == ISCSI_STATE_LOGGED_IN ||
1264 				 session->state == ISCSI_STATE_RECOVERY_FAILED);
1265 	if (signal_pending(current))
1266 		flush_signals(current);
1267 
1268 	mutex_lock(&session->eh_mutex);
1269 	spin_lock_bh(&session->lock);
1270 	if (session->state == ISCSI_STATE_LOGGED_IN)
1271 		iscsi_session_printk(KERN_INFO, session,
1272 				     "host reset succeeded\n");
1273 	else
1274 		goto failed;
1275 	spin_unlock_bh(&session->lock);
1276 	mutex_unlock(&session->eh_mutex);
1277 	return SUCCESS;
1278 }
1279 EXPORT_SYMBOL_GPL(iscsi_eh_host_reset);
1280 
1281 static void iscsi_tmf_timedout(unsigned long data)
1282 {
1283 	struct iscsi_conn *conn = (struct iscsi_conn *)data;
1284 	struct iscsi_session *session = conn->session;
1285 
1286 	spin_lock(&session->lock);
1287 	if (conn->tmf_state == TMF_QUEUED) {
1288 		conn->tmf_state = TMF_TIMEDOUT;
1289 		debug_scsi("tmf timedout\n");
1290 		/* unblock eh_abort() */
1291 		wake_up(&conn->ehwait);
1292 	}
1293 	spin_unlock(&session->lock);
1294 }
1295 
1296 static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
1297 				   struct iscsi_tm *hdr, int age,
1298 				   int timeout)
1299 {
1300 	struct iscsi_session *session = conn->session;
1301 	struct iscsi_mgmt_task *mtask;
1302 
1303 	mtask = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr,
1304 				      NULL, 0);
1305 	if (!mtask) {
1306 		spin_unlock_bh(&session->lock);
1307 		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
1308 		spin_lock_bh(&session->lock);
1309 		debug_scsi("tmf exec failure\n");
1310 		return -EPERM;
1311 	}
1312 	conn->tmfcmd_pdus_cnt++;
1313 	conn->tmf_timer.expires = timeout * HZ + jiffies;
1314 	conn->tmf_timer.function = iscsi_tmf_timedout;
1315 	conn->tmf_timer.data = (unsigned long)conn;
1316 	add_timer(&conn->tmf_timer);
1317 	debug_scsi("tmf set timeout\n");
1318 
1319 	spin_unlock_bh(&session->lock);
1320 	mutex_unlock(&session->eh_mutex);
1321 	scsi_queue_work(session->host, &conn->xmitwork);
1322 
1323 	/*
1324 	 * block eh thread until:
1325 	 *
1326 	 * 1) tmf response
1327 	 * 2) tmf timeout
1328 	 * 3) session is terminated or restarted or userspace has
1329 	 * given up on recovery
1330 	 */
1331 	wait_event_interruptible(conn->ehwait, age != session->age ||
1332 				 session->state != ISCSI_STATE_LOGGED_IN ||
1333 				 conn->tmf_state != TMF_QUEUED);
1334 	if (signal_pending(current))
1335 		flush_signals(current);
1336 	del_timer_sync(&conn->tmf_timer);
1337 
1338 	mutex_lock(&session->eh_mutex);
1339 	spin_lock_bh(&session->lock);
1340 	/* if the session drops it will clean up the mtask */
1341 	if (age != session->age ||
1342 	    session->state != ISCSI_STATE_LOGGED_IN)
1343 		return -ENOTCONN;
1344 	return 0;
1345 }
1346 
1347 /*
1348  * Fail commands. Called with the session lock held, the recv side
1349  * suspended and the xmit thread flushed.
1350  */
1351 static void fail_all_commands(struct iscsi_conn *conn, unsigned lun,
1352 			      int error)
1353 {
1354 	struct iscsi_cmd_task *ctask, *tmp;
1355 
1356 	if (conn->ctask && (conn->ctask->sc->device->lun == lun || lun == -1))
1357 		conn->ctask = NULL;
1358 
1359 	/* flush pending */
1360 	list_for_each_entry_safe(ctask, tmp, &conn->xmitqueue, running) {
1361 		if (lun == ctask->sc->device->lun || lun == -1) {
1362 			debug_scsi("failing pending sc %p itt 0x%x\n",
1363 				   ctask->sc, ctask->itt);
1364 			fail_command(conn, ctask, error << 16);
1365 		}
1366 	}
1367 
1368 	list_for_each_entry_safe(ctask, tmp, &conn->requeue, running) {
1369 		if (lun == ctask->sc->device->lun || lun == -1) {
1370 			debug_scsi("failing requeued sc %p itt 0x%x\n",
1371 				   ctask->sc, ctask->itt);
1372 			fail_command(conn, ctask, error << 16);
1373 		}
1374 	}
1375 
1376 	/* fail all other running */
1377 	list_for_each_entry_safe(ctask, tmp, &conn->run_list, running) {
1378 		if (lun == ctask->sc->device->lun || lun == -1) {
1379 			debug_scsi("failing in progress sc %p itt 0x%x\n",
1380 				   ctask->sc, ctask->itt);
1381 			fail_command(conn, ctask, DID_BUS_BUSY << 16);
1382 		}
1383 	}
1384 }
1385 
1386 static void iscsi_suspend_tx(struct iscsi_conn *conn)
1387 {
1388 	set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
1389 	scsi_flush_work(conn->session->host);
1390 }
1391 
1392 static void iscsi_start_tx(struct iscsi_conn *conn)
1393 {
1394 	clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
1395 	scsi_queue_work(conn->session->host, &conn->xmitwork);
1396 }
1397 
1398 static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
1399 {
1400 	struct iscsi_cls_session *cls_session;
1401 	struct iscsi_session *session;
1402 	struct iscsi_conn *conn;
1403 	enum scsi_eh_timer_return rc = EH_NOT_HANDLED;
1404 
1405 	cls_session = starget_to_session(scsi_target(scmd->device));
1406 	session = class_to_transport_session(cls_session);
1407 
1408 	debug_scsi("scsi cmd %p timedout\n", scmd);
1409 
1410 	spin_lock(&session->lock);
1411 	if (session->state != ISCSI_STATE_LOGGED_IN) {
1412 		/*
1413 		 * We are probably in the middle of iscsi recovery so let
1414 		 * that complete and handle the error.
1415 		 */
1416 		rc = EH_RESET_TIMER;
1417 		goto done;
1418 	}
1419 
1420 	conn = session->leadconn;
1421 	if (!conn) {
1422 		/* In the middle of shutting down */
1423 		rc = EH_RESET_TIMER;
1424 		goto done;
1425 	}
1426 
1427 	if (!conn->recv_timeout && !conn->ping_timeout)
1428 		goto done;
1429 	/*
1430 	 * if the ping timed out then we are in the middle of cleaning up
1431 	 * and can let the iscsi eh handle it
1432 	 */
1433 	if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) +
1434 			    (conn->ping_timeout * HZ), jiffies))
1435 		rc = EH_RESET_TIMER;
1436 	/*
1437 	 * if we are about to check the transport then give the command
1438 	 * more time
1439 	 */
1440 	if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ),
1441 			   jiffies))
1442 		rc = EH_RESET_TIMER;
1443 	/* if in the middle of checking the transport then give us more time */
1444 	if (conn->ping_mtask)
1445 		rc = EH_RESET_TIMER;
1446 done:
1447 	spin_unlock(&session->lock);
1448 	debug_scsi("return %s\n", rc == EH_RESET_TIMER ? "timer reset" : "nh");
1449 	return rc;
1450 }
1451 
1452 static void iscsi_check_transport_timeouts(unsigned long data)
1453 {
1454 	struct iscsi_conn *conn = (struct iscsi_conn *)data;
1455 	struct iscsi_session *session = conn->session;
1456 	unsigned long timeout, next_timeout = 0, last_recv;
1457 
1458 	spin_lock(&session->lock);
1459 	if (session->state != ISCSI_STATE_LOGGED_IN)
1460 		goto done;
1461 
1462 	timeout = conn->recv_timeout;
1463 	if (!timeout)
1464 		goto done;
1465 
1466 	timeout *= HZ;
1467 	last_recv = conn->last_recv;
1468 	if (time_before_eq(last_recv + timeout + (conn->ping_timeout * HZ),
1469 			   jiffies)) {
1470 		iscsi_conn_printk(KERN_ERR, conn, "ping timeout of %d secs "
1471 				  "expired, last rx %lu, last ping %lu, "
1472 				  "now %lu\n", conn->ping_timeout, last_recv,
1473 				  conn->last_ping, jiffies);
1474 		spin_unlock(&session->lock);
1475 		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
1476 		return;
1477 	}
1478 
1479 	if (time_before_eq(last_recv + timeout, jiffies)) {
1480 		if (time_before_eq(conn->last_ping, last_recv)) {
1481 			/* send a ping to try to provoke some traffic */
1482 			debug_scsi("Sending nopout as ping on conn %p\n", conn);
1483 			iscsi_send_nopout(conn, NULL);
1484 		}
1485 		next_timeout = last_recv + timeout + (conn->ping_timeout * HZ);
1486 	} else
1487 		next_timeout = last_recv + timeout;
1488 
1489 	debug_scsi("Setting next tmo %lu\n", next_timeout);
1490 	mod_timer(&conn->transport_timer, next_timeout);
1491 done:
1492 	spin_unlock(&session->lock);
1493 }
1494 
1495 static void iscsi_prep_abort_task_pdu(struct iscsi_cmd_task *ctask,
1496 				      struct iscsi_tm *hdr)
1497 {
1498 	memset(hdr, 0, sizeof(*hdr));
1499 	hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
1500 	hdr->flags = ISCSI_TM_FUNC_ABORT_TASK & ISCSI_FLAG_TM_FUNC_MASK;
1501 	hdr->flags |= ISCSI_FLAG_CMD_FINAL;
1502 	memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
1503 	hdr->rtt = ctask->hdr->itt;
1504 	hdr->refcmdsn = ctask->hdr->cmdsn;
1505 }
1506 
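/*
 * iscsi_eh_abort - abort handler intended for the scsi host template
 *
 * If the command never left the pending queue it is simply failed
 * locally; otherwise an ABORT TASK TMF is sent and, on success, the
 * task is cleaned up with the Tx side suspended and the recv lock
 * held so the LLD cannot race with the teardown.
 */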
1507 int iscsi_eh_abort(struct scsi_cmnd *sc)
1508 {
1509 	struct Scsi_Host *host = sc->device->host;
1510 	struct iscsi_session *session = iscsi_hostdata(host->hostdata);
1511 	struct iscsi_conn *conn;
1512 	struct iscsi_cmd_task *ctask;
1513 	struct iscsi_tm *hdr;
1514 	int rc, age;
1515 
1516 	mutex_lock(&session->eh_mutex);
1517 	spin_lock_bh(&session->lock);
1518 	/*
1519 	 * if session was ISCSI_STATE_IN_RECOVERY then we may not have
1520 	 * got the command.
1521 	 */
1522 	if (!sc->SCp.ptr) {
1523 		debug_scsi("sc never reached iscsi layer or it completed.\n");
1524 		spin_unlock_bh(&session->lock);
1525 		mutex_unlock(&session->eh_mutex);
1526 		return SUCCESS;
1527 	}
1528 
1529 	/*
1530 	 * If we are not logged in or we have started a new session
1531 	 * then let the host reset code handle this
1532 	 */
1533 	if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN ||
1534 	    sc->SCp.phase != session->age) {
1535 		spin_unlock_bh(&session->lock);
1536 		mutex_unlock(&session->eh_mutex);
1537 		return FAILED;
1538 	}
1539 
1540 	conn = session->leadconn;
1541 	conn->eh_abort_cnt++;
1542 	age = session->age;
1543 
1544 	ctask = (struct iscsi_cmd_task *)sc->SCp.ptr;
1545 	debug_scsi("aborting [sc %p itt 0x%x]\n", sc, ctask->itt);
1546 
1547 	/* ctask completed before time out */
1548 	if (!ctask->sc) {
1549 		debug_scsi("sc completed while abort in progress\n");
1550 		goto success;
1551 	}
1552 
1553 	if (ctask->state == ISCSI_TASK_PENDING) {
1554 		fail_command(conn, ctask, DID_ABORT << 16);
1555 		goto success;
1556 	}
1557 
1558 	/* only have one tmf outstanding at a time */
1559 	if (conn->tmf_state != TMF_INITIAL)
1560 		goto failed;
1561 	conn->tmf_state = TMF_QUEUED;
1562 
1563 	hdr = &conn->tmhdr;
1564 	iscsi_prep_abort_task_pdu(ctask, hdr);
1565 
1566 	if (iscsi_exec_task_mgmt_fn(conn, hdr, age, session->abort_timeout)) {
1567 		rc = FAILED;
1568 		goto failed;
1569 	}
1570 
1571 	switch (conn->tmf_state) {
1572 	case TMF_SUCCESS:
1573 		spin_unlock_bh(&session->lock);
1574 		iscsi_suspend_tx(conn);
1575 		/*
1576 		 * clean up task if aborted. grab the recv lock as a writer
1577 		 */
1578 		write_lock_bh(conn->recv_lock);
1579 		spin_lock(&session->lock);
1580 		fail_command(conn, ctask, DID_ABORT << 16);
1581 		conn->tmf_state = TMF_INITIAL;
1582 		spin_unlock(&session->lock);
1583 		write_unlock_bh(conn->recv_lock);
1584 		iscsi_start_tx(conn);
1585 		goto success_unlocked;
1586 	case TMF_TIMEDOUT:
1587 		spin_unlock_bh(&session->lock);
1588 		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
1589 		goto failed_unlocked;
1590 	case TMF_NOT_FOUND:
1591 		if (!sc->SCp.ptr) {
1592 			conn->tmf_state = TMF_INITIAL;
1593 			/* ctask completed before tmf abort response */
1594 			debug_scsi("sc completed while abort in progress\n");
1595 			goto success;
1596 		}
1597 		/* fall through */
1598 	default:
1599 		conn->tmf_state = TMF_INITIAL;
1600 		goto failed;
1601 	}
1602 
1603 success:
1604 	spin_unlock_bh(&session->lock);
1605 success_unlocked:
1606 	debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, ctask->itt);
1607 	mutex_unlock(&session->eh_mutex);
1608 	return SUCCESS;
1609 
1610 failed:
1611 	spin_unlock_bh(&session->lock);
1612 failed_unlocked:
1613 	debug_scsi("abort failed [sc %p itt 0x%x]\n", sc,
1614 		    ctask ? ctask->itt : 0);
1615 	mutex_unlock(&session->eh_mutex);
1616 	return FAILED;
1617 }
1618 EXPORT_SYMBOL_GPL(iscsi_eh_abort);
1619 
1620 static void iscsi_prep_lun_reset_pdu(struct scsi_cmnd *sc, struct iscsi_tm *hdr)
1621 {
1622 	memset(hdr, 0, sizeof(*hdr));
1623 	hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
1624 	hdr->flags = ISCSI_TM_FUNC_LOGICAL_UNIT_RESET & ISCSI_FLAG_TM_FUNC_MASK;
1625 	hdr->flags |= ISCSI_FLAG_CMD_FINAL;
1626 	int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun);
1627 	hdr->rtt = RESERVED_ITT;
1628 }
1629 
1630 int iscsi_eh_device_reset(struct scsi_cmnd *sc)
1631 {
1632 	struct Scsi_Host *host = sc->device->host;
1633 	struct iscsi_session *session = iscsi_hostdata(host->hostdata);
1634 	struct iscsi_conn *conn;
1635 	struct iscsi_tm *hdr;
1636 	int rc = FAILED;
1637 
1638 	debug_scsi("LU Reset [sc %p lun %u]\n", sc, sc->device->lun);
1639 
1640 	mutex_lock(&session->eh_mutex);
1641 	spin_lock_bh(&session->lock);
1642 	/*
1643 	 * Just check if we are not logged in. We cannot check for
1644 	 * the phase because the reset could come from an ioctl.
1645 	 */
1646 	if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN)
1647 		goto unlock;
1648 	conn = session->leadconn;
1649 
1650 	/* only have one tmf outstanding at a time */
1651 	if (conn->tmf_state != TMF_INITIAL)
1652 		goto unlock;
1653 	conn->tmf_state = TMF_QUEUED;
1654 
1655 	hdr = &conn->tmhdr;
1656 	iscsi_prep_lun_reset_pdu(sc, hdr);
1657 
1658 	if (iscsi_exec_task_mgmt_fn(conn, hdr, session->age,
1659 				    session->lu_reset_timeout)) {
1660 		rc = FAILED;
1661 		goto unlock;
1662 	}
1663 
1664 	switch (conn->tmf_state) {
1665 	case TMF_SUCCESS:
1666 		break;
1667 	case TMF_TIMEDOUT:
1668 		spin_unlock_bh(&session->lock);
1669 		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
1670 		goto done;
1671 	default:
1672 		conn->tmf_state = TMF_INITIAL;
1673 		goto unlock;
1674 	}
1675 
1676 	rc = SUCCESS;
1677 	spin_unlock_bh(&session->lock);
1678 
1679 	iscsi_suspend_tx(conn);
1680 	/* need to grab the recv lock then session lock */
1681 	write_lock_bh(conn->recv_lock);
1682 	spin_lock(&session->lock);
1683 	fail_all_commands(conn, sc->device->lun, DID_ERROR);
1684 	conn->tmf_state = TMF_INITIAL;
1685 	spin_unlock(&session->lock);
1686 	write_unlock_bh(conn->recv_lock);
1687 
1688 	iscsi_start_tx(conn);
1689 	goto done;
1690 
1691 unlock:
1692 	spin_unlock_bh(&session->lock);
1693 done:
1694 	debug_scsi("iscsi_eh_device_reset %s\n",
1695 		  rc == SUCCESS ? "SUCCESS" : "FAILED");
1696 	mutex_unlock(&session->eh_mutex);
1697 	return rc;
1698 }
1699 EXPORT_SYMBOL_GPL(iscsi_eh_device_reset);
1700 
1701 /*
1702  * Pre-allocate a pool of @max items of @item_size. By default, the pool
1703  * should be accessed via kfifo_{get,put} on q->queue.
1704  * Optionally, the caller can obtain the array of object pointers
1705  * by passing in a non-NULL @items pointer
1706  */
1707 int
1708 iscsi_pool_init(struct iscsi_pool *q, int max, void ***items, int item_size)
1709 {
1710 	int i, num_arrays = 1;
1711 
1712 	memset(q, 0, sizeof(*q));
1713 
1714 	q->max = max;
1715 
1716 	/* If the caller passed an items pointer, it wants a copy of
1717 	 * the pointer array. */
1718 	if (items)
1719 		num_arrays++;
1720 	q->pool = kzalloc(num_arrays * max * sizeof(void*), GFP_KERNEL);
1721 	if (q->pool == NULL)
1722 		goto enomem;
1723 
1724 	q->queue = kfifo_init((void*)q->pool, max * sizeof(void*),
1725 			      GFP_KERNEL, NULL);
1726 	if (q->queue == ERR_PTR(-ENOMEM))
1727 		goto enomem;
1728 
1729 	for (i = 0; i < max; i++) {
1730 		q->pool[i] = kzalloc(item_size, GFP_KERNEL);
1731 		if (q->pool[i] == NULL) {
1732 			q->max = i;
1733 			goto enomem;
1734 		}
1735 		__kfifo_put(q->queue, (void*)&q->pool[i], sizeof(void*));
1736 	}
1737 
1738 	if (items) {
1739 		*items = q->pool + max;
1740 		memcpy(*items, q->pool, max * sizeof(void *));
1741 	}
1742 
1743 	return 0;
1744 
1745 enomem:
1746 	iscsi_pool_free(q);
1747 	return -ENOMEM;
1748 }
1749 EXPORT_SYMBOL_GPL(iscsi_pool_init);
1750 
1751 void iscsi_pool_free(struct iscsi_pool *q)
1752 {
1753 	int i;
1754 
1755 	for (i = 0; i < q->max; i++)
1756 		kfree(q->pool[i]);
1757 	if (q->pool)
1758 		kfree(q->pool);
1759 }
1760 EXPORT_SYMBOL_GPL(iscsi_pool_free);
1761 
1762 /*
1763  * iSCSI Session's hostdata organization:
1764  *
1765  *    *------------------* <== hostdata_session(host->hostdata)
1766  *    | ptr to class sess|
1767  *    |------------------| <== iscsi_hostdata(host->hostdata)
1768  *    | iscsi_session    |
1769  *    *------------------*
1770  */
1771 
1772 #define hostdata_privsize(_sz)	(sizeof(unsigned long) + _sz + \
1773 				 _sz % sizeof(unsigned long))
1774 
1775 #define hostdata_session(_hostdata) (iscsi_ptr(*(unsigned long *)_hostdata))
1776 
1777 /**
1778  * iscsi_session_setup - create an iscsi cls session and scsi host
1779  * @scsit: scsi transport template
1780  * @iscsit: iscsi transport template
1781  * @cmds_max: number of commands the scsi host can queue
1782  * @qdepth: scsi host cmds per lun
1783  * @cmd_task_size: LLD ctask private data size
1784  * @mgmt_task_size: LLD mtask private data size
1785  * @initial_cmdsn: initial CmdSN
1786  * @hostno: out parameter returning the allocated scsi host number
1787  *
1788  * This can be used by software iscsi_transports that allocate
1789  * a session per scsi host.
1790  **/
1791 struct iscsi_cls_session *
1792 iscsi_session_setup(struct iscsi_transport *iscsit,
1793 		    struct scsi_transport_template *scsit,
1794 		    uint16_t cmds_max, uint16_t qdepth,
1795 		    int cmd_task_size, int mgmt_task_size,
1796 		    uint32_t initial_cmdsn, uint32_t *hostno)
1797 {
1798 	struct Scsi_Host *shost;
1799 	struct iscsi_session *session;
1800 	struct iscsi_cls_session *cls_session;
1801 	int cmd_i;
1802 
1803 	if (qdepth > ISCSI_MAX_CMD_PER_LUN || qdepth < 1) {
1804 		if (qdepth != 0)
1805 			printk(KERN_ERR "iscsi: invalid queue depth of %d. "
1806 			      "Queue depth must be between 1 and %d.\n",
1807 			      qdepth, ISCSI_MAX_CMD_PER_LUN);
1808 		qdepth = ISCSI_DEF_CMD_PER_LUN;
1809 	}
1810 
1811 	if (!is_power_of_2(cmds_max) || cmds_max >= ISCSI_MGMT_ITT_OFFSET ||
1812 	    cmds_max < 2) {
1813 		if (cmds_max != 0)
1814 			printk(KERN_ERR "iscsi: invalid can_queue of %d. "
1815 			       "can_queue must be a power of 2 and between "
1816 			       "2 and %d - setting to %d.\n", cmds_max,
1817 			       ISCSI_MGMT_ITT_OFFSET, ISCSI_DEF_XMIT_CMDS_MAX);
1818 		cmds_max = ISCSI_DEF_XMIT_CMDS_MAX;
1819 	}
1820 
1821 	shost = scsi_host_alloc(iscsit->host_template,
1822 				hostdata_privsize(sizeof(*session)));
1823 	if (!shost)
1824 		return NULL;
1825 
1826 	/* the iscsi layer reserves one task for itself */
1827 	shost->can_queue = cmds_max - 1;
1828 	shost->cmd_per_lun = qdepth;
1829 	shost->max_id = 1;
1830 	shost->max_channel = 0;
1831 	shost->max_lun = iscsit->max_lun;
1832 	shost->max_cmd_len = iscsit->max_cmd_len;
1833 	shost->transportt = scsit;
1834 	shost->transportt->create_work_queue = 1;
1835 	shost->transportt->eh_timed_out = iscsi_eh_cmd_timed_out;
1836 	*hostno = shost->host_no;
1837 
1838 	session = iscsi_hostdata(shost->hostdata);
1839 	memset(session, 0, sizeof(struct iscsi_session));
1840 	session->host = shost;
1841 	session->state = ISCSI_STATE_FREE;
1842 	session->fast_abort = 1;
1843 	session->lu_reset_timeout = 15;
1844 	session->abort_timeout = 10;
1845 	session->mgmtpool_max = ISCSI_MGMT_CMDS_MAX;
1846 	session->cmds_max = cmds_max;
1847 	session->queued_cmdsn = session->cmdsn = initial_cmdsn;
1848 	session->exp_cmdsn = initial_cmdsn + 1;
1849 	session->max_cmdsn = initial_cmdsn + 1;
1850 	session->max_r2t = 1;
1851 	session->tt = iscsit;
1852 	mutex_init(&session->eh_mutex);
1853 
1854 	/* initialize SCSI PDU commands pool */
1855 	if (iscsi_pool_init(&session->cmdpool, session->cmds_max,
1856 			    (void***)&session->cmds,
1857 			    cmd_task_size + sizeof(struct iscsi_cmd_task)))
1858 		goto cmdpool_alloc_fail;
1859 
1860 	/* pre-format cmds pool with ITT */
1861 	for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
1862 		struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
1863 
1864 		if (cmd_task_size)
1865 			ctask->dd_data = &ctask[1];
1866 		ctask->itt = cmd_i;
1867 		INIT_LIST_HEAD(&ctask->running);
1868 	}
1869 
1870 	spin_lock_init(&session->lock);
1871 
1872 	/* initialize immediate command pool */
1873 	if (iscsi_pool_init(&session->mgmtpool, session->mgmtpool_max,
1874 			   (void***)&session->mgmt_cmds,
1875 			   mgmt_task_size + sizeof(struct iscsi_mgmt_task)))
1876 		goto mgmtpool_alloc_fail;
1877 
1878 
1879 	/* pre-format immediate cmds pool with ITT */
1880 	for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++) {
1881 		struct iscsi_mgmt_task *mtask = session->mgmt_cmds[cmd_i];
1882 
1883 		if (mgmt_task_size)
1884 			mtask->dd_data = &mtask[1];
1885 		mtask->itt = ISCSI_MGMT_ITT_OFFSET + cmd_i;
1886 		INIT_LIST_HEAD(&mtask->running);
1887 	}
1888 
1889 	if (scsi_add_host(shost, NULL))
1890 		goto add_host_fail;
1891 
1892 	if (!try_module_get(iscsit->owner))
1893 		goto cls_session_fail;
1894 
1895 	cls_session = iscsi_create_session(shost, iscsit, 0);
1896 	if (!cls_session)
1897 		goto module_put;
1898 	*(unsigned long*)shost->hostdata = (unsigned long)cls_session;
1899 
1900 	return cls_session;
1901 
1902 module_put:
1903 	module_put(iscsit->owner);
1904 cls_session_fail:
1905 	scsi_remove_host(shost);
1906 add_host_fail:
1907 	iscsi_pool_free(&session->mgmtpool);
1908 mgmtpool_alloc_fail:
1909 	iscsi_pool_free(&session->cmdpool);
1910 cmdpool_alloc_fail:
1911 	scsi_host_put(shost);
1912 	return NULL;
1913 }
1914 EXPORT_SYMBOL_GPL(iscsi_session_setup);
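
/*
 * Example (sketch only; the my_* names are hypothetical): a software
 * transport's session-create callback will typically just wrap
 * iscsi_session_setup(), passing its private task sizes, and later
 * undo it with iscsi_session_teardown() below:
 *
 *	static struct iscsi_cls_session *
 *	my_session_create(struct iscsi_transport *iscsit,
 *			  struct scsi_transport_template *scsit,
 *			  uint16_t cmds_max, uint16_t qdepth,
 *			  uint32_t initial_cmdsn, uint32_t *hostno)
 *	{
 *		return iscsi_session_setup(iscsit, scsit, cmds_max, qdepth,
 *					   sizeof(struct my_cmd_task),
 *					   sizeof(struct my_mgmt_task),
 *					   initial_cmdsn, hostno);
 *	}
 */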
1915 
1916 /**
1917  * iscsi_session_teardown - destroy session, host, and cls_session
1918  * @cls_session: iscsi class session
1919  *
1920  * This can be used by software iscsi_transports that allocate
1921  * a session per scsi host.
1922  **/
1923 void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
1924 {
1925 	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
1926 	struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
1927 	struct module *owner = cls_session->transport->owner;
1928 
1929 	iscsi_remove_session(cls_session);
1930 	scsi_remove_host(shost);
1931 
1932 	iscsi_pool_free(&session->mgmtpool);
1933 	iscsi_pool_free(&session->cmdpool);
1934 
1935 	kfree(session->password);
1936 	kfree(session->password_in);
1937 	kfree(session->username);
1938 	kfree(session->username_in);
1939 	kfree(session->targetname);
1940 	kfree(session->netdev);
1941 	kfree(session->hwaddress);
1942 	kfree(session->initiatorname);
1943 
1944 	iscsi_free_session(cls_session);
1945 	scsi_host_put(shost);
1946 	module_put(owner);
1947 }
1948 EXPORT_SYMBOL_GPL(iscsi_session_teardown);
1949 
1950 /**
1951  * iscsi_conn_setup - create iscsi_cls_conn and iscsi_conn
1952  * @cls_session: iscsi_cls_session
1953  * @conn_idx: cid
1954  **/
1955 struct iscsi_cls_conn *
1956 iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
1957 {
1958 	struct iscsi_session *session = class_to_transport_session(cls_session);
1959 	struct iscsi_conn *conn;
1960 	struct iscsi_cls_conn *cls_conn;
1961 	char *data;
1962 
1963 	cls_conn = iscsi_create_conn(cls_session, conn_idx);
1964 	if (!cls_conn)
1965 		return NULL;
1966 	conn = cls_conn->dd_data;
1967 	memset(conn, 0, sizeof(*conn));
1968 
1969 	conn->session = session;
1970 	conn->cls_conn = cls_conn;
1971 	conn->c_stage = ISCSI_CONN_INITIAL_STAGE;
1972 	conn->id = conn_idx;
1973 	conn->exp_statsn = 0;
1974 	conn->tmf_state = TMF_INITIAL;
1975 
1976 	init_timer(&conn->transport_timer);
1977 	conn->transport_timer.data = (unsigned long)conn;
1978 	conn->transport_timer.function = iscsi_check_transport_timeouts;
1979 
1980 	INIT_LIST_HEAD(&conn->run_list);
1981 	INIT_LIST_HEAD(&conn->mgmt_run_list);
1982 	INIT_LIST_HEAD(&conn->mgmtqueue);
1983 	INIT_LIST_HEAD(&conn->xmitqueue);
1984 	INIT_LIST_HEAD(&conn->requeue);
1985 	INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
1986 
1987 	/* allocate login_mtask used for the login/text sequences */
1988 	spin_lock_bh(&session->lock);
1989 	if (!__kfifo_get(session->mgmtpool.queue,
1990                          (void*)&conn->login_mtask,
1991 			 sizeof(void*))) {
1992 		spin_unlock_bh(&session->lock);
1993 		goto login_mtask_alloc_fail;
1994 	}
1995 	spin_unlock_bh(&session->lock);
1996 
1997 	data = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN, GFP_KERNEL);
1998 	if (!data)
1999 		goto login_mtask_data_alloc_fail;
2000 	conn->login_mtask->data = conn->data = data;
2001 
2002 	init_timer(&conn->tmf_timer);
2003 	init_waitqueue_head(&conn->ehwait);
2004 
2005 	return cls_conn;
2006 
2007 login_mtask_data_alloc_fail:
2008 	__kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask,
2009 		    sizeof(void*));
2010 login_mtask_alloc_fail:
2011 	iscsi_destroy_conn(cls_conn);
2012 	return NULL;
2013 }
2014 EXPORT_SYMBOL_GPL(iscsi_conn_setup);
2015 
2016 /**
2017  * iscsi_conn_teardown - teardown iscsi connection
2018  * @cls_conn: iscsi class connection
2019  *
2020  * TODO: we may need to make this a two-step process,
2021  * like the scsi midlayer's remove host + put host
2022  */
2023 void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
2024 {
2025 	struct iscsi_conn *conn = cls_conn->dd_data;
2026 	struct iscsi_session *session = conn->session;
2027 	unsigned long flags;
2028 
2029 	del_timer_sync(&conn->transport_timer);
2030 
2031 	spin_lock_bh(&session->lock);
2032 	conn->c_stage = ISCSI_CONN_CLEANUP_WAIT;
2033 	if (session->leadconn == conn) {
2034 		/*
2035 		 * this is the leading connection, so give up on recovery.
2036 		 */
2037 		session->state = ISCSI_STATE_TERMINATE;
2038 		wake_up(&conn->ehwait);
2039 	}
2040 	spin_unlock_bh(&session->lock);
2041 
2042 	/*
2043 	 * Block until all in-progress commands for this connection
2044 	 * time out or fail.
2045 	 */
2046 	for (;;) {
2047 		spin_lock_irqsave(session->host->host_lock, flags);
2048 		if (!session->host->host_busy) { /* OK for ERL == 0 */
2049 			spin_unlock_irqrestore(session->host->host_lock, flags);
2050 			break;
2051 		}
2052 		spin_unlock_irqrestore(session->host->host_lock, flags);
2053 		msleep_interruptible(500);
2054 		iscsi_conn_printk(KERN_INFO, conn, "iscsi conn_teardown(): "
2055 				  "host_busy %d host_failed %d\n",
2056 				  session->host->host_busy,
2057 				  session->host->host_failed);
2058 		/*
2059 		 * force eh_abort() to unblock
2060 		 */
2061 		wake_up(&conn->ehwait);
2062 	}
2063 
2064 	/* flush queued up work because we free the connection below */
2065 	iscsi_suspend_tx(conn);
2066 
2067 	spin_lock_bh(&session->lock);
2068 	kfree(conn->data);
2069 	kfree(conn->persistent_address);
2070 	__kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask,
2071 		    sizeof(void*));
2072 	if (session->leadconn == conn)
2073 		session->leadconn = NULL;
2074 	spin_unlock_bh(&session->lock);
2075 
2076 	iscsi_destroy_conn(cls_conn);
2077 }
2078 EXPORT_SYMBOL_GPL(iscsi_conn_teardown);
2079 
2080 int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
2081 {
2082 	struct iscsi_conn *conn = cls_conn->dd_data;
2083 	struct iscsi_session *session = conn->session;
2084 
2085 	if (!session) {
2086 		iscsi_conn_printk(KERN_ERR, conn,
2087 				  "can't start unbound connection\n");
2088 		return -EPERM;
2089 	}
2090 
2091 	if ((session->imm_data_en || !session->initial_r2t_en) &&
2092 	     session->first_burst > session->max_burst) {
2093 		iscsi_conn_printk(KERN_INFO, conn, "invalid burst lengths: "
2094 				  "first_burst %d max_burst %d\n",
2095 				  session->first_burst, session->max_burst);
2096 		return -EINVAL;
2097 	}
2098 
2099 	if (conn->ping_timeout && !conn->recv_timeout) {
2100 		iscsi_conn_printk(KERN_ERR, conn, "invalid recv timeout of "
2101 				  "zero. Using 5 seconds\n.");
2102 				  "zero. Using 5 seconds.\n");
2103 	}
2104 
2105 	if (conn->recv_timeout && !conn->ping_timeout) {
2106 		iscsi_conn_printk(KERN_ERR, conn, "invalid ping timeout of "
2107 				  "zero. Using 5 seconds.\n");
2108 		conn->ping_timeout = 5;
2109 	}
2110 
2111 	spin_lock_bh(&session->lock);
2112 	conn->c_stage = ISCSI_CONN_STARTED;
2113 	session->state = ISCSI_STATE_LOGGED_IN;
2114 	session->queued_cmdsn = session->cmdsn;
2115 
2116 	conn->last_recv = jiffies;
2117 	conn->last_ping = jiffies;
2118 	if (conn->recv_timeout && conn->ping_timeout)
2119 		mod_timer(&conn->transport_timer,
2120 			  jiffies + (conn->recv_timeout * HZ));
2121 
2122 	switch(conn->stop_stage) {
2123 	case STOP_CONN_RECOVER:
2124 		/*
2125 		 * unblock eh_abort() if it is blocked. re-try all
2126 		 * commands after successful recovery
2127 		 */
2128 		conn->stop_stage = 0;
2129 		conn->tmf_state = TMF_INITIAL;
2130 		session->age++;
2131 		if (session->age == 16)
2132 			session->age = 0;
2133 		break;
2134 	case STOP_CONN_TERM:
2135 		conn->stop_stage = 0;
2136 		break;
2137 	default:
2138 		break;
2139 	}
2140 	spin_unlock_bh(&session->lock);
2141 
2142 	iscsi_unblock_session(session_to_cls(session));
2143 	wake_up(&conn->ehwait);
2144 	return 0;
2145 }
2146 EXPORT_SYMBOL_GPL(iscsi_conn_start);
2147 
2148 static void
2149 flush_control_queues(struct iscsi_session *session, struct iscsi_conn *conn)
2150 {
2151 	struct iscsi_mgmt_task *mtask, *tmp;
2152 
2153 	/* handle pending */
2154 	list_for_each_entry_safe(mtask, tmp, &conn->mgmtqueue, running) {
2155 		debug_scsi("flushing pending mgmt task itt 0x%x\n", mtask->itt);
2156 		iscsi_free_mgmt_task(conn, mtask);
2157 	}
2158 
2159 	/* handle running */
2160 	list_for_each_entry_safe(mtask, tmp, &conn->mgmt_run_list, running) {
2161 		debug_scsi("flushing running mgmt task itt 0x%x\n", mtask->itt);
2162 		iscsi_free_mgmt_task(conn, mtask);
2163 	}
2164 
2165 	conn->mtask = NULL;
2166 }
2167 
2168 static void iscsi_start_session_recovery(struct iscsi_session *session,
2169 					 struct iscsi_conn *conn, int flag)
2170 {
2171 	int old_stop_stage;
2172 
2173 	del_timer_sync(&conn->transport_timer);
2174 
2175 	mutex_lock(&session->eh_mutex);
2176 	spin_lock_bh(&session->lock);
2177 	if (conn->stop_stage == STOP_CONN_TERM) {
2178 		spin_unlock_bh(&session->lock);
2179 		mutex_unlock(&session->eh_mutex);
2180 		return;
2181 	}
2182 
2183 	/*
2184 	 * The LLD either freed/unset the lock on us, or userspace called
2185 	 * stop but did not create a proper connection (connection was never
2186 	 * bound or it was unbound then stop was called).
2187 	 */
2188 	if (!conn->recv_lock) {
2189 		spin_unlock_bh(&session->lock);
2190 		mutex_unlock(&session->eh_mutex);
2191 		return;
2192 	}
2193 
2194 	/*
2195 	 * When this is called for the in_login state, we only want to clean
2196 	 * up the login task and connection. We do not need to block and set
2197 	 * the recovery state again.
2198 	 */
2199 	if (flag == STOP_CONN_TERM)
2200 		session->state = ISCSI_STATE_TERMINATE;
2201 	else if (conn->stop_stage != STOP_CONN_RECOVER)
2202 		session->state = ISCSI_STATE_IN_RECOVERY;
2203 
2204 	old_stop_stage = conn->stop_stage;
2205 	conn->stop_stage = flag;
2206 	conn->c_stage = ISCSI_CONN_STOPPED;
2207 	spin_unlock_bh(&session->lock);
2208 
2209 	iscsi_suspend_tx(conn);
2210 
2211 	write_lock_bh(conn->recv_lock);
2212 	set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
2213 	write_unlock_bh(conn->recv_lock);
2214 
2215 	/*
2216 	 * For connection-level recovery we should not calculate the
2217 	 * header digest. conn->hdr_size is used as an optimization
2218 	 * in hdr_extract() and will be re-negotiated at
2219 	 * set_param() time.
2220 	 */
2221 	if (flag == STOP_CONN_RECOVER) {
2222 		conn->hdrdgst_en = 0;
2223 		conn->datadgst_en = 0;
2224 		if (session->state == ISCSI_STATE_IN_RECOVERY &&
2225 		    old_stop_stage != STOP_CONN_RECOVER) {
2226 			debug_scsi("blocking session\n");
2227 			iscsi_block_session(session_to_cls(session));
2228 		}
2229 	}
2230 
2231 	/*
2232 	 * flush queues.
2233 	 */
2234 	spin_lock_bh(&session->lock);
2235 	fail_all_commands(conn, -1,
2236 			flag == STOP_CONN_RECOVER ? DID_BUS_BUSY : DID_ERROR);
2237 	flush_control_queues(session, conn);
2238 	spin_unlock_bh(&session->lock);
2239 	mutex_unlock(&session->eh_mutex);
2240 }
2241 
2242 void iscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
2243 {
2244 	struct iscsi_conn *conn = cls_conn->dd_data;
2245 	struct iscsi_session *session = conn->session;
2246 
2247 	switch (flag) {
2248 	case STOP_CONN_RECOVER:
2249 	case STOP_CONN_TERM:
2250 		iscsi_start_session_recovery(session, conn, flag);
2251 		break;
2252 	default:
2253 		iscsi_conn_printk(KERN_ERR, conn,
2254 				  "invalid stop flag %d\n", flag);
2255 	}
2256 }
2257 EXPORT_SYMBOL_GPL(iscsi_conn_stop);
2258 
2259 int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
2260 		    struct iscsi_cls_conn *cls_conn, int is_leading)
2261 {
2262 	struct iscsi_session *session = class_to_transport_session(cls_session);
2263 	struct iscsi_conn *conn = cls_conn->dd_data;
2264 
2265 	spin_lock_bh(&session->lock);
2266 	if (is_leading)
2267 		session->leadconn = conn;
2268 	spin_unlock_bh(&session->lock);
2269 
2270 	/*
2271 	 * Unblock xmitworker() so the Login Phase can proceed.
2272 	 */
2273 	clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
2274 	clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
2275 	return 0;
2276 }
2277 EXPORT_SYMBOL_GPL(iscsi_conn_bind);
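
/*
 * The typical connection bring-up, as driven through the transport
 * class, looks roughly like this (sketch; error handling omitted):
 *
 *	cls_conn = iscsi_conn_setup(cls_session, 0);
 *	iscsi_conn_bind(cls_session, cls_conn, 1);	// leading connection
 *	// ... iscsi_set_param() calls for the negotiated values ...
 *	iscsi_conn_start(cls_conn);
 *
 * The reverse path is iscsi_conn_stop() followed by iscsi_conn_teardown().
 */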
2278 
2279 
2280 int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
2281 		    enum iscsi_param param, char *buf, int buflen)
2282 {
2283 	struct iscsi_conn *conn = cls_conn->dd_data;
2284 	struct iscsi_session *session = conn->session;
2285 	uint32_t value;
2286 
2287 	switch(param) {
2288 	case ISCSI_PARAM_FAST_ABORT:
2289 		sscanf(buf, "%d", &session->fast_abort);
2290 		break;
2291 	case ISCSI_PARAM_ABORT_TMO:
2292 		sscanf(buf, "%d", &session->abort_timeout);
2293 		break;
2294 	case ISCSI_PARAM_LU_RESET_TMO:
2295 		sscanf(buf, "%d", &session->lu_reset_timeout);
2296 		break;
2297 	case ISCSI_PARAM_PING_TMO:
2298 		sscanf(buf, "%d", &conn->ping_timeout);
2299 		break;
2300 	case ISCSI_PARAM_RECV_TMO:
2301 		sscanf(buf, "%d", &conn->recv_timeout);
2302 		break;
2303 	case ISCSI_PARAM_MAX_RECV_DLENGTH:
2304 		sscanf(buf, "%d", &conn->max_recv_dlength);
2305 		break;
2306 	case ISCSI_PARAM_MAX_XMIT_DLENGTH:
2307 		sscanf(buf, "%d", &conn->max_xmit_dlength);
2308 		break;
2309 	case ISCSI_PARAM_HDRDGST_EN:
2310 		sscanf(buf, "%d", &conn->hdrdgst_en);
2311 		break;
2312 	case ISCSI_PARAM_DATADGST_EN:
2313 		sscanf(buf, "%d", &conn->datadgst_en);
2314 		break;
2315 	case ISCSI_PARAM_INITIAL_R2T_EN:
2316 		sscanf(buf, "%d", &session->initial_r2t_en);
2317 		break;
2318 	case ISCSI_PARAM_MAX_R2T:
2319 		sscanf(buf, "%d", &session->max_r2t);
2320 		break;
2321 	case ISCSI_PARAM_IMM_DATA_EN:
2322 		sscanf(buf, "%d", &session->imm_data_en);
2323 		break;
2324 	case ISCSI_PARAM_FIRST_BURST:
2325 		sscanf(buf, "%d", &session->first_burst);
2326 		break;
2327 	case ISCSI_PARAM_MAX_BURST:
2328 		sscanf(buf, "%d", &session->max_burst);
2329 		break;
2330 	case ISCSI_PARAM_PDU_INORDER_EN:
2331 		sscanf(buf, "%d", &session->pdu_inorder_en);
2332 		break;
2333 	case ISCSI_PARAM_DATASEQ_INORDER_EN:
2334 		sscanf(buf, "%d", &session->dataseq_inorder_en);
2335 		break;
2336 	case ISCSI_PARAM_ERL:
2337 		sscanf(buf, "%d", &session->erl);
2338 		break;
2339 	case ISCSI_PARAM_IFMARKER_EN:
2340 		sscanf(buf, "%d", &value);
2341 		BUG_ON(value);
2342 		break;
2343 	case ISCSI_PARAM_OFMARKER_EN:
2344 		sscanf(buf, "%d", &value);
2345 		BUG_ON(value);
2346 		break;
2347 	case ISCSI_PARAM_EXP_STATSN:
2348 		sscanf(buf, "%u", &conn->exp_statsn);
2349 		break;
2350 	case ISCSI_PARAM_USERNAME:
2351 		kfree(session->username);
2352 		session->username = kstrdup(buf, GFP_KERNEL);
2353 		if (!session->username)
2354 			return -ENOMEM;
2355 		break;
2356 	case ISCSI_PARAM_USERNAME_IN:
2357 		kfree(session->username_in);
2358 		session->username_in = kstrdup(buf, GFP_KERNEL);
2359 		if (!session->username_in)
2360 			return -ENOMEM;
2361 		break;
2362 	case ISCSI_PARAM_PASSWORD:
2363 		kfree(session->password);
2364 		session->password = kstrdup(buf, GFP_KERNEL);
2365 		if (!session->password)
2366 			return -ENOMEM;
2367 		break;
2368 	case ISCSI_PARAM_PASSWORD_IN:
2369 		kfree(session->password_in);
2370 		session->password_in = kstrdup(buf, GFP_KERNEL);
2371 		if (!session->password_in)
2372 			return -ENOMEM;
2373 		break;
2374 	case ISCSI_PARAM_TARGET_NAME:
2375 		/* this should not change between logins */
2376 		if (session->targetname)
2377 			break;
2378 
2379 		session->targetname = kstrdup(buf, GFP_KERNEL);
2380 		if (!session->targetname)
2381 			return -ENOMEM;
2382 		break;
2383 	case ISCSI_PARAM_TPGT:
2384 		sscanf(buf, "%d", &session->tpgt);
2385 		break;
2386 	case ISCSI_PARAM_PERSISTENT_PORT:
2387 		sscanf(buf, "%d", &conn->persistent_port);
2388 		break;
2389 	case ISCSI_PARAM_PERSISTENT_ADDRESS:
2390 		/*
2391 		 * this is the address returned in discovery so it should
2392 		 * not change between logins.
2393 		 */
2394 		if (conn->persistent_address)
2395 			break;
2396 
2397 		conn->persistent_address = kstrdup(buf, GFP_KERNEL);
2398 		if (!conn->persistent_address)
2399 			return -ENOMEM;
2400 		break;
2401 	default:
2402 		return -ENOSYS;
2403 	}
2404 
2405 	return 0;
2406 }
2407 EXPORT_SYMBOL_GPL(iscsi_set_param);
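
/*
 * Parameter values arrive as text, so callers pass printable strings
 * that are parsed with sscanf()/kstrdup() above, e.g. (illustrative
 * only):
 *
 *	iscsi_set_param(cls_conn, ISCSI_PARAM_MAX_RECV_DLENGTH, "262144", 7);
 *	iscsi_set_param(cls_conn, ISCSI_PARAM_HDRDGST_EN, "1", 2);
 *
 * LLDs that need to post-process a value usually call this function from
 * their own set_param handler and fall back to it for the generic
 * parameters.
 */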
2408 
2409 int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
2410 			    enum iscsi_param param, char *buf)
2411 {
2412 	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
2413 	struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
2414 	int len;
2415 
2416 	switch(param) {
2417 	case ISCSI_PARAM_FAST_ABORT:
2418 		len = sprintf(buf, "%d\n", session->fast_abort);
2419 		break;
2420 	case ISCSI_PARAM_ABORT_TMO:
2421 		len = sprintf(buf, "%d\n", session->abort_timeout);
2422 		break;
2423 	case ISCSI_PARAM_LU_RESET_TMO:
2424 		len = sprintf(buf, "%d\n", session->lu_reset_timeout);
2425 		break;
2426 	case ISCSI_PARAM_INITIAL_R2T_EN:
2427 		len = sprintf(buf, "%d\n", session->initial_r2t_en);
2428 		break;
2429 	case ISCSI_PARAM_MAX_R2T:
2430 		len = sprintf(buf, "%hu\n", session->max_r2t);
2431 		break;
2432 	case ISCSI_PARAM_IMM_DATA_EN:
2433 		len = sprintf(buf, "%d\n", session->imm_data_en);
2434 		break;
2435 	case ISCSI_PARAM_FIRST_BURST:
2436 		len = sprintf(buf, "%u\n", session->first_burst);
2437 		break;
2438 	case ISCSI_PARAM_MAX_BURST:
2439 		len = sprintf(buf, "%u\n", session->max_burst);
2440 		break;
2441 	case ISCSI_PARAM_PDU_INORDER_EN:
2442 		len = sprintf(buf, "%d\n", session->pdu_inorder_en);
2443 		break;
2444 	case ISCSI_PARAM_DATASEQ_INORDER_EN:
2445 		len = sprintf(buf, "%d\n", session->dataseq_inorder_en);
2446 		break;
2447 	case ISCSI_PARAM_ERL:
2448 		len = sprintf(buf, "%d\n", session->erl);
2449 		break;
2450 	case ISCSI_PARAM_TARGET_NAME:
2451 		len = sprintf(buf, "%s\n", session->targetname);
2452 		break;
2453 	case ISCSI_PARAM_TPGT:
2454 		len = sprintf(buf, "%d\n", session->tpgt);
2455 		break;
2456 	case ISCSI_PARAM_USERNAME:
2457 		len = sprintf(buf, "%s\n", session->username);
2458 		break;
2459 	case ISCSI_PARAM_USERNAME_IN:
2460 		len = sprintf(buf, "%s\n", session->username_in);
2461 		break;
2462 	case ISCSI_PARAM_PASSWORD:
2463 		len = sprintf(buf, "%s\n", session->password);
2464 		break;
2465 	case ISCSI_PARAM_PASSWORD_IN:
2466 		len = sprintf(buf, "%s\n", session->password_in);
2467 		break;
2468 	default:
2469 		return -ENOSYS;
2470 	}
2471 
2472 	return len;
2473 }
2474 EXPORT_SYMBOL_GPL(iscsi_session_get_param);
2475 
2476 int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
2477 			 enum iscsi_param param, char *buf)
2478 {
2479 	struct iscsi_conn *conn = cls_conn->dd_data;
2480 	int len;
2481 
2482 	switch(param) {
2483 	case ISCSI_PARAM_PING_TMO:
2484 		len = sprintf(buf, "%u\n", conn->ping_timeout);
2485 		break;
2486 	case ISCSI_PARAM_RECV_TMO:
2487 		len = sprintf(buf, "%u\n", conn->recv_timeout);
2488 		break;
2489 	case ISCSI_PARAM_MAX_RECV_DLENGTH:
2490 		len = sprintf(buf, "%u\n", conn->max_recv_dlength);
2491 		break;
2492 	case ISCSI_PARAM_MAX_XMIT_DLENGTH:
2493 		len = sprintf(buf, "%u\n", conn->max_xmit_dlength);
2494 		break;
2495 	case ISCSI_PARAM_HDRDGST_EN:
2496 		len = sprintf(buf, "%d\n", conn->hdrdgst_en);
2497 		break;
2498 	case ISCSI_PARAM_DATADGST_EN:
2499 		len = sprintf(buf, "%d\n", conn->datadgst_en);
2500 		break;
2501 	case ISCSI_PARAM_IFMARKER_EN:
2502 		len = sprintf(buf, "%d\n", conn->ifmarker_en);
2503 		break;
2504 	case ISCSI_PARAM_OFMARKER_EN:
2505 		len = sprintf(buf, "%d\n", conn->ofmarker_en);
2506 		break;
2507 	case ISCSI_PARAM_EXP_STATSN:
2508 		len = sprintf(buf, "%u\n", conn->exp_statsn);
2509 		break;
2510 	case ISCSI_PARAM_PERSISTENT_PORT:
2511 		len = sprintf(buf, "%d\n", conn->persistent_port);
2512 		break;
2513 	case ISCSI_PARAM_PERSISTENT_ADDRESS:
2514 		len = sprintf(buf, "%s\n", conn->persistent_address);
2515 		break;
2516 	default:
2517 		return -ENOSYS;
2518 	}
2519 
2520 	return len;
2521 }
2522 EXPORT_SYMBOL_GPL(iscsi_conn_get_param);
2523 
2524 int iscsi_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param,
2525 			 char *buf)
2526 {
2527 	struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
2528 	int len;
2529 
2530 	switch (param) {
2531 	case ISCSI_HOST_PARAM_NETDEV_NAME:
2532 		if (!session->netdev)
2533 			len = sprintf(buf, "%s\n", "default");
2534 		else
2535 			len = sprintf(buf, "%s\n", session->netdev);
2536 		break;
2537 	case ISCSI_HOST_PARAM_HWADDRESS:
2538 		if (!session->hwaddress)
2539 			len = sprintf(buf, "%s\n", "default");
2540 		else
2541 			len = sprintf(buf, "%s\n", session->hwaddress);
2542 		break;
2543 	case ISCSI_HOST_PARAM_INITIATOR_NAME:
2544 		if (!session->initiatorname)
2545 			len = sprintf(buf, "%s\n", "unknown");
2546 		else
2547 			len = sprintf(buf, "%s\n", session->initiatorname);
2548 		break;
2549 
2550 	default:
2551 		return -ENOSYS;
2552 	}
2553 
2554 	return len;
2555 }
2556 EXPORT_SYMBOL_GPL(iscsi_host_get_param);
2557 
2558 int iscsi_host_set_param(struct Scsi_Host *shost, enum iscsi_host_param param,
2559 			 char *buf, int buflen)
2560 {
2561 	struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
2562 
2563 	switch (param) {
2564 	case ISCSI_HOST_PARAM_NETDEV_NAME:
2565 		if (!session->netdev)
2566 			session->netdev = kstrdup(buf, GFP_KERNEL);
2567 		break;
2568 	case ISCSI_HOST_PARAM_HWADDRESS:
2569 		if (!session->hwaddress)
2570 			session->hwaddress = kstrdup(buf, GFP_KERNEL);
2571 		break;
2572 	case ISCSI_HOST_PARAM_INITIATOR_NAME:
2573 		if (!session->initiatorname)
2574 			session->initiatorname = kstrdup(buf, GFP_KERNEL);
2575 		break;
2576 	default:
2577 		return -ENOSYS;
2578 	}
2579 
2580 	return 0;
2581 }
2582 EXPORT_SYMBOL_GPL(iscsi_host_set_param);
2583 
2584 MODULE_AUTHOR("Mike Christie");
2585 MODULE_DESCRIPTION("iSCSI library functions");
2586 MODULE_LICENSE("GPL");
2587