xref: /linux/drivers/target/iscsi/iscsi_target_util.c (revision b43ab901d671e3e3cad425ea5e9a3c74e266dcdd)
1 /*******************************************************************************
2  * This file contains the iSCSI Target specific utility functions.
3  *
4  * © Copyright 2007-2011 RisingTide Systems LLC.
5  *
6  * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
7  *
8  * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
9  *
10  * This program is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License as published by
12  * the Free Software Foundation; either version 2 of the License, or
13  * (at your option) any later version.
14  *
15  * This program is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18  * GNU General Public License for more details.
19  ******************************************************************************/
20 
21 #include <linux/list.h>
22 #include <scsi/scsi_tcq.h>
23 #include <scsi/iscsi_proto.h>
24 #include <target/target_core_base.h>
25 #include <target/target_core_fabric.h>
26 #include <target/target_core_configfs.h>
27 
28 #include "iscsi_target_core.h"
29 #include "iscsi_target_parameters.h"
30 #include "iscsi_target_seq_pdu_list.h"
31 #include "iscsi_target_datain_values.h"
32 #include "iscsi_target_erl0.h"
33 #include "iscsi_target_erl1.h"
34 #include "iscsi_target_erl2.h"
35 #include "iscsi_target_tpg.h"
36 #include "iscsi_target_tq.h"
37 #include "iscsi_target_util.h"
38 #include "iscsi_target.h"
39 
40 #define PRINT_BUFF(buff, len)					\
41 {								\
42 	int zzz;						\
43 								\
44 	pr_debug("%d:\n", __LINE__);				\
45 	for (zzz = 0; zzz < len; zzz++) {			\
46 		if (zzz % 16 == 0) {				\
47 			if (zzz)				\
48 				pr_debug("\n");			\
49 			pr_debug("%4i: ", zzz);			\
50 		}						\
51 		pr_debug("%02x ", (unsigned char) (buff)[zzz]);	\
52 	}							\
53 	if ((len + 1) % 16)					\
54 		pr_debug("\n");					\
55 }
56 
57 extern struct list_head g_tiqn_list;
58 extern spinlock_t tiqn_lock;
59 
60 /*
61  *	Called with cmd->r2t_lock held.
62  */
63 int iscsit_add_r2t_to_list(
64 	struct iscsi_cmd *cmd,
65 	u32 offset,
66 	u32 xfer_len,
67 	int recovery,
68 	u32 r2t_sn)
69 {
70 	struct iscsi_r2t *r2t;
71 
72 	r2t = kmem_cache_zalloc(lio_r2t_cache, GFP_ATOMIC);
73 	if (!r2t) {
74 		pr_err("Unable to allocate memory for struct iscsi_r2t.\n");
75 		return -1;
76 	}
77 	INIT_LIST_HEAD(&r2t->r2t_list);
78 
79 	r2t->recovery_r2t = recovery;
80 	r2t->r2t_sn = (!r2t_sn) ? cmd->r2t_sn++ : r2t_sn;
81 	r2t->offset = offset;
82 	r2t->xfer_len = xfer_len;
83 	list_add_tail(&r2t->r2t_list, &cmd->cmd_r2t_list);
84 	spin_unlock_bh(&cmd->r2t_lock);
85 
86 	iscsit_add_cmd_to_immediate_queue(cmd, cmd->conn, ISTATE_SEND_R2T);
87 
88 	spin_lock_bh(&cmd->r2t_lock);
89 	return 0;
90 }
91 
92 struct iscsi_r2t *iscsit_get_r2t_for_eos(
93 	struct iscsi_cmd *cmd,
94 	u32 offset,
95 	u32 length)
96 {
97 	struct iscsi_r2t *r2t;
98 
99 	spin_lock_bh(&cmd->r2t_lock);
100 	list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
101 		if ((r2t->offset <= offset) &&
102 		    (r2t->offset + r2t->xfer_len) >= (offset + length)) {
103 			spin_unlock_bh(&cmd->r2t_lock);
104 			return r2t;
105 		}
106 	}
107 	spin_unlock_bh(&cmd->r2t_lock);
108 
109 	pr_err("Unable to locate R2T for Offset: %u, Length:"
110 			" %u\n", offset, length);
111 	return NULL;
112 }
113 
114 struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsi_cmd *cmd)
115 {
116 	struct iscsi_r2t *r2t;
117 
118 	spin_lock_bh(&cmd->r2t_lock);
119 	list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
120 		if (!r2t->sent_r2t) {
121 			spin_unlock_bh(&cmd->r2t_lock);
122 			return r2t;
123 		}
124 	}
125 	spin_unlock_bh(&cmd->r2t_lock);
126 
127 	pr_err("Unable to locate next R2T to send for ITT:"
128 			" 0x%08x.\n", cmd->init_task_tag);
129 	return NULL;
130 }
131 
132 /*
133  *	Called with cmd->r2t_lock held.
134  */
135 void iscsit_free_r2t(struct iscsi_r2t *r2t, struct iscsi_cmd *cmd)
136 {
137 	list_del(&r2t->r2t_list);
138 	kmem_cache_free(lio_r2t_cache, r2t);
139 }
140 
141 void iscsit_free_r2ts_from_list(struct iscsi_cmd *cmd)
142 {
143 	struct iscsi_r2t *r2t, *r2t_tmp;
144 
145 	spin_lock_bh(&cmd->r2t_lock);
146 	list_for_each_entry_safe(r2t, r2t_tmp, &cmd->cmd_r2t_list, r2t_list)
147 		iscsit_free_r2t(r2t, cmd);
148 	spin_unlock_bh(&cmd->r2t_lock);
149 }
150 
151 /*
152  * May be called from software interrupt (timer) context for allocating
153  * iSCSI NopINs.
154  */
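/*
 * Note: callers in timer (softirq) context must therefore pass GFP_ATOMIC
 * in gfp_mask, as iscsit_add_nopin() does further below.
 */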
155 struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp_mask)
156 {
157 	struct iscsi_cmd *cmd;
158 
159 	cmd = kmem_cache_zalloc(lio_cmd_cache, gfp_mask);
160 	if (!cmd) {
161 		pr_err("Unable to allocate memory for struct iscsi_cmd.\n");
162 		return NULL;
163 	}
164 
165 	cmd->conn	= conn;
166 	INIT_LIST_HEAD(&cmd->i_list);
167 	INIT_LIST_HEAD(&cmd->datain_list);
168 	INIT_LIST_HEAD(&cmd->cmd_r2t_list);
169 	init_completion(&cmd->reject_comp);
170 	spin_lock_init(&cmd->datain_lock);
171 	spin_lock_init(&cmd->dataout_timeout_lock);
172 	spin_lock_init(&cmd->istate_lock);
173 	spin_lock_init(&cmd->error_lock);
174 	spin_lock_init(&cmd->r2t_lock);
175 
176 	return cmd;
177 }
178 
179 /*
180  * Called from iscsi_handle_scsi_cmd()
181  */
182 struct iscsi_cmd *iscsit_allocate_se_cmd(
183 	struct iscsi_conn *conn,
184 	u32 data_length,
185 	int data_direction,
186 	int iscsi_task_attr)
187 {
188 	struct iscsi_cmd *cmd;
189 	struct se_cmd *se_cmd;
190 	int sam_task_attr;
191 
192 	cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
193 	if (!cmd)
194 		return NULL;
195 
196 	cmd->data_direction = data_direction;
197 	cmd->data_length = data_length;
198 	/*
199 	 * Figure out the SAM Task Attribute for the incoming SCSI CDB
200 	 */
201 	if ((iscsi_task_attr == ISCSI_ATTR_UNTAGGED) ||
202 	    (iscsi_task_attr == ISCSI_ATTR_SIMPLE))
203 		sam_task_attr = MSG_SIMPLE_TAG;
204 	else if (iscsi_task_attr == ISCSI_ATTR_ORDERED)
205 		sam_task_attr = MSG_ORDERED_TAG;
206 	else if (iscsi_task_attr == ISCSI_ATTR_HEAD_OF_QUEUE)
207 		sam_task_attr = MSG_HEAD_TAG;
208 	else if (iscsi_task_attr == ISCSI_ATTR_ACA)
209 		sam_task_attr = MSG_ACA_TAG;
210 	else {
211 		pr_debug("Unknown iSCSI Task Attribute: 0x%02x, using"
212 			" MSG_SIMPLE_TAG\n", iscsi_task_attr);
213 		sam_task_attr = MSG_SIMPLE_TAG;
214 	}
215 
216 	se_cmd = &cmd->se_cmd;
217 	/*
218 	 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
219 	 */
220 	transport_init_se_cmd(se_cmd, &lio_target_fabric_configfs->tf_ops,
221 			conn->sess->se_sess, data_length, data_direction,
222 			sam_task_attr, &cmd->sense_buffer[0]);
223 	return cmd;
224 }
225 
226 struct iscsi_cmd *iscsit_allocate_se_cmd_for_tmr(
227 	struct iscsi_conn *conn,
228 	u8 function)
229 {
230 	struct iscsi_cmd *cmd;
231 	struct se_cmd *se_cmd;
232 	u8 tcm_function;
233 
234 	cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
235 	if (!cmd)
236 		return NULL;
237 
238 	cmd->data_direction = DMA_NONE;
239 
240 	cmd->tmr_req = kzalloc(sizeof(struct iscsi_tmr_req), GFP_KERNEL);
241 	if (!cmd->tmr_req) {
242 		pr_err("Unable to allocate memory for"
243 			" Task Management command!\n");
244 		goto out;
245 	}
246 	/*
247 	 * TASK_REASSIGN for ERL=2 / connection stays inside of
248 	 * LIO-Target $FABRIC_MOD
249 	 */
250 	if (function == ISCSI_TM_FUNC_TASK_REASSIGN)
251 		return cmd;
252 
253 	se_cmd = &cmd->se_cmd;
254 	/*
255 	 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
256 	 */
257 	transport_init_se_cmd(se_cmd, &lio_target_fabric_configfs->tf_ops,
258 				conn->sess->se_sess, 0, DMA_NONE,
259 				MSG_SIMPLE_TAG, &cmd->sense_buffer[0]);
260 
261 	switch (function) {
262 	case ISCSI_TM_FUNC_ABORT_TASK:
263 		tcm_function = TMR_ABORT_TASK;
264 		break;
265 	case ISCSI_TM_FUNC_ABORT_TASK_SET:
266 		tcm_function = TMR_ABORT_TASK_SET;
267 		break;
268 	case ISCSI_TM_FUNC_CLEAR_ACA:
269 		tcm_function = TMR_CLEAR_ACA;
270 		break;
271 	case ISCSI_TM_FUNC_CLEAR_TASK_SET:
272 		tcm_function = TMR_CLEAR_TASK_SET;
273 		break;
274 	case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET:
275 		tcm_function = TMR_LUN_RESET;
276 		break;
277 	case ISCSI_TM_FUNC_TARGET_WARM_RESET:
278 		tcm_function = TMR_TARGET_WARM_RESET;
279 		break;
280 	case ISCSI_TM_FUNC_TARGET_COLD_RESET:
281 		tcm_function = TMR_TARGET_COLD_RESET;
282 		break;
283 	default:
284 		pr_err("Unknown iSCSI TMR Function:"
285 			" 0x%02x\n", function);
286 		goto out;
287 	}
288 
289 	se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd,
290 				cmd->tmr_req, tcm_function,
291 				GFP_KERNEL);
292 	if (!se_cmd->se_tmr_req)
293 		goto out;
294 
295 	cmd->tmr_req->se_tmr_req = se_cmd->se_tmr_req;
296 
297 	return cmd;
298 out:
299 	iscsit_release_cmd(cmd);
300 	return NULL;
301 }
302 
303 int iscsit_decide_list_to_build(
304 	struct iscsi_cmd *cmd,
305 	u32 immediate_data_length)
306 {
307 	struct iscsi_build_list bl;
308 	struct iscsi_conn *conn = cmd->conn;
309 	struct iscsi_session *sess = conn->sess;
310 	struct iscsi_node_attrib *na;
311 
312 	if (sess->sess_ops->DataSequenceInOrder &&
313 	    sess->sess_ops->DataPDUInOrder)
314 		return 0;
315 
316 	if (cmd->data_direction == DMA_NONE)
317 		return 0;
318 
319 	na = iscsit_tpg_get_node_attrib(sess);
320 	memset(&bl, 0, sizeof(struct iscsi_build_list));
321 
322 	if (cmd->data_direction == DMA_FROM_DEVICE) {
323 		bl.data_direction = ISCSI_PDU_READ;
324 		bl.type = PDULIST_NORMAL;
325 		if (na->random_datain_pdu_offsets)
326 			bl.randomize |= RANDOM_DATAIN_PDU_OFFSETS;
327 		if (na->random_datain_seq_offsets)
328 			bl.randomize |= RANDOM_DATAIN_SEQ_OFFSETS;
329 	} else {
330 		bl.data_direction = ISCSI_PDU_WRITE;
331 		bl.immediate_data_length = immediate_data_length;
332 		if (na->random_r2t_offsets)
333 			bl.randomize |= RANDOM_R2T_OFFSETS;
334 
335 		if (!cmd->immediate_data && !cmd->unsolicited_data)
336 			bl.type = PDULIST_NORMAL;
337 		else if (cmd->immediate_data && !cmd->unsolicited_data)
338 			bl.type = PDULIST_IMMEDIATE;
339 		else if (!cmd->immediate_data && cmd->unsolicited_data)
340 			bl.type = PDULIST_UNSOLICITED;
341 		else if (cmd->immediate_data && cmd->unsolicited_data)
342 			bl.type = PDULIST_IMMEDIATE_AND_UNSOLICITED;
343 	}
344 
345 	return iscsit_do_build_list(cmd, &bl);
346 }
347 
348 struct iscsi_seq *iscsit_get_seq_holder_for_datain(
349 	struct iscsi_cmd *cmd,
350 	u32 seq_send_order)
351 {
352 	u32 i;
353 
354 	for (i = 0; i < cmd->seq_count; i++)
355 		if (cmd->seq_list[i].seq_send_order == seq_send_order)
356 			return &cmd->seq_list[i];
357 
358 	return NULL;
359 }
360 
361 struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsi_cmd *cmd)
362 {
363 	u32 i;
364 
365 	if (!cmd->seq_list) {
366 		pr_err("struct iscsi_cmd->seq_list is NULL!\n");
367 		return NULL;
368 	}
369 
370 	for (i = 0; i < cmd->seq_count; i++) {
371 		if (cmd->seq_list[i].type != SEQTYPE_NORMAL)
372 			continue;
373 		if (cmd->seq_list[i].seq_send_order == cmd->seq_send_order) {
374 			cmd->seq_send_order++;
375 			return &cmd->seq_list[i];
376 		}
377 	}
378 
379 	return NULL;
380 }
381 
382 struct iscsi_r2t *iscsit_get_holder_for_r2tsn(
383 	struct iscsi_cmd *cmd,
384 	u32 r2t_sn)
385 {
386 	struct iscsi_r2t *r2t;
387 
388 	spin_lock_bh(&cmd->r2t_lock);
389 	list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
390 		if (r2t->r2t_sn == r2t_sn) {
391 			spin_unlock_bh(&cmd->r2t_lock);
392 			return r2t;
393 		}
394 	}
395 	spin_unlock_bh(&cmd->r2t_lock);
396 
397 	return NULL;
398 }
399 
400 static inline int iscsit_check_received_cmdsn(struct iscsi_session *sess, u32 cmdsn)
401 {
402 	int ret;
403 
404 	/*
405 	 * This is the proper method of checking received CmdSN against
406 	 * ExpCmdSN and MaxCmdSN values, as well as accounting for out
407 	 * of order CmdSNs due to multiple connection sessions and/or
408 	 * CRC failures.
409 	 */
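	/*
	 * Note: iscsi_sna_gt() performs 32-bit serial number arithmetic,
	 * so the comparisons below remain valid when CmdSN wraps past
	 * 0xffffffff.
	 */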
410 	if (iscsi_sna_gt(cmdsn, sess->max_cmd_sn)) {
411 		pr_err("Received CmdSN: 0x%08x is greater than"
412 		       " MaxCmdSN: 0x%08x, protocol error.\n", cmdsn,
413 		       sess->max_cmd_sn);
414 		ret = CMDSN_ERROR_CANNOT_RECOVER;
415 
416 	} else if (cmdsn == sess->exp_cmd_sn) {
417 		sess->exp_cmd_sn++;
418 		pr_debug("Received CmdSN matches ExpCmdSN,"
419 		      " incremented ExpCmdSN to: 0x%08x\n",
420 		      sess->exp_cmd_sn);
421 		ret = CMDSN_NORMAL_OPERATION;
422 
423 	} else if (iscsi_sna_gt(cmdsn, sess->exp_cmd_sn)) {
424 		pr_debug("Received CmdSN: 0x%08x is greater"
425 		      " than ExpCmdSN: 0x%08x, not acknowledging.\n",
426 		      cmdsn, sess->exp_cmd_sn);
427 		ret = CMDSN_HIGHER_THAN_EXP;
428 
429 	} else {
430 		pr_err("Received CmdSN: 0x%08x is less than"
431 		       " ExpCmdSN: 0x%08x, ignoring.\n", cmdsn,
432 		       sess->exp_cmd_sn);
433 		ret = CMDSN_LOWER_THAN_EXP;
434 	}
435 
436 	return ret;
437 }
438 
439 /*
440  * Commands may be received out of order if MC/S is in use.
441  * Ensure they are executed in CmdSN order.
442  */
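/*
 * Serialization across the connections of a session is provided by
 * sess->cmdsn_mutex, held below around the CmdSN check and execution.
 */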
443 int iscsit_sequence_cmd(
444 	struct iscsi_conn *conn,
445 	struct iscsi_cmd *cmd,
446 	u32 cmdsn)
447 {
448 	int ret;
449 	int cmdsn_ret;
450 
451 	mutex_lock(&conn->sess->cmdsn_mutex);
452 
453 	cmdsn_ret = iscsit_check_received_cmdsn(conn->sess, cmdsn);
454 	switch (cmdsn_ret) {
455 	case CMDSN_NORMAL_OPERATION:
456 		ret = iscsit_execute_cmd(cmd, 0);
457 		if ((ret >= 0) && !list_empty(&conn->sess->sess_ooo_cmdsn_list))
458 			iscsit_execute_ooo_cmdsns(conn->sess);
459 		break;
460 	case CMDSN_HIGHER_THAN_EXP:
461 		ret = iscsit_handle_ooo_cmdsn(conn->sess, cmd, cmdsn);
462 		break;
463 	case CMDSN_LOWER_THAN_EXP:
464 		cmd->i_state = ISTATE_REMOVE;
465 		iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state);
466 		ret = cmdsn_ret;
467 		break;
468 	default:
469 		ret = cmdsn_ret;
470 		break;
471 	}
472 	mutex_unlock(&conn->sess->cmdsn_mutex);
473 
474 	return ret;
475 }
476 
477 int iscsit_check_unsolicited_dataout(struct iscsi_cmd *cmd, unsigned char *buf)
478 {
479 	struct iscsi_conn *conn = cmd->conn;
480 	struct se_cmd *se_cmd = &cmd->se_cmd;
481 	struct iscsi_data *hdr = (struct iscsi_data *) buf;
482 	u32 payload_length = ntoh24(hdr->dlength);
483 
484 	if (conn->sess->sess_ops->InitialR2T) {
485 		pr_err("Received unexpected unsolicited data"
486 			" while InitialR2T=Yes, protocol error.\n");
487 		transport_send_check_condition_and_sense(se_cmd,
488 				TCM_UNEXPECTED_UNSOLICITED_DATA, 0);
489 		return -1;
490 	}
491 
492 	if ((cmd->first_burst_len + payload_length) >
493 	     conn->sess->sess_ops->FirstBurstLength) {
494 		pr_err("Total %u bytes exceeds FirstBurstLength: %u"
495 			" for this Unsolicited DataOut Burst.\n",
496 			(cmd->first_burst_len + payload_length),
497 				conn->sess->sess_ops->FirstBurstLength);
498 		transport_send_check_condition_and_sense(se_cmd,
499 				TCM_INCORRECT_AMOUNT_OF_DATA, 0);
500 		return -1;
501 	}
502 
503 	if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL))
504 		return 0;
505 
506 	if (((cmd->first_burst_len + payload_length) != cmd->data_length) &&
507 	    ((cmd->first_burst_len + payload_length) !=
508 	      conn->sess->sess_ops->FirstBurstLength)) {
509 		pr_err("Unsolicited non-immediate data received %u"
510 			" does not equal FirstBurstLength: %u, and does"
511 			" not equal ExpXferLen %u.\n",
512 			(cmd->first_burst_len + payload_length),
513 			conn->sess->sess_ops->FirstBurstLength, cmd->data_length);
514 		transport_send_check_condition_and_sense(se_cmd,
515 				TCM_INCORRECT_AMOUNT_OF_DATA, 0);
516 		return -1;
517 	}
518 	return 0;
519 }
520 
521 struct iscsi_cmd *iscsit_find_cmd_from_itt(
522 	struct iscsi_conn *conn,
523 	u32 init_task_tag)
524 {
525 	struct iscsi_cmd *cmd;
526 
527 	spin_lock_bh(&conn->cmd_lock);
528 	list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
529 		if (cmd->init_task_tag == init_task_tag) {
530 			spin_unlock_bh(&conn->cmd_lock);
531 			return cmd;
532 		}
533 	}
534 	spin_unlock_bh(&conn->cmd_lock);
535 
536 	pr_err("Unable to locate ITT: 0x%08x on CID: %hu\n",
537 			init_task_tag, conn->cid);
538 	return NULL;
539 }
540 
541 struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump(
542 	struct iscsi_conn *conn,
543 	u32 init_task_tag,
544 	u32 length)
545 {
546 	struct iscsi_cmd *cmd;
547 
548 	spin_lock_bh(&conn->cmd_lock);
549 	list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
550 		if (cmd->init_task_tag == init_task_tag) {
551 			spin_unlock_bh(&conn->cmd_lock);
552 			return cmd;
553 		}
554 	}
555 	spin_unlock_bh(&conn->cmd_lock);
556 
557 	pr_err("Unable to locate ITT: 0x%08x on CID: %hu,"
558 			" dumping payload\n", init_task_tag, conn->cid);
559 	if (length)
560 		iscsit_dump_data_payload(conn, length, 1);
561 
562 	return NULL;
563 }
564 
565 struct iscsi_cmd *iscsit_find_cmd_from_ttt(
566 	struct iscsi_conn *conn,
567 	u32 targ_xfer_tag)
568 {
569 	struct iscsi_cmd *cmd = NULL;
570 
571 	spin_lock_bh(&conn->cmd_lock);
572 	list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
573 		if (cmd->targ_xfer_tag == targ_xfer_tag) {
574 			spin_unlock_bh(&conn->cmd_lock);
575 			return cmd;
576 		}
577 	}
578 	spin_unlock_bh(&conn->cmd_lock);
579 
580 	pr_err("Unable to locate TTT: 0x%08x on CID: %hu\n",
581 			targ_xfer_tag, conn->cid);
582 	return NULL;
583 }
584 
585 int iscsit_find_cmd_for_recovery(
586 	struct iscsi_session *sess,
587 	struct iscsi_cmd **cmd_ptr,
588 	struct iscsi_conn_recovery **cr_ptr,
589 	u32 init_task_tag)
590 {
591 	struct iscsi_cmd *cmd = NULL;
592 	struct iscsi_conn_recovery *cr;
593 	/*
594 	 * Scan through the inactive connection recovery list's command list.
595 	 * If init_task_tag matches, the command is still allegiant to the failed connection.
596 	 */
597 	spin_lock(&sess->cr_i_lock);
598 	list_for_each_entry(cr, &sess->cr_inactive_list, cr_list) {
599 		spin_lock(&cr->conn_recovery_cmd_lock);
600 		list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_list) {
601 			if (cmd->init_task_tag == init_task_tag) {
602 				spin_unlock(&cr->conn_recovery_cmd_lock);
603 				spin_unlock(&sess->cr_i_lock);
604 
605 				*cr_ptr = cr;
606 				*cmd_ptr = cmd;
607 				return -2;
608 			}
609 		}
610 		spin_unlock(&cr->conn_recovery_cmd_lock);
611 	}
612 	spin_unlock(&sess->cr_i_lock);
613 	/*
614 	 * Scan through the active connection recovery list's command list.
615 	 * If init_task_tag matches, the command is ready to be reassigned.
616 	 */
617 	spin_lock(&sess->cr_a_lock);
618 	list_for_each_entry(cr, &sess->cr_active_list, cr_list) {
619 		spin_lock(&cr->conn_recovery_cmd_lock);
620 		list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_list) {
621 			if (cmd->init_task_tag == init_task_tag) {
622 				spin_unlock(&cr->conn_recovery_cmd_lock);
623 				spin_unlock(&sess->cr_a_lock);
624 
625 				*cr_ptr = cr;
626 				*cmd_ptr = cmd;
627 				return 0;
628 			}
629 		}
630 		spin_unlock(&cr->conn_recovery_cmd_lock);
631 	}
632 	spin_unlock(&sess->cr_a_lock);
633 
634 	return -1;
635 }
636 
637 void iscsit_add_cmd_to_immediate_queue(
638 	struct iscsi_cmd *cmd,
639 	struct iscsi_conn *conn,
640 	u8 state)
641 {
642 	struct iscsi_queue_req *qr;
643 
644 	qr = kmem_cache_zalloc(lio_qr_cache, GFP_ATOMIC);
645 	if (!qr) {
646 		pr_err("Unable to allocate memory for"
647 				" struct iscsi_queue_req\n");
648 		return;
649 	}
650 	INIT_LIST_HEAD(&qr->qr_list);
651 	qr->cmd = cmd;
652 	qr->state = state;
653 
654 	spin_lock_bh(&conn->immed_queue_lock);
655 	list_add_tail(&qr->qr_list, &conn->immed_queue_list);
656 	atomic_inc(&cmd->immed_queue_count);
657 	atomic_set(&conn->check_immediate_queue, 1);
658 	spin_unlock_bh(&conn->immed_queue_lock);
659 
660 	wake_up_process(conn->thread_set->tx_thread);
661 }
662 
663 struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *conn)
664 {
665 	struct iscsi_queue_req *qr;
666 
667 	spin_lock_bh(&conn->immed_queue_lock);
668 	if (list_empty(&conn->immed_queue_list)) {
669 		spin_unlock_bh(&conn->immed_queue_lock);
670 		return NULL;
671 	}
672 	list_for_each_entry(qr, &conn->immed_queue_list, qr_list)
673 		break;
674 
675 	list_del(&qr->qr_list);
676 	if (qr->cmd)
677 		atomic_dec(&qr->cmd->immed_queue_count);
678 	spin_unlock_bh(&conn->immed_queue_lock);
679 
680 	return qr;
681 }
682 
683 static void iscsit_remove_cmd_from_immediate_queue(
684 	struct iscsi_cmd *cmd,
685 	struct iscsi_conn *conn)
686 {
687 	struct iscsi_queue_req *qr, *qr_tmp;
688 
689 	spin_lock_bh(&conn->immed_queue_lock);
690 	if (!atomic_read(&cmd->immed_queue_count)) {
691 		spin_unlock_bh(&conn->immed_queue_lock);
692 		return;
693 	}
694 
695 	list_for_each_entry_safe(qr, qr_tmp, &conn->immed_queue_list, qr_list) {
696 		if (qr->cmd != cmd)
697 			continue;
698 
699 		atomic_dec(&qr->cmd->immed_queue_count);
700 		list_del(&qr->qr_list);
701 		kmem_cache_free(lio_qr_cache, qr);
702 	}
703 	spin_unlock_bh(&conn->immed_queue_lock);
704 
705 	if (atomic_read(&cmd->immed_queue_count)) {
706 		pr_err("ITT: 0x%08x immed_queue_count: %d\n",
707 			cmd->init_task_tag,
708 			atomic_read(&cmd->immed_queue_count));
709 	}
710 }
711 
712 void iscsit_add_cmd_to_response_queue(
713 	struct iscsi_cmd *cmd,
714 	struct iscsi_conn *conn,
715 	u8 state)
716 {
717 	struct iscsi_queue_req *qr;
718 
719 	qr = kmem_cache_zalloc(lio_qr_cache, GFP_ATOMIC);
720 	if (!qr) {
721 		pr_err("Unable to allocate memory for"
722 			" struct iscsi_queue_req\n");
723 		return;
724 	}
725 	INIT_LIST_HEAD(&qr->qr_list);
726 	qr->cmd = cmd;
727 	qr->state = state;
728 
729 	spin_lock_bh(&conn->response_queue_lock);
730 	list_add_tail(&qr->qr_list, &conn->response_queue_list);
731 	atomic_inc(&cmd->response_queue_count);
732 	spin_unlock_bh(&conn->response_queue_lock);
733 
734 	wake_up_process(conn->thread_set->tx_thread);
735 }
736 
737 struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *conn)
738 {
739 	struct iscsi_queue_req *qr;
740 
741 	spin_lock_bh(&conn->response_queue_lock);
742 	if (list_empty(&conn->response_queue_list)) {
743 		spin_unlock_bh(&conn->response_queue_lock);
744 		return NULL;
745 	}
746 
747 	list_for_each_entry(qr, &conn->response_queue_list, qr_list)
748 		break;
749 
750 	list_del(&qr->qr_list);
751 	if (qr->cmd)
752 		atomic_dec(&qr->cmd->response_queue_count);
753 	spin_unlock_bh(&conn->response_queue_lock);
754 
755 	return qr;
756 }
757 
758 static void iscsit_remove_cmd_from_response_queue(
759 	struct iscsi_cmd *cmd,
760 	struct iscsi_conn *conn)
761 {
762 	struct iscsi_queue_req *qr, *qr_tmp;
763 
764 	spin_lock_bh(&conn->response_queue_lock);
765 	if (!atomic_read(&cmd->response_queue_count)) {
766 		spin_unlock_bh(&conn->response_queue_lock);
767 		return;
768 	}
769 
770 	list_for_each_entry_safe(qr, qr_tmp, &conn->response_queue_list,
771 				qr_list) {
772 		if (qr->cmd != cmd)
773 			continue;
774 
775 		atomic_dec(&qr->cmd->response_queue_count);
776 		list_del(&qr->qr_list);
777 		kmem_cache_free(lio_qr_cache, qr);
778 	}
779 	spin_unlock_bh(&conn->response_queue_lock);
780 
781 	if (atomic_read(&cmd->response_queue_count)) {
782 		pr_err("ITT: 0x%08x response_queue_count: %d\n",
783 			cmd->init_task_tag,
784 			atomic_read(&cmd->response_queue_count));
785 	}
786 }
787 
788 void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *conn)
789 {
790 	struct iscsi_queue_req *qr, *qr_tmp;
791 
792 	spin_lock_bh(&conn->immed_queue_lock);
793 	list_for_each_entry_safe(qr, qr_tmp, &conn->immed_queue_list, qr_list) {
794 		list_del(&qr->qr_list);
795 		if (qr->cmd)
796 			atomic_dec(&qr->cmd->immed_queue_count);
797 
798 		kmem_cache_free(lio_qr_cache, qr);
799 	}
800 	spin_unlock_bh(&conn->immed_queue_lock);
801 
802 	spin_lock_bh(&conn->response_queue_lock);
803 	list_for_each_entry_safe(qr, qr_tmp, &conn->response_queue_list,
804 			qr_list) {
805 		list_del(&qr->qr_list);
806 		if (qr->cmd)
807 			atomic_dec(&qr->cmd->response_queue_count);
808 
809 		kmem_cache_free(lio_qr_cache, qr);
810 	}
811 	spin_unlock_bh(&conn->response_queue_lock);
812 }
813 
814 void iscsit_release_cmd(struct iscsi_cmd *cmd)
815 {
816 	struct iscsi_conn *conn = cmd->conn;
817 	int i;
818 
819 	iscsit_free_r2ts_from_list(cmd);
820 	iscsit_free_all_datain_reqs(cmd);
821 
822 	kfree(cmd->buf_ptr);
823 	kfree(cmd->pdu_list);
824 	kfree(cmd->seq_list);
825 	kfree(cmd->tmr_req);
826 	kfree(cmd->iov_data);
827 
828 	for (i = 0; i < cmd->t_mem_sg_nents; i++)
829 		__free_page(sg_page(&cmd->t_mem_sg[i]));
830 
831 	kfree(cmd->t_mem_sg);
832 
833 	if (conn) {
834 		iscsit_remove_cmd_from_immediate_queue(cmd, conn);
835 		iscsit_remove_cmd_from_response_queue(cmd, conn);
836 	}
837 
838 	kmem_cache_free(lio_cmd_cache, cmd);
839 }
840 
841 void iscsit_free_cmd(struct iscsi_cmd *cmd)
842 {
843 	/*
844 	 * Determine if a struct se_cmd is associated with
845 	 * this struct iscsi_cmd.
846 	 */
847 	switch (cmd->iscsi_opcode) {
848 	case ISCSI_OP_SCSI_CMD:
849 	case ISCSI_OP_SCSI_TMFUNC:
850 		transport_generic_free_cmd(&cmd->se_cmd, 1);
851 		break;
852 	default:
853 		iscsit_release_cmd(cmd);
854 		break;
855 	}
856 }
857 
858 int iscsit_check_session_usage_count(struct iscsi_session *sess)
859 {
860 	spin_lock_bh(&sess->session_usage_lock);
861 	if (sess->session_usage_count != 0) {
862 		sess->session_waiting_on_uc = 1;
863 		spin_unlock_bh(&sess->session_usage_lock);
864 		if (in_interrupt())
865 			return 2;
866 
867 		wait_for_completion(&sess->session_waiting_on_uc_comp);
868 		return 1;
869 	}
870 	spin_unlock_bh(&sess->session_usage_lock);
871 
872 	return 0;
873 }
874 
875 void iscsit_dec_session_usage_count(struct iscsi_session *sess)
876 {
877 	spin_lock_bh(&sess->session_usage_lock);
878 	sess->session_usage_count--;
879 
880 	if (!sess->session_usage_count && sess->session_waiting_on_uc)
881 		complete(&sess->session_waiting_on_uc_comp);
882 
883 	spin_unlock_bh(&sess->session_usage_lock);
884 }
885 
886 void iscsit_inc_session_usage_count(struct iscsi_session *sess)
887 {
888 	spin_lock_bh(&sess->session_usage_lock);
889 	sess->session_usage_count++;
890 	spin_unlock_bh(&sess->session_usage_lock);
891 }
892 
893 /*
894  *	Set up conn->if_marker and conn->of_marker values based upon
895  *	the initial marker-less interval. (see iSCSI v19 A.2)
896  */
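/*
 * On completion, conn->of_marker and conn->if_marker hold the byte count
 * remaining until the next marker, with MARKER_SIZE bytes accounted for
 * each marker that would have fallen within the marker-less interval.
 */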
897 int iscsit_set_sync_and_steering_values(struct iscsi_conn *conn)
898 {
899 	int login_ifmarker_count = 0, login_ofmarker_count = 0, next_marker = 0;
900 	/*
901 	 * IFMarkInt and OFMarkInt are negotiated as 32-bit words.
902 	 */
903 	u32 IFMarkInt = (conn->conn_ops->IFMarkInt * 4);
904 	u32 OFMarkInt = (conn->conn_ops->OFMarkInt * 4);
905 
906 	if (conn->conn_ops->OFMarker) {
907 		/*
908 		 * Account for the first Login Command, which is not
909 		 * received via iscsi_recv_msg().
910 		 */
911 		conn->of_marker += ISCSI_HDR_LEN;
912 		if (conn->of_marker <= OFMarkInt) {
913 			conn->of_marker = (OFMarkInt - conn->of_marker);
914 		} else {
915 			login_ofmarker_count = (conn->of_marker / OFMarkInt);
916 			next_marker = (OFMarkInt * (login_ofmarker_count + 1)) +
917 					(login_ofmarker_count * MARKER_SIZE);
918 			conn->of_marker = (next_marker - conn->of_marker);
919 		}
920 		conn->of_marker_offset = 0;
921 		pr_debug("Setting OFMarker value to %u based on Initial"
922 			" Markerless Interval.\n", conn->of_marker);
923 	}
924 
925 	if (conn->conn_ops->IFMarker) {
926 		if (conn->if_marker <= IFMarkInt) {
927 			conn->if_marker = (IFMarkInt - conn->if_marker);
928 		} else {
929 			login_ifmarker_count = (conn->if_marker / IFMarkInt);
930 			next_marker = (IFMarkInt * (login_ifmarker_count + 1)) +
931 					(login_ifmarker_count * MARKER_SIZE);
932 			conn->if_marker = (next_marker - conn->if_marker);
933 		}
934 		pr_debug("Setting IFMarker value to %u based on Initial"
935 			" Markerless Interval.\n", conn->if_marker);
936 	}
937 
938 	return 0;
939 }
940 
941 struct iscsi_conn *iscsit_get_conn_from_cid(struct iscsi_session *sess, u16 cid)
942 {
943 	struct iscsi_conn *conn;
944 
945 	spin_lock_bh(&sess->conn_lock);
946 	list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
947 		if ((conn->cid == cid) &&
948 		    (conn->conn_state == TARG_CONN_STATE_LOGGED_IN)) {
949 			iscsit_inc_conn_usage_count(conn);
950 			spin_unlock_bh(&sess->conn_lock);
951 			return conn;
952 		}
953 	}
954 	spin_unlock_bh(&sess->conn_lock);
955 
956 	return NULL;
957 }
958 
959 struct iscsi_conn *iscsit_get_conn_from_cid_rcfr(struct iscsi_session *sess, u16 cid)
960 {
961 	struct iscsi_conn *conn;
962 
963 	spin_lock_bh(&sess->conn_lock);
964 	list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
965 		if (conn->cid == cid) {
966 			iscsit_inc_conn_usage_count(conn);
967 			spin_lock(&conn->state_lock);
968 			atomic_set(&conn->connection_wait_rcfr, 1);
969 			spin_unlock(&conn->state_lock);
970 			spin_unlock_bh(&sess->conn_lock);
971 			return conn;
972 		}
973 	}
974 	spin_unlock_bh(&sess->conn_lock);
975 
976 	return NULL;
977 }
978 
979 void iscsit_check_conn_usage_count(struct iscsi_conn *conn)
980 {
981 	spin_lock_bh(&conn->conn_usage_lock);
982 	if (conn->conn_usage_count != 0) {
983 		conn->conn_waiting_on_uc = 1;
984 		spin_unlock_bh(&conn->conn_usage_lock);
985 
986 		wait_for_completion(&conn->conn_waiting_on_uc_comp);
987 		return;
988 	}
989 	spin_unlock_bh(&conn->conn_usage_lock);
990 }
991 
992 void iscsit_dec_conn_usage_count(struct iscsi_conn *conn)
993 {
994 	spin_lock_bh(&conn->conn_usage_lock);
995 	conn->conn_usage_count--;
996 
997 	if (!conn->conn_usage_count && conn->conn_waiting_on_uc)
998 		complete(&conn->conn_waiting_on_uc_comp);
999 
1000 	spin_unlock_bh(&conn->conn_usage_lock);
1001 }
1002 
1003 void iscsit_inc_conn_usage_count(struct iscsi_conn *conn)
1004 {
1005 	spin_lock_bh(&conn->conn_usage_lock);
1006 	conn->conn_usage_count++;
1007 	spin_unlock_bh(&conn->conn_usage_lock);
1008 }
1009 
1010 static int iscsit_add_nopin(struct iscsi_conn *conn, int want_response)
1011 {
1012 	u8 state;
1013 	struct iscsi_cmd *cmd;
1014 
1015 	cmd = iscsit_allocate_cmd(conn, GFP_ATOMIC);
1016 	if (!cmd)
1017 		return -1;
1018 
1019 	cmd->iscsi_opcode = ISCSI_OP_NOOP_IN;
1020 	state = (want_response) ? ISTATE_SEND_NOPIN_WANT_RESPONSE :
1021 				ISTATE_SEND_NOPIN_NO_RESPONSE;
1022 	cmd->init_task_tag = 0xFFFFFFFF;
1023 	spin_lock_bh(&conn->sess->ttt_lock);
1024 	cmd->targ_xfer_tag = (want_response) ? conn->sess->targ_xfer_tag++ :
1025 			0xFFFFFFFF;
1026 	if (want_response && (cmd->targ_xfer_tag == 0xFFFFFFFF))
1027 		cmd->targ_xfer_tag = conn->sess->targ_xfer_tag++;
1028 	spin_unlock_bh(&conn->sess->ttt_lock);
1029 
1030 	spin_lock_bh(&conn->cmd_lock);
1031 	list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
1032 	spin_unlock_bh(&conn->cmd_lock);
1033 
1034 	if (want_response)
1035 		iscsit_start_nopin_response_timer(conn);
1036 	iscsit_add_cmd_to_immediate_queue(cmd, conn, state);
1037 
1038 	return 0;
1039 }
1040 
1041 static void iscsit_handle_nopin_response_timeout(unsigned long data)
1042 {
1043 	struct iscsi_conn *conn = (struct iscsi_conn *) data;
1044 
1045 	iscsit_inc_conn_usage_count(conn);
1046 
1047 	spin_lock_bh(&conn->nopin_timer_lock);
1048 	if (conn->nopin_response_timer_flags & ISCSI_TF_STOP) {
1049 		spin_unlock_bh(&conn->nopin_timer_lock);
1050 		iscsit_dec_conn_usage_count(conn);
1051 		return;
1052 	}
1053 
1054 	pr_debug("Did not receive response to NOPIN on CID: %hu on"
1055 		" SID: %u, failing connection.\n", conn->cid,
1056 			conn->sess->sid);
1057 	conn->nopin_response_timer_flags &= ~ISCSI_TF_RUNNING;
1058 	spin_unlock_bh(&conn->nopin_timer_lock);
1059 
1060 	{
1061 	struct iscsi_portal_group *tpg = conn->sess->tpg;
1062 	struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
1063 
1064 	if (tiqn) {
1065 		spin_lock_bh(&tiqn->sess_err_stats.lock);
1066 		strcpy(tiqn->sess_err_stats.last_sess_fail_rem_name,
1067 				conn->sess->sess_ops->InitiatorName);
1068 		tiqn->sess_err_stats.last_sess_failure_type =
1069 				ISCSI_SESS_ERR_CXN_TIMEOUT;
1070 		tiqn->sess_err_stats.cxn_timeout_errors++;
1071 		conn->sess->conn_timeout_errors++;
1072 		spin_unlock_bh(&tiqn->sess_err_stats.lock);
1073 	}
1074 	}
1075 
1076 	iscsit_cause_connection_reinstatement(conn, 0);
1077 	iscsit_dec_conn_usage_count(conn);
1078 }
1079 
1080 void iscsit_mod_nopin_response_timer(struct iscsi_conn *conn)
1081 {
1082 	struct iscsi_session *sess = conn->sess;
1083 	struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
1084 
1085 	spin_lock_bh(&conn->nopin_timer_lock);
1086 	if (!(conn->nopin_response_timer_flags & ISCSI_TF_RUNNING)) {
1087 		spin_unlock_bh(&conn->nopin_timer_lock);
1088 		return;
1089 	}
1090 
1091 	mod_timer(&conn->nopin_response_timer,
1092 		(get_jiffies_64() + na->nopin_response_timeout * HZ));
1093 	spin_unlock_bh(&conn->nopin_timer_lock);
1094 }
1095 
1096 /*
1097  *	Called with conn->nopin_timer_lock held.
1098  */
1099 void iscsit_start_nopin_response_timer(struct iscsi_conn *conn)
1100 {
1101 	struct iscsi_session *sess = conn->sess;
1102 	struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
1103 
1104 	spin_lock_bh(&conn->nopin_timer_lock);
1105 	if (conn->nopin_response_timer_flags & ISCSI_TF_RUNNING) {
1106 		spin_unlock_bh(&conn->nopin_timer_lock);
1107 		return;
1108 	}
1109 
1110 	init_timer(&conn->nopin_response_timer);
1111 	conn->nopin_response_timer.expires =
1112 		(get_jiffies_64() + na->nopin_response_timeout * HZ);
1113 	conn->nopin_response_timer.data = (unsigned long)conn;
1114 	conn->nopin_response_timer.function = iscsit_handle_nopin_response_timeout;
1115 	conn->nopin_response_timer_flags &= ~ISCSI_TF_STOP;
1116 	conn->nopin_response_timer_flags |= ISCSI_TF_RUNNING;
1117 	add_timer(&conn->nopin_response_timer);
1118 
1119 	pr_debug("Started NOPIN Response Timer on CID: %d to %u"
1120 		" seconds\n", conn->cid, na->nopin_response_timeout);
1121 	spin_unlock_bh(&conn->nopin_timer_lock);
1122 }
1123 
1124 void iscsit_stop_nopin_response_timer(struct iscsi_conn *conn)
1125 {
1126 	spin_lock_bh(&conn->nopin_timer_lock);
1127 	if (!(conn->nopin_response_timer_flags & ISCSI_TF_RUNNING)) {
1128 		spin_unlock_bh(&conn->nopin_timer_lock);
1129 		return;
1130 	}
1131 	conn->nopin_response_timer_flags |= ISCSI_TF_STOP;
1132 	spin_unlock_bh(&conn->nopin_timer_lock);
1133 
1134 	del_timer_sync(&conn->nopin_response_timer);
1135 
1136 	spin_lock_bh(&conn->nopin_timer_lock);
1137 	conn->nopin_response_timer_flags &= ~ISCSI_TF_RUNNING;
1138 	spin_unlock_bh(&conn->nopin_timer_lock);
1139 }
1140 
1141 static void iscsit_handle_nopin_timeout(unsigned long data)
1142 {
1143 	struct iscsi_conn *conn = (struct iscsi_conn *) data;
1144 
1145 	iscsit_inc_conn_usage_count(conn);
1146 
1147 	spin_lock_bh(&conn->nopin_timer_lock);
1148 	if (conn->nopin_timer_flags & ISCSI_TF_STOP) {
1149 		spin_unlock_bh(&conn->nopin_timer_lock);
1150 		iscsit_dec_conn_usage_count(conn);
1151 		return;
1152 	}
1153 	conn->nopin_timer_flags &= ~ISCSI_TF_RUNNING;
1154 	spin_unlock_bh(&conn->nopin_timer_lock);
1155 
1156 	iscsit_add_nopin(conn, 1);
1157 	iscsit_dec_conn_usage_count(conn);
1158 }
1159 
1160 /*
1161  * Called with conn->nopin_timer_lock held.
1162  */
1163 void __iscsit_start_nopin_timer(struct iscsi_conn *conn)
1164 {
1165 	struct iscsi_session *sess = conn->sess;
1166 	struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
1167 	/*
1168 	 * NOPIN timeout is disabled.
1169 	 */
1170 	if (!na->nopin_timeout)
1171 		return;
1172 
1173 	if (conn->nopin_timer_flags & ISCSI_TF_RUNNING)
1174 		return;
1175 
1176 	init_timer(&conn->nopin_timer);
1177 	conn->nopin_timer.expires = (get_jiffies_64() + na->nopin_timeout * HZ);
1178 	conn->nopin_timer.data = (unsigned long)conn;
1179 	conn->nopin_timer.function = iscsit_handle_nopin_timeout;
1180 	conn->nopin_timer_flags &= ~ISCSI_TF_STOP;
1181 	conn->nopin_timer_flags |= ISCSI_TF_RUNNING;
1182 	add_timer(&conn->nopin_timer);
1183 
1184 	pr_debug("Started NOPIN Timer on CID: %d at %u second"
1185 		" interval\n", conn->cid, na->nopin_timeout);
1186 }
1187 
1188 void iscsit_start_nopin_timer(struct iscsi_conn *conn)
1189 {
1190 	struct iscsi_session *sess = conn->sess;
1191 	struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
1192 	/*
1193 	 * NOPIN timeout is disabled.
1194 	 */
1195 	if (!na->nopin_timeout)
1196 		return;
1197 
1198 	spin_lock_bh(&conn->nopin_timer_lock);
1199 	if (conn->nopin_timer_flags & ISCSI_TF_RUNNING) {
1200 		spin_unlock_bh(&conn->nopin_timer_lock);
1201 		return;
1202 	}
1203 
1204 	init_timer(&conn->nopin_timer);
1205 	conn->nopin_timer.expires = (get_jiffies_64() + na->nopin_timeout * HZ);
1206 	conn->nopin_timer.data = (unsigned long)conn;
1207 	conn->nopin_timer.function = iscsit_handle_nopin_timeout;
1208 	conn->nopin_timer_flags &= ~ISCSI_TF_STOP;
1209 	conn->nopin_timer_flags |= ISCSI_TF_RUNNING;
1210 	add_timer(&conn->nopin_timer);
1211 
1212 	pr_debug("Started NOPIN Timer on CID: %d at %u second"
1213 			" interval\n", conn->cid, na->nopin_timeout);
1214 	spin_unlock_bh(&conn->nopin_timer_lock);
1215 }
1216 
1217 void iscsit_stop_nopin_timer(struct iscsi_conn *conn)
1218 {
1219 	spin_lock_bh(&conn->nopin_timer_lock);
1220 	if (!(conn->nopin_timer_flags & ISCSI_TF_RUNNING)) {
1221 		spin_unlock_bh(&conn->nopin_timer_lock);
1222 		return;
1223 	}
1224 	conn->nopin_timer_flags |= ISCSI_TF_STOP;
1225 	spin_unlock_bh(&conn->nopin_timer_lock);
1226 
1227 	del_timer_sync(&conn->nopin_timer);
1228 
1229 	spin_lock_bh(&conn->nopin_timer_lock);
1230 	conn->nopin_timer_flags &= ~ISCSI_TF_RUNNING;
1231 	spin_unlock_bh(&conn->nopin_timer_lock);
1232 }
1233 
1234 int iscsit_send_tx_data(
1235 	struct iscsi_cmd *cmd,
1236 	struct iscsi_conn *conn,
1237 	int use_misc)
1238 {
1239 	int tx_sent, tx_size;
1240 	u32 iov_count;
1241 	struct kvec *iov;
1242 
1243 send_data:
1244 	tx_size = cmd->tx_size;
1245 
1246 	if (!use_misc) {
1247 		iov = &cmd->iov_data[0];
1248 		iov_count = cmd->iov_data_count;
1249 	} else {
1250 		iov = &cmd->iov_misc[0];
1251 		iov_count = cmd->iov_misc_count;
1252 	}
1253 
1254 	tx_sent = tx_data(conn, &iov[0], iov_count, tx_size);
1255 	if (tx_size != tx_sent) {
1256 		if (tx_sent == -EAGAIN) {
1257 			pr_err("tx_data() returned -EAGAIN\n");
1258 			goto send_data;
1259 		} else
1260 			return -1;
1261 	}
1262 	cmd->tx_size = 0;
1263 
1264 	return 0;
1265 }
1266 
1267 int iscsit_fe_sendpage_sg(
1268 	struct iscsi_cmd *cmd,
1269 	struct iscsi_conn *conn)
1270 {
1271 	struct scatterlist *sg = cmd->first_data_sg;
1272 	struct kvec iov;
1273 	u32 tx_hdr_size, data_len;
1274 	u32 offset = cmd->first_data_sg_off;
1275 	int tx_sent, iov_off;
1276 
1277 send_hdr:
1278 	tx_hdr_size = ISCSI_HDR_LEN;
1279 	if (conn->conn_ops->HeaderDigest)
1280 		tx_hdr_size += ISCSI_CRC_LEN;
1281 
1282 	iov.iov_base = cmd->pdu;
1283 	iov.iov_len = tx_hdr_size;
1284 
1285 	tx_sent = tx_data(conn, &iov, 1, tx_hdr_size);
1286 	if (tx_hdr_size != tx_sent) {
1287 		if (tx_sent == -EAGAIN) {
1288 			pr_err("tx_data() returned -EAGAIN\n");
1289 			goto send_hdr;
1290 		}
1291 		return -1;
1292 	}
1293 
1294 	data_len = cmd->tx_size - tx_hdr_size - cmd->padding;
1295 	/*
1296 	 * Set iov_off used by padding and data digest tx_data() calls below
1297 	 * in order to determine proper offset into cmd->iov_data[]
1298 	 */
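	/*
	 * The data digest (when negotiated) occupies the final kvec in
	 * cmd->iov_data[]; any padding occupies the kvec just before it.
	 */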
1299 	if (conn->conn_ops->DataDigest) {
1300 		data_len -= ISCSI_CRC_LEN;
1301 		if (cmd->padding)
1302 			iov_off = (cmd->iov_data_count - 2);
1303 		else
1304 			iov_off = (cmd->iov_data_count - 1);
1305 	} else {
1306 		iov_off = (cmd->iov_data_count - 1);
1307 	}
1308 	/*
1309 	 * Perform sendpage() for each page in the scatterlist
1310 	 */
1311 	while (data_len) {
1312 		u32 space = (sg->length - offset);
1313 		u32 sub_len = min_t(u32, data_len, space);
1314 send_pg:
1315 		tx_sent = conn->sock->ops->sendpage(conn->sock,
1316 					sg_page(sg), sg->offset + offset, sub_len, 0);
1317 		if (tx_sent != sub_len) {
1318 			if (tx_sent == -EAGAIN) {
1319 				pr_err("tcp_sendpage() returned"
1320 						" -EAGAIN\n");
1321 				goto send_pg;
1322 			}
1323 
1324 			pr_err("tcp_sendpage() failure: %d\n",
1325 					tx_sent);
1326 			return -1;
1327 		}
1328 
1329 		data_len -= sub_len;
1330 		offset = 0;
1331 		sg = sg_next(sg);
1332 	}
1333 
1334 send_padding:
1335 	if (cmd->padding) {
1336 		struct kvec *iov_p = &cmd->iov_data[iov_off++];
1337 
1338 		tx_sent = tx_data(conn, iov_p, 1, cmd->padding);
1339 		if (cmd->padding != tx_sent) {
1340 			if (tx_sent == -EAGAIN) {
1341 				pr_err("tx_data() returned -EAGAIN\n");
1342 				goto send_padding;
1343 			}
1344 			return -1;
1345 		}
1346 	}
1347 
1348 send_datacrc:
1349 	if (conn->conn_ops->DataDigest) {
1350 		struct kvec *iov_d = &cmd->iov_data[iov_off];
1351 
1352 		tx_sent = tx_data(conn, iov_d, 1, ISCSI_CRC_LEN);
1353 		if (ISCSI_CRC_LEN != tx_sent) {
1354 			if (tx_sent == -EAGAIN) {
1355 				pr_err("tx_data() returned -EAGAIN\n");
1356 				goto send_datacrc;
1357 			}
1358 			return -1;
1359 		}
1360 	}
1361 
1362 	return 0;
1363 }
1364 
1365 /*
1366  *      This function is mainly used for sending an ISCSI_TARG_LOGIN_RSP PDU
1367  *      back to the Initiator when an exception condition occurs with the
1368  *      errors set in status_class and status_detail.
1369  *
1370  *      Parameters:     iSCSI Connection, Status Class, Status Detail.
1371  *      Returns:        0 on success, -1 on error.
1372  */
1373 int iscsit_tx_login_rsp(struct iscsi_conn *conn, u8 status_class, u8 status_detail)
1374 {
1375 	u8 iscsi_hdr[ISCSI_HDR_LEN];
1376 	int err;
1377 	struct kvec iov;
1378 	struct iscsi_login_rsp *hdr;
1379 
1380 	iscsit_collect_login_stats(conn, status_class, status_detail);
1381 
1382 	memset(&iov, 0, sizeof(struct kvec));
1383 	memset(&iscsi_hdr, 0x0, ISCSI_HDR_LEN);
1384 
1385 	hdr	= (struct iscsi_login_rsp *)&iscsi_hdr;
1386 	hdr->opcode		= ISCSI_OP_LOGIN_RSP;
1387 	hdr->status_class	= status_class;
1388 	hdr->status_detail	= status_detail;
1389 	hdr->itt		= cpu_to_be32(conn->login_itt);
1390 
1391 	iov.iov_base		= &iscsi_hdr;
1392 	iov.iov_len		= ISCSI_HDR_LEN;
1393 
1394 	PRINT_BUFF(iscsi_hdr, ISCSI_HDR_LEN);
1395 
1396 	err = tx_data(conn, &iov, 1, ISCSI_HDR_LEN);
1397 	if (err != ISCSI_HDR_LEN) {
1398 		pr_err("tx_data returned less than expected\n");
1399 		return -1;
1400 	}
1401 
1402 	return 0;
1403 }
1404 
1405 void iscsit_print_session_params(struct iscsi_session *sess)
1406 {
1407 	struct iscsi_conn *conn;
1408 
1409 	pr_debug("-----------------------------[Session Params for"
1410 		" SID: %u]-----------------------------\n", sess->sid);
1411 	spin_lock_bh(&sess->conn_lock);
1412 	list_for_each_entry(conn, &sess->sess_conn_list, conn_list)
1413 		iscsi_dump_conn_ops(conn->conn_ops);
1414 	spin_unlock_bh(&sess->conn_lock);
1415 
1416 	iscsi_dump_sess_ops(sess->sess_ops);
1417 }
1418 
1419 static int iscsit_do_rx_data(
1420 	struct iscsi_conn *conn,
1421 	struct iscsi_data_count *count)
1422 {
1423 	int data = count->data_length, rx_loop = 0, total_rx = 0, iov_len;
1424 	struct kvec *iov_p;
1425 	struct msghdr msg;
1426 
1427 	if (!conn || !conn->sock || !conn->conn_ops)
1428 		return -1;
1429 
1430 	memset(&msg, 0, sizeof(struct msghdr));
1431 
1432 	iov_p = count->iov;
1433 	iov_len	= count->iov_count;
1434 
1435 	while (total_rx < data) {
1436 		rx_loop = kernel_recvmsg(conn->sock, &msg, iov_p, iov_len,
1437 					(data - total_rx), MSG_WAITALL);
1438 		if (rx_loop <= 0) {
1439 			pr_debug("rx_loop: %d total_rx: %d\n",
1440 				rx_loop, total_rx);
1441 			return rx_loop;
1442 		}
1443 		total_rx += rx_loop;
1444 		pr_debug("rx_loop: %d, total_rx: %d, data: %d\n",
1445 				rx_loop, total_rx, data);
1446 	}
1447 
1448 	return total_rx;
1449 }
1450 
1451 static int iscsit_do_tx_data(
1452 	struct iscsi_conn *conn,
1453 	struct iscsi_data_count *count)
1454 {
1455 	int data = count->data_length, total_tx = 0, tx_loop = 0, iov_len;
1456 	struct kvec *iov_p;
1457 	struct msghdr msg;
1458 
1459 	if (!conn || !conn->sock || !conn->conn_ops)
1460 		return -1;
1461 
1462 	if (data <= 0) {
1463 		pr_err("Data length is: %d\n", data);
1464 		return -1;
1465 	}
1466 
1467 	memset(&msg, 0, sizeof(struct msghdr));
1468 
1469 	iov_p = count->iov;
1470 	iov_len = count->iov_count;
1471 
1472 	while (total_tx < data) {
1473 		tx_loop = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len,
1474 					(data - total_tx));
1475 		if (tx_loop <= 0) {
1476 			pr_debug("tx_loop: %d total_tx %d\n",
1477 				tx_loop, total_tx);
1478 			return tx_loop;
1479 		}
1480 		total_tx += tx_loop;
1481 		pr_debug("tx_loop: %d, total_tx: %d, data: %d\n",
1482 					tx_loop, total_tx, data);
1483 	}
1484 
1485 	return total_tx;
1486 }
1487 
1488 int rx_data(
1489 	struct iscsi_conn *conn,
1490 	struct kvec *iov,
1491 	int iov_count,
1492 	int data)
1493 {
1494 	struct iscsi_data_count c;
1495 
1496 	if (!conn || !conn->sock || !conn->conn_ops)
1497 		return -1;
1498 
1499 	memset(&c, 0, sizeof(struct iscsi_data_count));
1500 	c.iov = iov;
1501 	c.iov_count = iov_count;
1502 	c.data_length = data;
1503 	c.type = ISCSI_RX_DATA;
1504 
1505 	return iscsit_do_rx_data(conn, &c);
1506 }
1507 
1508 int tx_data(
1509 	struct iscsi_conn *conn,
1510 	struct kvec *iov,
1511 	int iov_count,
1512 	int data)
1513 {
1514 	struct iscsi_data_count c;
1515 
1516 	if (!conn || !conn->sock || !conn->conn_ops)
1517 		return -1;
1518 
1519 	memset(&c, 0, sizeof(struct iscsi_data_count));
1520 	c.iov = iov;
1521 	c.iov_count = iov_count;
1522 	c.data_length = data;
1523 	c.type = ISCSI_TX_DATA;
1524 
1525 	return iscsit_do_tx_data(conn, &c);
1526 }
1527 
1528 void iscsit_collect_login_stats(
1529 	struct iscsi_conn *conn,
1530 	u8 status_class,
1531 	u8 status_detail)
1532 {
1533 	struct iscsi_param *intrname = NULL;
1534 	struct iscsi_tiqn *tiqn;
1535 	struct iscsi_login_stats *ls;
1536 
1537 	tiqn = iscsit_snmp_get_tiqn(conn);
1538 	if (!tiqn)
1539 		return;
1540 
1541 	ls = &tiqn->login_stats;
1542 
1543 	spin_lock(&ls->lock);
1544 	if (!strcmp(conn->login_ip, ls->last_intr_fail_ip_addr) &&
1545 	    ((get_jiffies_64() - ls->last_fail_time) < 10)) {
1546 		/* We already have the failure info for this login */
1547 		spin_unlock(&ls->lock);
1548 		return;
1549 	}
1550 
1551 	if (status_class == ISCSI_STATUS_CLS_SUCCESS)
1552 		ls->accepts++;
1553 	else if (status_class == ISCSI_STATUS_CLS_REDIRECT) {
1554 		ls->redirects++;
1555 		ls->last_fail_type = ISCSI_LOGIN_FAIL_REDIRECT;
1556 	} else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR)  &&
1557 		 (status_detail == ISCSI_LOGIN_STATUS_AUTH_FAILED)) {
1558 		ls->authenticate_fails++;
1559 		ls->last_fail_type =  ISCSI_LOGIN_FAIL_AUTHENTICATE;
1560 	} else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR)  &&
1561 		 (status_detail == ISCSI_LOGIN_STATUS_TGT_FORBIDDEN)) {
1562 		ls->authorize_fails++;
1563 		ls->last_fail_type = ISCSI_LOGIN_FAIL_AUTHORIZE;
1564 	} else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR) &&
1565 		 (status_detail == ISCSI_LOGIN_STATUS_INIT_ERR)) {
1566 		ls->negotiate_fails++;
1567 		ls->last_fail_type = ISCSI_LOGIN_FAIL_NEGOTIATE;
1568 	} else {
1569 		ls->other_fails++;
1570 		ls->last_fail_type = ISCSI_LOGIN_FAIL_OTHER;
1571 	}
1572 
1573 	/* Save initiator name, ip address and time, if it is a failed login */
1574 	if (status_class != ISCSI_STATUS_CLS_SUCCESS) {
1575 		if (conn->param_list)
1576 			intrname = iscsi_find_param_from_key(INITIATORNAME,
1577 							     conn->param_list);
1578 		strcpy(ls->last_intr_fail_name,
1579 		       (intrname ? intrname->value : "Unknown"));
1580 
1581 		ls->last_intr_fail_ip_family = conn->sock->sk->sk_family;
1582 		snprintf(ls->last_intr_fail_ip_addr, IPV6_ADDRESS_SPACE,
1583 				"%s", conn->login_ip);
1584 		ls->last_fail_time = get_jiffies_64();
1585 	}
1586 
1587 	spin_unlock(&ls->lock);
1588 }
1589 
1590 struct iscsi_tiqn *iscsit_snmp_get_tiqn(struct iscsi_conn *conn)
1591 {
1592 	struct iscsi_portal_group *tpg;
1593 
1594 	if (!conn || !conn->sess)
1595 		return NULL;
1596 
1597 	tpg = conn->sess->tpg;
1598 	if (!tpg)
1599 		return NULL;
1600 
1601 	if (!tpg->tpg_tiqn)
1602 		return NULL;
1603 
1604 	return tpg->tpg_tiqn;
1605 }
1606