/* xref: /linux/drivers/target/iscsi/iscsi_target_util.c (revision 841b86f3289dbe858daeceec36423d4ea286fac2) */
1 /*******************************************************************************
2  * This file contains the iSCSI Target specific utility functions.
3  *
4  * (c) Copyright 2007-2013 Datera, Inc.
5  *
6  * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  * GNU General Public License for more details.
17  ******************************************************************************/
18 
19 #include <linux/list.h>
20 #include <linux/percpu_ida.h>
21 #include <net/ipv6.h>         /* ipv6_addr_equal() */
22 #include <scsi/scsi_tcq.h>
23 #include <scsi/iscsi_proto.h>
24 #include <target/target_core_base.h>
25 #include <target/target_core_fabric.h>
26 #include <target/iscsi/iscsi_transport.h>
27 
28 #include <target/iscsi/iscsi_target_core.h>
29 #include "iscsi_target_parameters.h"
30 #include "iscsi_target_seq_pdu_list.h"
31 #include "iscsi_target_datain_values.h"
32 #include "iscsi_target_erl0.h"
33 #include "iscsi_target_erl1.h"
34 #include "iscsi_target_erl2.h"
35 #include "iscsi_target_tpg.h"
36 #include "iscsi_target_util.h"
37 #include "iscsi_target.h"
38 
/*
 * Debug helper: hex-dump @len bytes of @buff via pr_debug(), 16 bytes
 * per row, each row prefixed with its starting offset.
 *
 * Wrapped in do { } while (0) so the macro expands to a single statement
 * and is safe in unbraced if/else bodies; @len is parenthesized to avoid
 * operator-precedence surprises.  NOTE: @len is still evaluated more than
 * once, so do not pass expressions with side effects.
 */
#define PRINT_BUFF(buff, len)					\
do {								\
	int zzz;						\
								\
	pr_debug("%d:\n", __LINE__);				\
	for (zzz = 0; zzz < (len); zzz++) {			\
		if (zzz % 16 == 0) {				\
			if (zzz)				\
				pr_debug("\n");			\
			pr_debug("%4i: ", zzz);			\
		}						\
		pr_debug("%02x ", (unsigned char) (buff)[zzz]);	\
	}							\
	if (((len) + 1) % 16)					\
		pr_debug("\n");					\
} while (0)
55 
56 extern struct list_head g_tiqn_list;
57 extern spinlock_t tiqn_lock;
58 
/*
 *	Allocate a new R2T descriptor, append it to cmd->cmd_r2t_list and
 *	queue an ISTATE_SEND_R2T request on the connection's immediate queue.
 *
 *	Called with cmd->r2t_lock held.  Note the lock is dropped around
 *	iscsit_add_cmd_to_immediate_queue() (which takes its own locks and
 *	wakes the tx thread) and reacquired before returning, so the caller
 *	must not assume list state is unchanged across this call.
 *
 *	Returns 0 on success, -1 on allocation failure.
 */
int iscsit_add_r2t_to_list(
	struct iscsi_cmd *cmd,
	u32 offset,
	u32 xfer_len,
	int recovery,
	u32 r2t_sn)
{
	struct iscsi_r2t *r2t;

	/* GFP_ATOMIC: may be called from non-sleeping (lock held) context */
	r2t = kmem_cache_zalloc(lio_r2t_cache, GFP_ATOMIC);
	if (!r2t) {
		pr_err("Unable to allocate memory for struct iscsi_r2t.\n");
		return -1;
	}
	INIT_LIST_HEAD(&r2t->r2t_list);

	r2t->recovery_r2t = recovery;
	/* r2t_sn == 0 means "assign the next sequence number for this cmd" */
	r2t->r2t_sn = (!r2t_sn) ? cmd->r2t_sn++ : r2t_sn;
	r2t->offset = offset;
	r2t->xfer_len = xfer_len;
	list_add_tail(&r2t->r2t_list, &cmd->cmd_r2t_list);
	spin_unlock_bh(&cmd->r2t_lock);

	iscsit_add_cmd_to_immediate_queue(cmd, cmd->conn, ISTATE_SEND_R2T);

	spin_lock_bh(&cmd->r2t_lock);
	return 0;
}
90 
91 struct iscsi_r2t *iscsit_get_r2t_for_eos(
92 	struct iscsi_cmd *cmd,
93 	u32 offset,
94 	u32 length)
95 {
96 	struct iscsi_r2t *r2t;
97 
98 	spin_lock_bh(&cmd->r2t_lock);
99 	list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
100 		if ((r2t->offset <= offset) &&
101 		    (r2t->offset + r2t->xfer_len) >= (offset + length)) {
102 			spin_unlock_bh(&cmd->r2t_lock);
103 			return r2t;
104 		}
105 	}
106 	spin_unlock_bh(&cmd->r2t_lock);
107 
108 	pr_err("Unable to locate R2T for Offset: %u, Length:"
109 			" %u\n", offset, length);
110 	return NULL;
111 }
112 
113 struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsi_cmd *cmd)
114 {
115 	struct iscsi_r2t *r2t;
116 
117 	spin_lock_bh(&cmd->r2t_lock);
118 	list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
119 		if (!r2t->sent_r2t) {
120 			spin_unlock_bh(&cmd->r2t_lock);
121 			return r2t;
122 		}
123 	}
124 	spin_unlock_bh(&cmd->r2t_lock);
125 
126 	pr_err("Unable to locate next R2T to send for ITT:"
127 			" 0x%08x.\n", cmd->init_task_tag);
128 	return NULL;
129 }
130 
/*
 *	Unlink @r2t from its command's R2T list and return it to the
 *	lio_r2t_cache slab.
 *
 *	Called with cmd->r2t_lock held.  @cmd is currently unused and kept
 *	only for interface symmetry with the other R2T helpers.
 */
void iscsit_free_r2t(struct iscsi_r2t *r2t, struct iscsi_cmd *cmd)
{
	list_del(&r2t->r2t_list);
	kmem_cache_free(lio_r2t_cache, r2t);
}
139 
140 void iscsit_free_r2ts_from_list(struct iscsi_cmd *cmd)
141 {
142 	struct iscsi_r2t *r2t, *r2t_tmp;
143 
144 	spin_lock_bh(&cmd->r2t_lock);
145 	list_for_each_entry_safe(r2t, r2t_tmp, &cmd->cmd_r2t_list, r2t_list)
146 		iscsit_free_r2t(r2t, cmd);
147 	spin_unlock_bh(&cmd->r2t_lock);
148 }
149 
/*
 * May be called from software interrupt (timer) context for allocating
 * iSCSI NopINs.
 *
 * Allocates an iscsi_cmd out of the session's preallocated tag pool:
 * sess_cmd_map is an array of fixed-size slots, each holding a struct
 * iscsi_cmd plus the transport's private data directly behind it.
 * @state is the task state passed through to percpu_ida_alloc() and
 * controls whether the tag allocation may sleep (callers in timer
 * context pass TASK_RUNNING).
 *
 * Returns the zeroed, partially-initialized command, or NULL when no
 * tag is available.
 */
struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, int state)
{
	struct iscsi_cmd *cmd;
	struct se_session *se_sess = conn->sess->se_sess;
	int size, tag;

	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, state);
	if (tag < 0)
		return NULL;

	/* Slot size = base descriptor + transport-private trailer */
	size = sizeof(struct iscsi_cmd) + conn->conn_transport->priv_size;
	cmd = (struct iscsi_cmd *)(se_sess->sess_cmd_map + (tag * size));
	memset(cmd, 0, size);

	/* Remember the tag so iscsit_release_cmd() can return it */
	cmd->se_cmd.map_tag = tag;
	cmd->conn = conn;
	cmd->data_direction = DMA_NONE;
	INIT_LIST_HEAD(&cmd->i_conn_node);
	INIT_LIST_HEAD(&cmd->datain_list);
	INIT_LIST_HEAD(&cmd->cmd_r2t_list);
	spin_lock_init(&cmd->datain_lock);
	spin_lock_init(&cmd->dataout_timeout_lock);
	spin_lock_init(&cmd->istate_lock);
	spin_lock_init(&cmd->error_lock);
	spin_lock_init(&cmd->r2t_lock);
	timer_setup(&cmd->dataout_timer, iscsit_handle_dataout_timeout, 0);

	return cmd;
}
EXPORT_SYMBOL(iscsit_allocate_cmd);
184 
185 struct iscsi_seq *iscsit_get_seq_holder_for_datain(
186 	struct iscsi_cmd *cmd,
187 	u32 seq_send_order)
188 {
189 	u32 i;
190 
191 	for (i = 0; i < cmd->seq_count; i++)
192 		if (cmd->seq_list[i].seq_send_order == seq_send_order)
193 			return &cmd->seq_list[i];
194 
195 	return NULL;
196 }
197 
198 struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsi_cmd *cmd)
199 {
200 	u32 i;
201 
202 	if (!cmd->seq_list) {
203 		pr_err("struct iscsi_cmd->seq_list is NULL!\n");
204 		return NULL;
205 	}
206 
207 	for (i = 0; i < cmd->seq_count; i++) {
208 		if (cmd->seq_list[i].type != SEQTYPE_NORMAL)
209 			continue;
210 		if (cmd->seq_list[i].seq_send_order == cmd->seq_send_order) {
211 			cmd->seq_send_order++;
212 			return &cmd->seq_list[i];
213 		}
214 	}
215 
216 	return NULL;
217 }
218 
219 struct iscsi_r2t *iscsit_get_holder_for_r2tsn(
220 	struct iscsi_cmd *cmd,
221 	u32 r2t_sn)
222 {
223 	struct iscsi_r2t *r2t;
224 
225 	spin_lock_bh(&cmd->r2t_lock);
226 	list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
227 		if (r2t->r2t_sn == r2t_sn) {
228 			spin_unlock_bh(&cmd->r2t_lock);
229 			return r2t;
230 		}
231 	}
232 	spin_unlock_bh(&cmd->r2t_lock);
233 
234 	return NULL;
235 }
236 
237 static inline int iscsit_check_received_cmdsn(struct iscsi_session *sess, u32 cmdsn)
238 {
239 	u32 max_cmdsn;
240 	int ret;
241 
242 	/*
243 	 * This is the proper method of checking received CmdSN against
244 	 * ExpCmdSN and MaxCmdSN values, as well as accounting for out
245 	 * or order CmdSNs due to multiple connection sessions and/or
246 	 * CRC failures.
247 	 */
248 	max_cmdsn = atomic_read(&sess->max_cmd_sn);
249 	if (iscsi_sna_gt(cmdsn, max_cmdsn)) {
250 		pr_err("Received CmdSN: 0x%08x is greater than"
251 		       " MaxCmdSN: 0x%08x, ignoring.\n", cmdsn, max_cmdsn);
252 		ret = CMDSN_MAXCMDSN_OVERRUN;
253 
254 	} else if (cmdsn == sess->exp_cmd_sn) {
255 		sess->exp_cmd_sn++;
256 		pr_debug("Received CmdSN matches ExpCmdSN,"
257 		      " incremented ExpCmdSN to: 0x%08x\n",
258 		      sess->exp_cmd_sn);
259 		ret = CMDSN_NORMAL_OPERATION;
260 
261 	} else if (iscsi_sna_gt(cmdsn, sess->exp_cmd_sn)) {
262 		pr_debug("Received CmdSN: 0x%08x is greater"
263 		      " than ExpCmdSN: 0x%08x, not acknowledging.\n",
264 		      cmdsn, sess->exp_cmd_sn);
265 		ret = CMDSN_HIGHER_THAN_EXP;
266 
267 	} else {
268 		pr_err("Received CmdSN: 0x%08x is less than"
269 		       " ExpCmdSN: 0x%08x, ignoring.\n", cmdsn,
270 		       sess->exp_cmd_sn);
271 		ret = CMDSN_LOWER_THAN_EXP;
272 	}
273 
274 	return ret;
275 }
276 
/*
 * Commands may be received out of order if MC/S is in use.
 * Ensure they are executed in CmdSN order.
 *
 * Classifies @cmdsn (wire byte order) under sess->cmdsn_mutex and either
 * executes the command immediately, parks it on the out-of-order list,
 * or schedules its removal.  A failed execute/OOO-handling path sends a
 * Reject PDU (ISCSI_REASON_BOOKMARK_NO_RESOURCES) after the mutex is
 * dropped.  Returns a CMDSN_* status code.
 */
int iscsit_sequence_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
			unsigned char *buf, __be32 cmdsn)
{
	int ret, cmdsn_ret;
	bool reject = false;
	u8 reason = ISCSI_REASON_BOOKMARK_NO_RESOURCES;

	mutex_lock(&conn->sess->cmdsn_mutex);

	cmdsn_ret = iscsit_check_received_cmdsn(conn->sess, be32_to_cpu(cmdsn));
	switch (cmdsn_ret) {
	case CMDSN_NORMAL_OPERATION:
		/*
		 * In-order: execute now, then drain any queued out-of-order
		 * commands that this one may have unblocked.
		 */
		ret = iscsit_execute_cmd(cmd, 0);
		if ((ret >= 0) && !list_empty(&conn->sess->sess_ooo_cmdsn_list))
			iscsit_execute_ooo_cmdsns(conn->sess);
		else if (ret < 0) {
			reject = true;
			ret = CMDSN_ERROR_CANNOT_RECOVER;
		}
		break;
	case CMDSN_HIGHER_THAN_EXP:
		/* Out-of-order: stash until the missing CmdSNs arrive */
		ret = iscsit_handle_ooo_cmdsn(conn->sess, cmd, be32_to_cpu(cmdsn));
		if (ret < 0) {
			reject = true;
			ret = CMDSN_ERROR_CANNOT_RECOVER;
			break;
		}
		ret = CMDSN_HIGHER_THAN_EXP;
		break;
	case CMDSN_LOWER_THAN_EXP:
	case CMDSN_MAXCMDSN_OVERRUN:
	default:
		/* Stale or window-overrunning CmdSN: drop the command */
		cmd->i_state = ISTATE_REMOVE;
		iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state);
		/*
		 * Existing callers for iscsit_sequence_cmd() will silently
		 * ignore commands with CMDSN_LOWER_THAN_EXP, so force this
		 * return for CMDSN_MAXCMDSN_OVERRUN as well..
		 */
		ret = CMDSN_LOWER_THAN_EXP;
		break;
	}
	mutex_unlock(&conn->sess->cmdsn_mutex);

	/* Reject outside the mutex; iscsit_reject_cmd() may block/queue */
	if (reject)
		iscsit_reject_cmd(cmd, reason, buf);

	return ret;
}
EXPORT_SYMBOL(iscsit_sequence_cmd);
331 
/*
 * Validate an unsolicited Data-Out PDU against the negotiated session
 * parameters (InitialR2T / FirstBurstLength) and the command's expected
 * transfer length.  On a protocol violation a CHECK_CONDITION is sent
 * and -1 is returned; otherwise returns 0.
 */
int iscsit_check_unsolicited_dataout(struct iscsi_cmd *cmd, unsigned char *buf)
{
	struct iscsi_conn *conn = cmd->conn;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct iscsi_data *hdr = (struct iscsi_data *) buf;
	u32 payload_length = ntoh24(hdr->dlength);

	/* Unsolicited data is not permitted when InitialR2T=Yes */
	if (conn->sess->sess_ops->InitialR2T) {
		pr_err("Received unexpected unsolicited data"
			" while InitialR2T=Yes, protocol error.\n");
		transport_send_check_condition_and_sense(se_cmd,
				TCM_UNEXPECTED_UNSOLICITED_DATA, 0);
		return -1;
	}

	/* The running unsolicited burst must stay within FirstBurstLength */
	if ((cmd->first_burst_len + payload_length) >
	     conn->sess->sess_ops->FirstBurstLength) {
		pr_err("Total %u bytes exceeds FirstBurstLength: %u"
			" for this Unsolicited DataOut Burst.\n",
			(cmd->first_burst_len + payload_length),
				conn->sess->sess_ops->FirstBurstLength);
		transport_send_check_condition_and_sense(se_cmd,
				TCM_INCORRECT_AMOUNT_OF_DATA, 0);
		return -1;
	}

	/* Remaining checks only apply to the final PDU of the burst */
	if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL))
		return 0;

	/*
	 * A final unsolicited burst must end exactly at the command's
	 * expected transfer length or exactly at FirstBurstLength.
	 */
	if (((cmd->first_burst_len + payload_length) != cmd->se_cmd.data_length) &&
	    ((cmd->first_burst_len + payload_length) !=
	      conn->sess->sess_ops->FirstBurstLength)) {
		pr_err("Unsolicited non-immediate data received %u"
			" does not equal FirstBurstLength: %u, and does"
			" not equal ExpXferLen %u.\n",
			(cmd->first_burst_len + payload_length),
			conn->sess->sess_ops->FirstBurstLength, cmd->se_cmd.data_length);
		transport_send_check_condition_and_sense(se_cmd,
				TCM_INCORRECT_AMOUNT_OF_DATA, 0);
		return -1;
	}
	return 0;
}
375 
376 struct iscsi_cmd *iscsit_find_cmd_from_itt(
377 	struct iscsi_conn *conn,
378 	itt_t init_task_tag)
379 {
380 	struct iscsi_cmd *cmd;
381 
382 	spin_lock_bh(&conn->cmd_lock);
383 	list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
384 		if (cmd->init_task_tag == init_task_tag) {
385 			spin_unlock_bh(&conn->cmd_lock);
386 			return cmd;
387 		}
388 	}
389 	spin_unlock_bh(&conn->cmd_lock);
390 
391 	pr_err("Unable to locate ITT: 0x%08x on CID: %hu",
392 			init_task_tag, conn->cid);
393 	return NULL;
394 }
395 EXPORT_SYMBOL(iscsit_find_cmd_from_itt);
396 
397 struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump(
398 	struct iscsi_conn *conn,
399 	itt_t init_task_tag,
400 	u32 length)
401 {
402 	struct iscsi_cmd *cmd;
403 
404 	spin_lock_bh(&conn->cmd_lock);
405 	list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
406 		if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT)
407 			continue;
408 		if (cmd->init_task_tag == init_task_tag) {
409 			spin_unlock_bh(&conn->cmd_lock);
410 			return cmd;
411 		}
412 	}
413 	spin_unlock_bh(&conn->cmd_lock);
414 
415 	pr_err("Unable to locate ITT: 0x%08x on CID: %hu,"
416 			" dumping payload\n", init_task_tag, conn->cid);
417 	if (length)
418 		iscsit_dump_data_payload(conn, length, 1);
419 
420 	return NULL;
421 }
422 EXPORT_SYMBOL(iscsit_find_cmd_from_itt_or_dump);
423 
424 struct iscsi_cmd *iscsit_find_cmd_from_ttt(
425 	struct iscsi_conn *conn,
426 	u32 targ_xfer_tag)
427 {
428 	struct iscsi_cmd *cmd = NULL;
429 
430 	spin_lock_bh(&conn->cmd_lock);
431 	list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
432 		if (cmd->targ_xfer_tag == targ_xfer_tag) {
433 			spin_unlock_bh(&conn->cmd_lock);
434 			return cmd;
435 		}
436 	}
437 	spin_unlock_bh(&conn->cmd_lock);
438 
439 	pr_err("Unable to locate TTT: 0x%08x on CID: %hu\n",
440 			targ_xfer_tag, conn->cid);
441 	return NULL;
442 }
443 
/*
 * Search both connection-recovery lists of @sess for the command with
 * Initiator Task Tag @init_task_tag.
 *
 * Returns -2 and sets *cmd_ptr/*cr_ptr when found on an INACTIVE
 * recovery entry, 0 when found on an ACTIVE recovery entry (ready for
 * reassignment), and -1 when not found at all.
 */
int iscsit_find_cmd_for_recovery(
	struct iscsi_session *sess,
	struct iscsi_cmd **cmd_ptr,
	struct iscsi_conn_recovery **cr_ptr,
	itt_t init_task_tag)
{
	struct iscsi_cmd *cmd = NULL;
	struct iscsi_conn_recovery *cr;
	/*
	 * Scan through the inactive connection recovery list's command list.
	 * If init_task_tag matches, the command still belongs to an
	 * inactive recovery entry.
	 */
	spin_lock(&sess->cr_i_lock);
	list_for_each_entry(cr, &sess->cr_inactive_list, cr_list) {
		spin_lock(&cr->conn_recovery_cmd_lock);
		list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_conn_node) {
			if (cmd->init_task_tag == init_task_tag) {
				spin_unlock(&cr->conn_recovery_cmd_lock);
				spin_unlock(&sess->cr_i_lock);

				*cr_ptr = cr;
				*cmd_ptr = cmd;
				return -2;
			}
		}
		spin_unlock(&cr->conn_recovery_cmd_lock);
	}
	spin_unlock(&sess->cr_i_lock);
	/*
	 * Scan through the active connection recovery list's command list.
	 * If init_task_tag matches the command is ready to be reassigned.
	 */
	spin_lock(&sess->cr_a_lock);
	list_for_each_entry(cr, &sess->cr_active_list, cr_list) {
		spin_lock(&cr->conn_recovery_cmd_lock);
		list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_conn_node) {
			if (cmd->init_task_tag == init_task_tag) {
				spin_unlock(&cr->conn_recovery_cmd_lock);
				spin_unlock(&sess->cr_a_lock);

				*cr_ptr = cr;
				*cmd_ptr = cmd;
				return 0;
			}
		}
		spin_unlock(&cr->conn_recovery_cmd_lock);
	}
	spin_unlock(&sess->cr_a_lock);

	return -1;
}
495 
496 void iscsit_add_cmd_to_immediate_queue(
497 	struct iscsi_cmd *cmd,
498 	struct iscsi_conn *conn,
499 	u8 state)
500 {
501 	struct iscsi_queue_req *qr;
502 
503 	qr = kmem_cache_zalloc(lio_qr_cache, GFP_ATOMIC);
504 	if (!qr) {
505 		pr_err("Unable to allocate memory for"
506 				" struct iscsi_queue_req\n");
507 		return;
508 	}
509 	INIT_LIST_HEAD(&qr->qr_list);
510 	qr->cmd = cmd;
511 	qr->state = state;
512 
513 	spin_lock_bh(&conn->immed_queue_lock);
514 	list_add_tail(&qr->qr_list, &conn->immed_queue_list);
515 	atomic_inc(&cmd->immed_queue_count);
516 	atomic_set(&conn->check_immediate_queue, 1);
517 	spin_unlock_bh(&conn->immed_queue_lock);
518 
519 	wake_up(&conn->queues_wq);
520 }
521 EXPORT_SYMBOL(iscsit_add_cmd_to_immediate_queue);
522 
523 struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *conn)
524 {
525 	struct iscsi_queue_req *qr;
526 
527 	spin_lock_bh(&conn->immed_queue_lock);
528 	if (list_empty(&conn->immed_queue_list)) {
529 		spin_unlock_bh(&conn->immed_queue_lock);
530 		return NULL;
531 	}
532 	qr = list_first_entry(&conn->immed_queue_list,
533 			      struct iscsi_queue_req, qr_list);
534 
535 	list_del(&qr->qr_list);
536 	if (qr->cmd)
537 		atomic_dec(&qr->cmd->immed_queue_count);
538 	spin_unlock_bh(&conn->immed_queue_lock);
539 
540 	return qr;
541 }
542 
/*
 * Remove and free every immediate-queue entry that references @cmd.
 * Used when tearing a command down so the tx thread never sees a stale
 * queue request.  Logs if the per-command counter is still non-zero
 * afterwards, which would indicate a queue accounting bug.
 */
static void iscsit_remove_cmd_from_immediate_queue(
	struct iscsi_cmd *cmd,
	struct iscsi_conn *conn)
{
	struct iscsi_queue_req *qr, *qr_tmp;

	spin_lock_bh(&conn->immed_queue_lock);
	/* Fast path: nothing queued for this command */
	if (!atomic_read(&cmd->immed_queue_count)) {
		spin_unlock_bh(&conn->immed_queue_lock);
		return;
	}

	list_for_each_entry_safe(qr, qr_tmp, &conn->immed_queue_list, qr_list) {
		if (qr->cmd != cmd)
			continue;

		atomic_dec(&qr->cmd->immed_queue_count);
		list_del(&qr->qr_list);
		kmem_cache_free(lio_qr_cache, qr);
	}
	spin_unlock_bh(&conn->immed_queue_lock);

	/* Should be zero now; a leftover count means a lost queue entry */
	if (atomic_read(&cmd->immed_queue_count)) {
		pr_err("ITT: 0x%08x immed_queue_count: %d\n",
			cmd->init_task_tag,
			atomic_read(&cmd->immed_queue_count));
	}
}
571 
572 int iscsit_add_cmd_to_response_queue(
573 	struct iscsi_cmd *cmd,
574 	struct iscsi_conn *conn,
575 	u8 state)
576 {
577 	struct iscsi_queue_req *qr;
578 
579 	qr = kmem_cache_zalloc(lio_qr_cache, GFP_ATOMIC);
580 	if (!qr) {
581 		pr_err("Unable to allocate memory for"
582 			" struct iscsi_queue_req\n");
583 		return -ENOMEM;
584 	}
585 	INIT_LIST_HEAD(&qr->qr_list);
586 	qr->cmd = cmd;
587 	qr->state = state;
588 
589 	spin_lock_bh(&conn->response_queue_lock);
590 	list_add_tail(&qr->qr_list, &conn->response_queue_list);
591 	atomic_inc(&cmd->response_queue_count);
592 	spin_unlock_bh(&conn->response_queue_lock);
593 
594 	wake_up(&conn->queues_wq);
595 	return 0;
596 }
597 
598 struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *conn)
599 {
600 	struct iscsi_queue_req *qr;
601 
602 	spin_lock_bh(&conn->response_queue_lock);
603 	if (list_empty(&conn->response_queue_list)) {
604 		spin_unlock_bh(&conn->response_queue_lock);
605 		return NULL;
606 	}
607 
608 	qr = list_first_entry(&conn->response_queue_list,
609 			      struct iscsi_queue_req, qr_list);
610 
611 	list_del(&qr->qr_list);
612 	if (qr->cmd)
613 		atomic_dec(&qr->cmd->response_queue_count);
614 	spin_unlock_bh(&conn->response_queue_lock);
615 
616 	return qr;
617 }
618 
/*
 * Remove and free every response-queue entry that references @cmd.
 * Mirror of iscsit_remove_cmd_from_immediate_queue() for the response
 * queue.  Logs if the per-command counter is still non-zero afterwards,
 * which would indicate a queue accounting bug.
 */
static void iscsit_remove_cmd_from_response_queue(
	struct iscsi_cmd *cmd,
	struct iscsi_conn *conn)
{
	struct iscsi_queue_req *qr, *qr_tmp;

	spin_lock_bh(&conn->response_queue_lock);
	/* Fast path: nothing queued for this command */
	if (!atomic_read(&cmd->response_queue_count)) {
		spin_unlock_bh(&conn->response_queue_lock);
		return;
	}

	list_for_each_entry_safe(qr, qr_tmp, &conn->response_queue_list,
				qr_list) {
		if (qr->cmd != cmd)
			continue;

		atomic_dec(&qr->cmd->response_queue_count);
		list_del(&qr->qr_list);
		kmem_cache_free(lio_qr_cache, qr);
	}
	spin_unlock_bh(&conn->response_queue_lock);

	/* Should be zero now; a leftover count means a lost queue entry */
	if (atomic_read(&cmd->response_queue_count)) {
		pr_err("ITT: 0x%08x response_queue_count: %d\n",
			cmd->init_task_tag,
			atomic_read(&cmd->response_queue_count));
	}
}
648 
649 bool iscsit_conn_all_queues_empty(struct iscsi_conn *conn)
650 {
651 	bool empty;
652 
653 	spin_lock_bh(&conn->immed_queue_lock);
654 	empty = list_empty(&conn->immed_queue_list);
655 	spin_unlock_bh(&conn->immed_queue_lock);
656 
657 	if (!empty)
658 		return empty;
659 
660 	spin_lock_bh(&conn->response_queue_lock);
661 	empty = list_empty(&conn->response_queue_list);
662 	spin_unlock_bh(&conn->response_queue_lock);
663 
664 	return empty;
665 }
666 
667 void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *conn)
668 {
669 	struct iscsi_queue_req *qr, *qr_tmp;
670 
671 	spin_lock_bh(&conn->immed_queue_lock);
672 	list_for_each_entry_safe(qr, qr_tmp, &conn->immed_queue_list, qr_list) {
673 		list_del(&qr->qr_list);
674 		if (qr->cmd)
675 			atomic_dec(&qr->cmd->immed_queue_count);
676 
677 		kmem_cache_free(lio_qr_cache, qr);
678 	}
679 	spin_unlock_bh(&conn->immed_queue_lock);
680 
681 	spin_lock_bh(&conn->response_queue_lock);
682 	list_for_each_entry_safe(qr, qr_tmp, &conn->response_queue_list,
683 			qr_list) {
684 		list_del(&qr->qr_list);
685 		if (qr->cmd)
686 			atomic_dec(&qr->cmd->response_queue_count);
687 
688 		kmem_cache_free(lio_qr_cache, qr);
689 	}
690 	spin_unlock_bh(&conn->response_queue_lock);
691 }
692 
/*
 * Final release of an iscsi_cmd: free its auxiliary allocations and
 * return its tag to the session's percpu_ida tag pool (the command
 * storage itself lives inside sess_cmd_map and is not freed here —
 * see iscsit_allocate_cmd()).
 */
void iscsit_release_cmd(struct iscsi_cmd *cmd)
{
	struct iscsi_session *sess;
	struct se_cmd *se_cmd = &cmd->se_cmd;

	/* cmd->sess is the fallback when the conn back-pointer is gone */
	if (cmd->conn)
		sess = cmd->conn->sess;
	else
		sess = cmd->sess;

	BUG_ON(!sess || !sess->se_sess);

	/* kfree(NULL) is a no-op, so unset members are safe */
	kfree(cmd->buf_ptr);
	kfree(cmd->pdu_list);
	kfree(cmd->seq_list);
	kfree(cmd->tmr_req);
	kfree(cmd->iov_data);
	kfree(cmd->text_in_ptr);

	percpu_ida_free(&sess->se_sess->sess_tag_pool, se_cmd->map_tag);
}
EXPORT_SYMBOL(iscsit_release_cmd);
715 
/*
 * Tear down per-command transfer state: stop the Data-Out timer and
 * free R2Ts for WRITEs, free DataIN requests for READs, optionally
 * purge the command from both connection queues, and give the
 * transport a chance to release its private state.  Does NOT free the
 * command itself (see iscsit_free_cmd()/iscsit_release_cmd()).
 */
void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool check_queues)
{
	struct iscsi_conn *conn = cmd->conn;

	if (cmd->data_direction == DMA_TO_DEVICE) {
		iscsit_stop_dataout_timer(cmd);
		iscsit_free_r2ts_from_list(cmd);
	}
	if (cmd->data_direction == DMA_FROM_DEVICE)
		iscsit_free_all_datain_reqs(cmd);

	if (conn && check_queues) {
		iscsit_remove_cmd_from_immediate_queue(cmd, conn);
		iscsit_remove_cmd_from_response_queue(cmd, conn);
	}

	/* Optional transport hook for per-command private cleanup */
	if (conn && conn->conn_transport->iscsit_release_cmd)
		conn->conn_transport->iscsit_release_cmd(conn, cmd);
}
735 
/*
 * Release @cmd.  If the command was handed to the target core
 * (se_cmd.se_tfo set), hand it off via transport_generic_free_cmd();
 * otherwise release it directly.
 *
 * During shutdown, transport_generic_free_cmd() returning 0 with a live
 * se_sess means the command was still in-flight and has only now been
 * quiesced, so __iscsit_free_cmd() is run a second time to tear down any
 * transfer state created in the meantime before dropping the final
 * se_cmd reference.
 */
void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
{
	struct se_cmd *se_cmd = cmd->se_cmd.se_tfo ? &cmd->se_cmd : NULL;
	int rc;

	__iscsit_free_cmd(cmd, shutdown);
	if (se_cmd) {
		rc = transport_generic_free_cmd(se_cmd, shutdown);
		if (!rc && shutdown && se_cmd->se_sess) {
			__iscsit_free_cmd(cmd, shutdown);
			target_put_sess_cmd(se_cmd);
		}
	} else {
		iscsit_release_cmd(cmd);
	}
}
EXPORT_SYMBOL(iscsit_free_cmd);
753 
/*
 * Wait for the session usage count to drain to zero.
 *
 * Returns 0 when the count is already zero, 1 after blocking until the
 * last user completed session_waiting_on_uc_comp, or 2 when called from
 * interrupt context (cannot sleep; the caller must retry/handle it).
 */
int iscsit_check_session_usage_count(struct iscsi_session *sess)
{
	spin_lock_bh(&sess->session_usage_lock);
	if (sess->session_usage_count != 0) {
		sess->session_waiting_on_uc = 1;
		spin_unlock_bh(&sess->session_usage_lock);
		if (in_interrupt())
			return 2;

		wait_for_completion(&sess->session_waiting_on_uc_comp);
		return 1;
	}
	spin_unlock_bh(&sess->session_usage_lock);

	return 0;
}
770 
771 void iscsit_dec_session_usage_count(struct iscsi_session *sess)
772 {
773 	spin_lock_bh(&sess->session_usage_lock);
774 	sess->session_usage_count--;
775 
776 	if (!sess->session_usage_count && sess->session_waiting_on_uc)
777 		complete(&sess->session_waiting_on_uc_comp);
778 
779 	spin_unlock_bh(&sess->session_usage_lock);
780 }
781 
/*
 * Take one reference on the session usage count under
 * sess->session_usage_lock.  Paired with
 * iscsit_dec_session_usage_count().
 */
void iscsit_inc_session_usage_count(struct iscsi_session *sess)
{
	spin_lock_bh(&sess->session_usage_lock);
	sess->session_usage_count++;
	spin_unlock_bh(&sess->session_usage_lock);
}
788 
789 struct iscsi_conn *iscsit_get_conn_from_cid(struct iscsi_session *sess, u16 cid)
790 {
791 	struct iscsi_conn *conn;
792 
793 	spin_lock_bh(&sess->conn_lock);
794 	list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
795 		if ((conn->cid == cid) &&
796 		    (conn->conn_state == TARG_CONN_STATE_LOGGED_IN)) {
797 			iscsit_inc_conn_usage_count(conn);
798 			spin_unlock_bh(&sess->conn_lock);
799 			return conn;
800 		}
801 	}
802 	spin_unlock_bh(&sess->conn_lock);
803 
804 	return NULL;
805 }
806 
/*
 * Connection lookup by CID for connection recovery (any state, unlike
 * iscsit_get_conn_from_cid()).  On a match, bumps the connection usage
 * count and marks connection_wait_rcfr under conn->state_lock (nested
 * inside sess->conn_lock) before returning the connection.  Returns
 * NULL when no connection with @cid exists.
 */
struct iscsi_conn *iscsit_get_conn_from_cid_rcfr(struct iscsi_session *sess, u16 cid)
{
	struct iscsi_conn *conn;

	spin_lock_bh(&sess->conn_lock);
	list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
		if (conn->cid == cid) {
			iscsit_inc_conn_usage_count(conn);
			spin_lock(&conn->state_lock);
			atomic_set(&conn->connection_wait_rcfr, 1);
			spin_unlock(&conn->state_lock);
			spin_unlock_bh(&sess->conn_lock);
			return conn;
		}
	}
	spin_unlock_bh(&sess->conn_lock);

	return NULL;
}
826 
/*
 * Block until the connection usage count drains to zero.  If the count
 * is non-zero, flags conn_waiting_on_uc and sleeps on
 * conn_waiting_on_uc_comp, which iscsit_dec_conn_usage_count()
 * completes when the last reference is dropped.  May sleep.
 */
void iscsit_check_conn_usage_count(struct iscsi_conn *conn)
{
	spin_lock_bh(&conn->conn_usage_lock);
	if (conn->conn_usage_count != 0) {
		conn->conn_waiting_on_uc = 1;
		spin_unlock_bh(&conn->conn_usage_lock);

		wait_for_completion(&conn->conn_waiting_on_uc_comp);
		return;
	}
	spin_unlock_bh(&conn->conn_usage_lock);
}
839 
840 void iscsit_dec_conn_usage_count(struct iscsi_conn *conn)
841 {
842 	spin_lock_bh(&conn->conn_usage_lock);
843 	conn->conn_usage_count--;
844 
845 	if (!conn->conn_usage_count && conn->conn_waiting_on_uc)
846 		complete(&conn->conn_waiting_on_uc_comp);
847 
848 	spin_unlock_bh(&conn->conn_usage_lock);
849 }
850 
/*
 * Take one reference on the connection usage count under
 * conn->conn_usage_lock.  Paired with iscsit_dec_conn_usage_count().
 */
void iscsit_inc_conn_usage_count(struct iscsi_conn *conn)
{
	spin_lock_bh(&conn->conn_usage_lock);
	conn->conn_usage_count++;
	spin_unlock_bh(&conn->conn_usage_lock);
}
857 
/*
 * Allocate and queue a target-initiated NopIN on @conn.  When
 * @want_response is set, a real TTT is assigned, the NopIN response
 * timer is started and the initiator is expected to reply; otherwise a
 * reserved TTT (0xFFFFFFFF) is used and no reply is solicited.
 *
 * Returns 0 on success, -1 when no command could be allocated.
 */
static int iscsit_add_nopin(struct iscsi_conn *conn, int want_response)
{
	u8 state;
	struct iscsi_cmd *cmd;

	cmd = iscsit_allocate_cmd(conn, TASK_RUNNING);
	if (!cmd)
		return -1;

	cmd->iscsi_opcode = ISCSI_OP_NOOP_IN;
	state = (want_response) ? ISTATE_SEND_NOPIN_WANT_RESPONSE :
				ISTATE_SEND_NOPIN_NO_RESPONSE;
	cmd->init_task_tag = RESERVED_ITT;
	/* Only a response-soliciting NopIN gets a real Target Transfer Tag */
	cmd->targ_xfer_tag = (want_response) ?
			     session_get_next_ttt(conn->sess) : 0xFFFFFFFF;
	spin_lock_bh(&conn->cmd_lock);
	list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
	spin_unlock_bh(&conn->cmd_lock);

	if (want_response)
		iscsit_start_nopin_response_timer(conn);
	iscsit_add_cmd_to_immediate_queue(cmd, conn, state);

	return 0;
}
883 
884 void iscsit_handle_nopin_response_timeout(struct timer_list *t)
885 {
886 	struct iscsi_conn *conn = from_timer(conn, t, nopin_response_timer);
887 
888 	iscsit_inc_conn_usage_count(conn);
889 
890 	spin_lock_bh(&conn->nopin_timer_lock);
891 	if (conn->nopin_response_timer_flags & ISCSI_TF_STOP) {
892 		spin_unlock_bh(&conn->nopin_timer_lock);
893 		iscsit_dec_conn_usage_count(conn);
894 		return;
895 	}
896 
897 	pr_debug("Did not receive response to NOPIN on CID: %hu on"
898 		" SID: %u, failing connection.\n", conn->cid,
899 			conn->sess->sid);
900 	conn->nopin_response_timer_flags &= ~ISCSI_TF_RUNNING;
901 	spin_unlock_bh(&conn->nopin_timer_lock);
902 
903 	{
904 	struct iscsi_portal_group *tpg = conn->sess->tpg;
905 	struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
906 
907 	if (tiqn) {
908 		spin_lock_bh(&tiqn->sess_err_stats.lock);
909 		strcpy(tiqn->sess_err_stats.last_sess_fail_rem_name,
910 				conn->sess->sess_ops->InitiatorName);
911 		tiqn->sess_err_stats.last_sess_failure_type =
912 				ISCSI_SESS_ERR_CXN_TIMEOUT;
913 		tiqn->sess_err_stats.cxn_timeout_errors++;
914 		atomic_long_inc(&conn->sess->conn_timeout_errors);
915 		spin_unlock_bh(&tiqn->sess_err_stats.lock);
916 	}
917 	}
918 
919 	iscsit_cause_connection_reinstatement(conn, 0);
920 	iscsit_dec_conn_usage_count(conn);
921 }
922 
923 void iscsit_mod_nopin_response_timer(struct iscsi_conn *conn)
924 {
925 	struct iscsi_session *sess = conn->sess;
926 	struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
927 
928 	spin_lock_bh(&conn->nopin_timer_lock);
929 	if (!(conn->nopin_response_timer_flags & ISCSI_TF_RUNNING)) {
930 		spin_unlock_bh(&conn->nopin_timer_lock);
931 		return;
932 	}
933 
934 	mod_timer(&conn->nopin_response_timer,
935 		(get_jiffies_64() + na->nopin_response_timeout * HZ));
936 	spin_unlock_bh(&conn->nopin_timer_lock);
937 }
938 
/*
 *	Start the NopIN response timer for @conn if it is not already
 *	running.  Takes conn->nopin_timer_lock itself (the previous
 *	"called with lock held" note here was stale — see the locking
 *	below).
 */
void iscsit_start_nopin_response_timer(struct iscsi_conn *conn)
{
	struct iscsi_session *sess = conn->sess;
	struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);

	spin_lock_bh(&conn->nopin_timer_lock);
	if (conn->nopin_response_timer_flags & ISCSI_TF_RUNNING) {
		spin_unlock_bh(&conn->nopin_timer_lock);
		return;
	}

	conn->nopin_response_timer_flags &= ~ISCSI_TF_STOP;
	conn->nopin_response_timer_flags |= ISCSI_TF_RUNNING;
	mod_timer(&conn->nopin_response_timer,
		  jiffies + na->nopin_response_timeout * HZ);

	pr_debug("Started NOPIN Response Timer on CID: %d to %u"
		" seconds\n", conn->cid, na->nopin_response_timeout);
	spin_unlock_bh(&conn->nopin_timer_lock);
}
962 
963 void iscsit_stop_nopin_response_timer(struct iscsi_conn *conn)
964 {
965 	spin_lock_bh(&conn->nopin_timer_lock);
966 	if (!(conn->nopin_response_timer_flags & ISCSI_TF_RUNNING)) {
967 		spin_unlock_bh(&conn->nopin_timer_lock);
968 		return;
969 	}
970 	conn->nopin_response_timer_flags |= ISCSI_TF_STOP;
971 	spin_unlock_bh(&conn->nopin_timer_lock);
972 
973 	del_timer_sync(&conn->nopin_response_timer);
974 
975 	spin_lock_bh(&conn->nopin_timer_lock);
976 	conn->nopin_response_timer_flags &= ~ISCSI_TF_RUNNING;
977 	spin_unlock_bh(&conn->nopin_timer_lock);
978 }
979 
/*
 * NopIN keepalive timer expiry: unless the timer was being stopped,
 * clear the RUNNING flag and send a response-soliciting NopIN to probe
 * the initiator.  The connection usage count is held across the body so
 * the connection cannot be torn down underneath us.
 */
void iscsit_handle_nopin_timeout(struct timer_list *t)
{
	struct iscsi_conn *conn = from_timer(conn, t, nopin_timer);

	iscsit_inc_conn_usage_count(conn);

	spin_lock_bh(&conn->nopin_timer_lock);
	if (conn->nopin_timer_flags & ISCSI_TF_STOP) {
		spin_unlock_bh(&conn->nopin_timer_lock);
		iscsit_dec_conn_usage_count(conn);
		return;
	}
	conn->nopin_timer_flags &= ~ISCSI_TF_RUNNING;
	spin_unlock_bh(&conn->nopin_timer_lock);

	iscsit_add_nopin(conn, 1);
	iscsit_dec_conn_usage_count(conn);
}
998 
/*
 * Called with conn->nopin_timer_lock held.
 *
 * Arm the keepalive NopIN timer if the node attribute enables it and it
 * is not already running.  Lock-free variant used by callers that
 * already hold nopin_timer_lock; see iscsit_start_nopin_timer() for the
 * self-locking wrapper.
 */
void __iscsit_start_nopin_timer(struct iscsi_conn *conn)
{
	struct iscsi_session *sess = conn->sess;
	struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
	/*
	 * NOPIN timeout is disabled.
	 */
	if (!na->nopin_timeout)
		return;

	if (conn->nopin_timer_flags & ISCSI_TF_RUNNING)
		return;

	conn->nopin_timer_flags &= ~ISCSI_TF_STOP;
	conn->nopin_timer_flags |= ISCSI_TF_RUNNING;
	mod_timer(&conn->nopin_timer, jiffies + na->nopin_timeout * HZ);

	pr_debug("Started NOPIN Timer on CID: %d at %u second"
		" interval\n", conn->cid, na->nopin_timeout);
}
1022 
1023 void iscsit_start_nopin_timer(struct iscsi_conn *conn)
1024 {
1025 	struct iscsi_session *sess = conn->sess;
1026 	struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
1027 	/*
1028 	 * NOPIN timeout is disabled..
1029 	 */
1030 	if (!na->nopin_timeout)
1031 		return;
1032 
1033 	spin_lock_bh(&conn->nopin_timer_lock);
1034 	if (conn->nopin_timer_flags & ISCSI_TF_RUNNING) {
1035 		spin_unlock_bh(&conn->nopin_timer_lock);
1036 		return;
1037 	}
1038 
1039 	conn->nopin_timer_flags &= ~ISCSI_TF_STOP;
1040 	conn->nopin_timer_flags |= ISCSI_TF_RUNNING;
1041 	mod_timer(&conn->nopin_timer, jiffies + na->nopin_timeout * HZ);
1042 
1043 	pr_debug("Started NOPIN Timer on CID: %d at %u second"
1044 			" interval\n", conn->cid, na->nopin_timeout);
1045 	spin_unlock_bh(&conn->nopin_timer_lock);
1046 }
1047 
/*
 * Synchronously stop the NOPIN timer for @conn, if it is running.
 *
 * ISCSI_TF_STOP is set under the lock before the lock is dropped for
 * del_timer_sync(); presumably the timer handler checks this flag so a
 * concurrently-firing timer does not re-arm itself — NOTE(review):
 * confirm against the NOPIN timeout handler, which is not visible here.
 * The lock cannot be held across del_timer_sync() if the handler also
 * takes nopin_timer_lock, which is likely why it is released first.
 */
void iscsit_stop_nopin_timer(struct iscsi_conn *conn)
{
	spin_lock_bh(&conn->nopin_timer_lock);
	if (!(conn->nopin_timer_flags & ISCSI_TF_RUNNING)) {
		spin_unlock_bh(&conn->nopin_timer_lock);
		return;
	}
	conn->nopin_timer_flags |= ISCSI_TF_STOP;
	spin_unlock_bh(&conn->nopin_timer_lock);

	/* Wait for a handler already running on another CPU to finish. */
	del_timer_sync(&conn->nopin_timer);

	spin_lock_bh(&conn->nopin_timer_lock);
	conn->nopin_timer_flags &= ~ISCSI_TF_RUNNING;
	spin_unlock_bh(&conn->nopin_timer_lock);
}
1064 
1065 int iscsit_send_tx_data(
1066 	struct iscsi_cmd *cmd,
1067 	struct iscsi_conn *conn,
1068 	int use_misc)
1069 {
1070 	int tx_sent, tx_size;
1071 	u32 iov_count;
1072 	struct kvec *iov;
1073 
1074 send_data:
1075 	tx_size = cmd->tx_size;
1076 
1077 	if (!use_misc) {
1078 		iov = &cmd->iov_data[0];
1079 		iov_count = cmd->iov_data_count;
1080 	} else {
1081 		iov = &cmd->iov_misc[0];
1082 		iov_count = cmd->iov_misc_count;
1083 	}
1084 
1085 	tx_sent = tx_data(conn, &iov[0], iov_count, tx_size);
1086 	if (tx_size != tx_sent) {
1087 		if (tx_sent == -EAGAIN) {
1088 			pr_err("tx_data() returned -EAGAIN\n");
1089 			goto send_data;
1090 		} else
1091 			return -1;
1092 	}
1093 	cmd->tx_size = 0;
1094 
1095 	return 0;
1096 }
1097 
/*
 * Transmit a PDU whose payload lives in cmd's scatterlist, using
 * sock->ops->sendpage() for the data segment to avoid copying it.
 * Sends, in order: the PDU header (plus header digest when HeaderDigest
 * is negotiated), each scatterlist page, any pad bytes, and the data
 * digest when DataDigest is negotiated.  -EAGAIN from the socket is
 * retried in place via the goto labels.
 *
 * Returns 0 on success, -1 on any transmit failure.
 */
int iscsit_fe_sendpage_sg(
	struct iscsi_cmd *cmd,
	struct iscsi_conn *conn)
{
	struct scatterlist *sg = cmd->first_data_sg;
	struct kvec iov;
	u32 tx_hdr_size, data_len;
	u32 offset = cmd->first_data_sg_off;
	int tx_sent, iov_off;

send_hdr:
	tx_hdr_size = ISCSI_HDR_LEN;
	if (conn->conn_ops->HeaderDigest)
		tx_hdr_size += ISCSI_CRC_LEN;

	iov.iov_base = cmd->pdu;
	iov.iov_len = tx_hdr_size;

	tx_sent = tx_data(conn, &iov, 1, tx_hdr_size);
	if (tx_hdr_size != tx_sent) {
		if (tx_sent == -EAGAIN) {
			pr_err("tx_data() returned -EAGAIN\n");
			goto send_hdr;
		}
		return -1;
	}

	/* Payload length = total tx size minus header and pad bytes. */
	data_len = cmd->tx_size - tx_hdr_size - cmd->padding;
	/*
	 * Set iov_off used by padding and data digest tx_data() calls below
	 * in order to determine proper offset into cmd->iov_data[]
	 */
	if (conn->conn_ops->DataDigest) {
		data_len -= ISCSI_CRC_LEN;
		if (cmd->padding)
			iov_off = (cmd->iov_data_count - 2);
		else
			iov_off = (cmd->iov_data_count - 1);
	} else {
		iov_off = (cmd->iov_data_count - 1);
	}
	/*
	 * Perform sendpage() for each page in the scatterlist
	 */
	while (data_len) {
		u32 space = (sg->length - offset);
		u32 sub_len = min_t(u32, data_len, space);
send_pg:
		tx_sent = conn->sock->ops->sendpage(conn->sock,
					sg_page(sg), sg->offset + offset, sub_len, 0);
		if (tx_sent != sub_len) {
			if (tx_sent == -EAGAIN) {
				pr_err("tcp_sendpage() returned"
						" -EAGAIN\n");
				goto send_pg;
			}

			pr_err("tcp_sendpage() failure: %d\n",
					tx_sent);
			return -1;
		}

		data_len -= sub_len;
		/* Only the first sg entry may start at a non-zero offset. */
		offset = 0;
		sg = sg_next(sg);
	}

send_padding:
	if (cmd->padding) {
		struct kvec *iov_p = &cmd->iov_data[iov_off++];

		tx_sent = tx_data(conn, iov_p, 1, cmd->padding);
		if (cmd->padding != tx_sent) {
			if (tx_sent == -EAGAIN) {
				pr_err("tx_data() returned -EAGAIN\n");
				goto send_padding;
			}
			return -1;
		}
	}

send_datacrc:
	if (conn->conn_ops->DataDigest) {
		struct kvec *iov_d = &cmd->iov_data[iov_off];

		tx_sent = tx_data(conn, iov_d, 1, ISCSI_CRC_LEN);
		if (ISCSI_CRC_LEN != tx_sent) {
			if (tx_sent == -EAGAIN) {
				pr_err("tx_data() returned -EAGAIN\n");
				goto send_datacrc;
			}
			return -1;
		}
	}

	return 0;
}
1195 
1196 /*
1197  *      This function is used for mainly sending a ISCSI_TARG_LOGIN_RSP PDU
 *      back to the Initiator when an exception condition occurs with the
1199  *      errors set in status_class and status_detail.
1200  *
1201  *      Parameters:     iSCSI Connection, Status Class, Status Detail.
1202  *      Returns:        0 on success, -1 on error.
1203  */
1204 int iscsit_tx_login_rsp(struct iscsi_conn *conn, u8 status_class, u8 status_detail)
1205 {
1206 	struct iscsi_login_rsp *hdr;
1207 	struct iscsi_login *login = conn->conn_login;
1208 
1209 	login->login_failed = 1;
1210 	iscsit_collect_login_stats(conn, status_class, status_detail);
1211 
1212 	memset(&login->rsp[0], 0, ISCSI_HDR_LEN);
1213 
1214 	hdr	= (struct iscsi_login_rsp *)&login->rsp[0];
1215 	hdr->opcode		= ISCSI_OP_LOGIN_RSP;
1216 	hdr->status_class	= status_class;
1217 	hdr->status_detail	= status_detail;
1218 	hdr->itt		= conn->login_itt;
1219 
1220 	return conn->conn_transport->iscsit_put_login_tx(conn, login, 0);
1221 }
1222 
1223 void iscsit_print_session_params(struct iscsi_session *sess)
1224 {
1225 	struct iscsi_conn *conn;
1226 
1227 	pr_debug("-----------------------------[Session Params for"
1228 		" SID: %u]-----------------------------\n", sess->sid);
1229 	spin_lock_bh(&sess->conn_lock);
1230 	list_for_each_entry(conn, &sess->sess_conn_list, conn_list)
1231 		iscsi_dump_conn_ops(conn->conn_ops);
1232 	spin_unlock_bh(&sess->conn_lock);
1233 
1234 	iscsi_dump_sess_ops(sess->sess_ops);
1235 }
1236 
1237 static int iscsit_do_rx_data(
1238 	struct iscsi_conn *conn,
1239 	struct iscsi_data_count *count)
1240 {
1241 	int data = count->data_length, rx_loop = 0, total_rx = 0;
1242 	struct msghdr msg;
1243 
1244 	if (!conn || !conn->sock || !conn->conn_ops)
1245 		return -1;
1246 
1247 	memset(&msg, 0, sizeof(struct msghdr));
1248 	iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC,
1249 		      count->iov, count->iov_count, data);
1250 
1251 	while (msg_data_left(&msg)) {
1252 		rx_loop = sock_recvmsg(conn->sock, &msg, MSG_WAITALL);
1253 		if (rx_loop <= 0) {
1254 			pr_debug("rx_loop: %d total_rx: %d\n",
1255 				rx_loop, total_rx);
1256 			return rx_loop;
1257 		}
1258 		total_rx += rx_loop;
1259 		pr_debug("rx_loop: %d, total_rx: %d, data: %d\n",
1260 				rx_loop, total_rx, data);
1261 	}
1262 
1263 	return total_rx;
1264 }
1265 
1266 int rx_data(
1267 	struct iscsi_conn *conn,
1268 	struct kvec *iov,
1269 	int iov_count,
1270 	int data)
1271 {
1272 	struct iscsi_data_count c;
1273 
1274 	if (!conn || !conn->sock || !conn->conn_ops)
1275 		return -1;
1276 
1277 	memset(&c, 0, sizeof(struct iscsi_data_count));
1278 	c.iov = iov;
1279 	c.iov_count = iov_count;
1280 	c.data_length = data;
1281 	c.type = ISCSI_RX_DATA;
1282 
1283 	return iscsit_do_rx_data(conn, &c);
1284 }
1285 
1286 int tx_data(
1287 	struct iscsi_conn *conn,
1288 	struct kvec *iov,
1289 	int iov_count,
1290 	int data)
1291 {
1292 	struct msghdr msg;
1293 	int total_tx = 0;
1294 
1295 	if (!conn || !conn->sock || !conn->conn_ops)
1296 		return -1;
1297 
1298 	if (data <= 0) {
1299 		pr_err("Data length is: %d\n", data);
1300 		return -1;
1301 	}
1302 
1303 	memset(&msg, 0, sizeof(struct msghdr));
1304 
1305 	iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC,
1306 		      iov, iov_count, data);
1307 
1308 	while (msg_data_left(&msg)) {
1309 		int tx_loop = sock_sendmsg(conn->sock, &msg);
1310 		if (tx_loop <= 0) {
1311 			pr_debug("tx_loop: %d total_tx %d\n",
1312 				tx_loop, total_tx);
1313 			return tx_loop;
1314 		}
1315 		total_tx += tx_loop;
1316 		pr_debug("tx_loop: %d, total_tx: %d, data: %d\n",
1317 					tx_loop, total_tx, data);
1318 	}
1319 
1320 	return total_tx;
1321 }
1322 
1323 void iscsit_collect_login_stats(
1324 	struct iscsi_conn *conn,
1325 	u8 status_class,
1326 	u8 status_detail)
1327 {
1328 	struct iscsi_param *intrname = NULL;
1329 	struct iscsi_tiqn *tiqn;
1330 	struct iscsi_login_stats *ls;
1331 
1332 	tiqn = iscsit_snmp_get_tiqn(conn);
1333 	if (!tiqn)
1334 		return;
1335 
1336 	ls = &tiqn->login_stats;
1337 
1338 	spin_lock(&ls->lock);
1339 	if (status_class == ISCSI_STATUS_CLS_SUCCESS)
1340 		ls->accepts++;
1341 	else if (status_class == ISCSI_STATUS_CLS_REDIRECT) {
1342 		ls->redirects++;
1343 		ls->last_fail_type = ISCSI_LOGIN_FAIL_REDIRECT;
1344 	} else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR)  &&
1345 		 (status_detail == ISCSI_LOGIN_STATUS_AUTH_FAILED)) {
1346 		ls->authenticate_fails++;
1347 		ls->last_fail_type =  ISCSI_LOGIN_FAIL_AUTHENTICATE;
1348 	} else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR)  &&
1349 		 (status_detail == ISCSI_LOGIN_STATUS_TGT_FORBIDDEN)) {
1350 		ls->authorize_fails++;
1351 		ls->last_fail_type = ISCSI_LOGIN_FAIL_AUTHORIZE;
1352 	} else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR) &&
1353 		 (status_detail == ISCSI_LOGIN_STATUS_INIT_ERR)) {
1354 		ls->negotiate_fails++;
1355 		ls->last_fail_type = ISCSI_LOGIN_FAIL_NEGOTIATE;
1356 	} else {
1357 		ls->other_fails++;
1358 		ls->last_fail_type = ISCSI_LOGIN_FAIL_OTHER;
1359 	}
1360 
1361 	/* Save initiator name, ip address and time, if it is a failed login */
1362 	if (status_class != ISCSI_STATUS_CLS_SUCCESS) {
1363 		if (conn->param_list)
1364 			intrname = iscsi_find_param_from_key(INITIATORNAME,
1365 							     conn->param_list);
1366 		strlcpy(ls->last_intr_fail_name,
1367 		       (intrname ? intrname->value : "Unknown"),
1368 		       sizeof(ls->last_intr_fail_name));
1369 
1370 		ls->last_intr_fail_ip_family = conn->login_family;
1371 
1372 		ls->last_intr_fail_sockaddr = conn->login_sockaddr;
1373 		ls->last_fail_time = get_jiffies_64();
1374 	}
1375 
1376 	spin_unlock(&ls->lock);
1377 }
1378 
1379 struct iscsi_tiqn *iscsit_snmp_get_tiqn(struct iscsi_conn *conn)
1380 {
1381 	struct iscsi_portal_group *tpg;
1382 
1383 	if (!conn)
1384 		return NULL;
1385 
1386 	tpg = conn->tpg;
1387 	if (!tpg)
1388 		return NULL;
1389 
1390 	if (!tpg->tpg_tiqn)
1391 		return NULL;
1392 
1393 	return tpg->tpg_tiqn;
1394 }
1395